Diffstat (limited to 'drivers')
-rw-r--r--drivers/acorn/char/pcf8583.c3
-rw-r--r--drivers/base/attribute_container.c5
-rw-r--r--drivers/base/class.c23
-rw-r--r--drivers/base/dd.c3
-rw-r--r--drivers/base/firmware_class.c9
-rw-r--r--drivers/base/map.c3
-rw-r--r--drivers/base/platform.c3
-rw-r--r--drivers/block/cciss.c636
-rw-r--r--drivers/block/cciss.h10
-rw-r--r--drivers/block/cciss_cmd.h8
-rw-r--r--drivers/block/cciss_scsi.c69
-rw-r--r--drivers/block/ll_rw_blk.c38
-rw-r--r--drivers/block/paride/pf.c22
-rw-r--r--drivers/block/pktcdvd.c85
-rw-r--r--drivers/block/scsi_ioctl.c1
-rw-r--r--drivers/block/ub.c55
-rw-r--r--drivers/bluetooth/hci_usb.c19
-rw-r--r--drivers/bluetooth/hci_usb.h5
-rw-r--r--drivers/char/agp/hp-agp.c2
-rw-r--r--drivers/char/amiserial.c4
-rw-r--r--drivers/char/drm/drm_drv.c2
-rw-r--r--drivers/char/drm/drm_proc.c2
-rw-r--r--drivers/char/epca.c84
-rw-r--r--drivers/char/epca.h12
-rw-r--r--drivers/char/hpet.c1
-rw-r--r--drivers/char/hvc_console.c6
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c6
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c2
-rw-r--r--drivers/char/n_r3964.c84
-rw-r--r--drivers/char/vt.c5
-rw-r--r--drivers/char/watchdog/Kconfig93
-rw-r--r--drivers/char/watchdog/Makefile7
-rw-r--r--drivers/char/watchdog/i6300esb.c527
-rw-r--r--drivers/char/watchdog/ibmasr.c405
-rw-r--r--drivers/char/watchdog/mpcore_wdt.c2
-rw-r--r--drivers/char/watchdog/mv64x60_wdt.c252
-rw-r--r--drivers/char/watchdog/pcwd_pci.c44
-rw-r--r--drivers/char/watchdog/s3c2410_wdt.c2
-rw-r--r--drivers/char/watchdog/sbc8360.c414
-rw-r--r--drivers/char/watchdog/w83977f_wdt.c543
-rw-r--r--drivers/connector/cn_queue.c32
-rw-r--r--drivers/connector/connector.c74
-rw-r--r--drivers/firmware/dell_rbu.c241
-rw-r--r--drivers/hwmon/Kconfig9
-rw-r--r--drivers/hwmon/hdaps.c369
-rw-r--r--drivers/i2c/busses/Kconfig12
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-keywest.c1
-rw-r--r--drivers/i2c/busses/i2c-pmac-smu.c316
-rw-r--r--drivers/i2c/busses/i2c-pxa.c12
-rw-r--r--drivers/ide/ide-iops.c41
-rw-r--r--drivers/ide/ide-taskfile.c1
-rw-r--r--drivers/ide/legacy/ide-cs.c2
-rw-r--r--drivers/ide/pci/cmd64x.c2
-rw-r--r--drivers/ide/pci/hpt34x.c2
-rw-r--r--drivers/ieee1394/amdtp.c1
-rw-r--r--drivers/ieee1394/csr1212.h1
-rw-r--r--drivers/ieee1394/dv1394.c1
-rw-r--r--drivers/ieee1394/eth1394.c12
-rw-r--r--drivers/ieee1394/eth1394.h6
-rw-r--r--drivers/ieee1394/hosts.c3
-rw-r--r--drivers/ieee1394/hosts.h8
-rw-r--r--drivers/ieee1394/ieee1394_core.c32
-rw-r--r--drivers/ieee1394/nodemgr.c23
-rw-r--r--drivers/ieee1394/ohci1394.c4
-rw-r--r--drivers/ieee1394/raw1394.c1
-rw-r--r--drivers/ieee1394/sbp2.c107
-rw-r--r--drivers/ieee1394/video1394.c1
-rw-r--r--drivers/infiniband/core/mad_rmpp.c19
-rw-r--r--drivers/infiniband/core/user_mad.c5
-rw-r--r--drivers/infiniband/core/uverbs.h1
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c120
-rw-r--r--drivers/infiniband/core/uverbs_main.c27
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c18
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c19
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c51
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c25
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c13
-rw-r--r--drivers/input/input.c1
-rw-r--r--drivers/input/keyboard/Kconfig11
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/spitzkbd.c478
-rw-r--r--drivers/input/touchscreen/Kconfig6
-rw-r--r--drivers/input/touchscreen/corgi_ts.c70
-rw-r--r--drivers/isdn/divert/divert_procfs.c6
-rw-r--r--drivers/isdn/hardware/eicon/diva_didd.c6
-rw-r--r--drivers/isdn/hardware/eicon/divasproc.c2
-rw-r--r--drivers/isdn/hisax/hfc_pci.c1
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c2
-rw-r--r--drivers/isdn/hisax/st5481.h4
-rw-r--r--drivers/isdn/hisax/st5481_b.c20
-rw-r--r--drivers/isdn/hisax/st5481_d.c26
-rw-r--r--drivers/isdn/hisax/st5481_init.c4
-rw-r--r--drivers/isdn/hisax/st5481_usb.c68
-rw-r--r--drivers/isdn/hysdn/hysdn_procconf.c2
-rw-r--r--drivers/isdn/sc/init.c4
-rw-r--r--drivers/macintosh/smu.c1032
-rw-r--r--drivers/macintosh/therm_adt746x.c2
-rw-r--r--drivers/macintosh/therm_pm72.c2
-rw-r--r--drivers/macintosh/therm_windtunnel.c2
-rw-r--r--drivers/md/dm-ioctl.c9
-rw-r--r--drivers/md/dm-mpath.c16
-rw-r--r--drivers/md/raid6.h4
-rw-r--r--drivers/md/raid6algos.c1
-rw-r--r--drivers/md/raid6altivec.uc18
-rw-r--r--drivers/md/raid6test/Makefile27
-rw-r--r--drivers/media/dvb/frontends/tda10021.c4
-rw-r--r--drivers/media/radio/radio-aimslab.c2
-rw-r--r--drivers/media/radio/radio-aztech.c2
-rw-r--r--drivers/media/radio/radio-cadet.c2
-rw-r--r--drivers/media/radio/radio-gemtek.c2
-rw-r--r--drivers/media/radio/radio-rtrack2.c2
-rw-r--r--drivers/media/radio/radio-sf16fmi.c2
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c2
-rw-r--r--drivers/media/radio/radio-terratec.c2
-rw-r--r--drivers/media/radio/radio-typhoon.c2
-rw-r--r--drivers/media/radio/radio-zoltrix.c2
-rw-r--r--drivers/media/video/bttv-cards.c2
-rw-r--r--drivers/media/video/bttv-driver.c14
-rw-r--r--drivers/media/video/bttvp.h2
-rw-r--r--drivers/media/video/cpia.c2
-rw-r--r--drivers/media/video/cx88/cx88-dvb.c7
-rw-r--r--drivers/media/video/rds.h2
-rw-r--r--drivers/media/video/saa6588.c4
-rw-r--r--drivers/message/fusion/Kconfig17
-rw-r--r--drivers/message/fusion/Makefile1
-rw-r--r--drivers/message/fusion/mptbase.c963
-rw-r--r--drivers/message/fusion/mptbase.h56
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/message/fusion/mptfc.c2
-rw-r--r--drivers/message/fusion/mptlan.c7
-rw-r--r--drivers/message/fusion/mptsas.c1235
-rw-r--r--drivers/message/fusion/mptscsih.c463
-rw-r--r--drivers/message/fusion/mptscsih.h7
-rw-r--r--drivers/message/fusion/mptspi.c2
-rw-r--r--drivers/message/i2o/config-osm.c5
-rw-r--r--drivers/mfd/ucb1x00-ts.c4
-rw-r--r--drivers/mtd/devices/docecc.c8
-rw-r--r--drivers/mtd/maps/bast-flash.c1
-rw-r--r--drivers/mtd/maps/ixp2000.c1
-rw-r--r--drivers/mtd/maps/ixp4xx.c3
-rw-r--r--drivers/mtd/maps/omap_nor.c1
-rw-r--r--drivers/mtd/maps/sa1100-flash.c1
-rw-r--r--drivers/mtd/maps/sharpsl-flash.c2
-rw-r--r--drivers/mtd/nand/s3c2410.c1
-rw-r--r--drivers/mtd/nand/sharpsl.c10
-rw-r--r--drivers/net/8139cp.c46
-rw-r--r--drivers/net/8390.c2
-rw-r--r--drivers/net/Kconfig10
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/arcnet/com90io.c4
-rw-r--r--drivers/net/arm/am79c961a.c22
-rw-r--r--drivers/net/bmac.c2
-rw-r--r--drivers/net/bonding/bond_main.c16
-rw-r--r--drivers/net/cassini.c5237
-rw-r--r--drivers/net/cassini.h4425
-rw-r--r--drivers/net/cs89x0.c1
-rw-r--r--drivers/net/e100.c4
-rw-r--r--drivers/net/e1000/e1000_main.c1
-rw-r--r--drivers/net/ibmveth.c6
-rw-r--r--drivers/net/irda/vlsi_ir.c4
-rw-r--r--drivers/net/ixgb/ixgb_main.c2
-rw-r--r--drivers/net/pppoe.c4
-rw-r--r--drivers/net/r8169.c7
-rw-r--r--drivers/net/s2io.c9
-rw-r--r--drivers/net/sk98lin/skge.c20
-rw-r--r--drivers/net/skge.c312
-rw-r--r--drivers/net/skge.h4
-rw-r--r--drivers/net/spider_net.c4
-rw-r--r--drivers/net/tg3.c215
-rw-r--r--drivers/net/tg3.h1
-rw-r--r--drivers/net/tulip/xircom_cb.c2
-rw-r--r--drivers/net/wan/hdlc_cisco.c2
-rw-r--r--drivers/net/wan/sdlamain.c23
-rw-r--r--drivers/net/wan/syncppp.c2
-rw-r--r--drivers/net/wireless/airo.c5
-rw-r--r--drivers/net/wireless/orinoco.c2
-rw-r--r--drivers/net/wireless/orinoco_cs.c1
-rw-r--r--drivers/net/wireless/strip.c4
-rw-r--r--drivers/parisc/led.c5
-rw-r--r--drivers/pci/hotplug.c4
-rw-r--r--drivers/pci/hotplug/ibmphp_pci.c2
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c4
-rw-r--r--drivers/pci/hotplug/rpadlpar_sysfs.c4
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c6
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c3
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pci/pci.c16
-rw-r--r--drivers/pci/probe.c22
-rw-r--r--drivers/pcmcia/Kconfig2
-rw-r--r--drivers/pcmcia/cardbus.c5
-rw-r--r--drivers/pcmcia/omap_cf.c1
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c12
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c10
-rw-r--r--drivers/pcmcia/ti113x.h115
-rw-r--r--drivers/pcmcia/yenta_socket.c75
-rw-r--r--drivers/s390/cio/blacklist.c4
-rw-r--r--drivers/s390/cio/ccwgroup.c2
-rw-r--r--drivers/s390/crypto/z90main.c3
-rw-r--r--drivers/s390/net/ctcmain.c41
-rw-r--r--drivers/s390/net/qeth.h4
-rw-r--r--drivers/s390/net/qeth_main.c137
-rw-r--r--drivers/s390/net/qeth_sys.c17
-rw-r--r--drivers/s390/scsi/Makefile2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c184
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c10
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c995
-rw-r--r--drivers/s390/scsi/zfcp_def.h307
-rw-r--r--drivers/s390/scsi/zfcp_erp.c135
-rw-r--r--drivers/s390/scsi/zfcp_ext.h30
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c769
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h54
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c30
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c297
-rw-r--r--drivers/s390/scsi/zfcp_sysfs_adapter.c14
-rw-r--r--drivers/sbus/char/display7seg.c2
-rw-r--r--drivers/scsi/3w-9xxx.c55
-rw-r--r--drivers/scsi/3w-9xxx.h17
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/aacraid/aachba.c283
-rw-r--r--drivers/scsi/aacraid/aacraid.h17
-rw-r--r--drivers/scsi/aacraid/comminit.c17
-rw-r--r--drivers/scsi/aacraid/commsup.c581
-rw-r--r--drivers/scsi/aacraid/linit.c12
-rw-r--r--drivers/scsi/aic7xxx/aic7770_osm.c3
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c8
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c3
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c17
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.h2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c11
-rw-r--r--drivers/scsi/ata_piix.c1
-rw-r--r--drivers/scsi/atp870u.c6
-rw-r--r--drivers/scsi/atp870u.h5
-rw-r--r--drivers/scsi/fd_mcs.c2
-rw-r--r--drivers/scsi/hosts.c37
-rw-r--r--drivers/scsi/ibmmca.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c10
-rw-r--r--drivers/scsi/libata-core.c118
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c6
-rw-r--r--drivers/scsi/megaraid.c70
-rw-r--r--drivers/scsi/megaraid/Kconfig.megaraid9
-rw-r--r--drivers/scsi/megaraid/Makefile1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c2805
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h1142
-rw-r--r--drivers/scsi/mesh.c29
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_rscn.c2
-rw-r--r--drivers/scsi/sata_nv.c2
-rw-r--r--drivers/scsi/sata_sis.c4
-rw-r--r--drivers/scsi/scsi.c5
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_error.c78
-rw-r--r--drivers/scsi/scsi_ioctl.c2
-rw-r--r--drivers/scsi/scsi_lib.c12
-rw-r--r--drivers/scsi/scsi_scan.c116
-rw-r--r--drivers/scsi/scsi_sysfs.c17
-rw-r--r--drivers/scsi/scsi_transport_sas.c9
-rw-r--r--drivers/scsi/sd.c1
-rw-r--r--drivers/scsi/sg.c5
-rw-r--r--drivers/scsi/sr.c1
-rw-r--r--drivers/scsi/st.c1
-rw-r--r--drivers/serial/21285.c2
-rw-r--r--drivers/serial/amba-pl010.c2
-rw-r--r--drivers/serial/amba-pl011.c2
-rw-r--r--drivers/serial/clps711x.c4
-rw-r--r--drivers/serial/imx.c2
-rw-r--r--drivers/serial/ioc4_serial.c12
-rw-r--r--drivers/serial/mpc52xx_uart.c2
-rw-r--r--drivers/serial/pxa.c4
-rw-r--r--drivers/serial/s3c2410.c6
-rw-r--r--drivers/serial/sa1100.c2
-rw-r--r--drivers/serial/serial_cs.c1
-rw-r--r--drivers/serial/serial_lh7a40x.c2
-rw-r--r--drivers/tc/zs.c2
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/core/message.c2
-rw-r--r--drivers/usb/core/usb.c6
-rw-r--r--drivers/usb/gadget/pxa2xx_udc.c4
-rw-r--r--drivers/usb/gadget/pxa2xx_udc.h8
-rw-r--r--drivers/usb/host/ohci-lh7a404.c2
-rw-r--r--drivers/usb/host/ohci-omap.c1
-rw-r--r--drivers/usb/host/ohci-s3c2410.c1
-rw-r--r--drivers/usb/host/sl811-hcd.c16
-rw-r--r--drivers/usb/media/vicam.c4
-rw-r--r--drivers/usb/net/pegasus.c29
-rw-r--r--drivers/usb/serial/airprime.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c8
-rw-r--r--drivers/usb/serial/option.c11
-rw-r--r--drivers/video/Kconfig5
-rw-r--r--drivers/video/aty/radeon_base.c2
-rw-r--r--drivers/video/aty/radeon_pm.c14
-rw-r--r--drivers/video/aty/radeonfb.h2
-rw-r--r--drivers/video/aty/xlinit.c11
-rw-r--r--drivers/video/backlight/corgi_bl.c33
-rw-r--r--drivers/video/console/fbcon.c18
-rw-r--r--drivers/video/console/fbcon.h2
-rw-r--r--drivers/video/console/font_10x18.c4
-rw-r--r--drivers/video/console/font_6x11.c4
-rw-r--r--drivers/video/console/font_7x14.c4
-rw-r--r--drivers/video/console/font_8x16.c4
-rw-r--r--drivers/video/console/font_8x8.c4
-rw-r--r--drivers/video/console/font_acorn_8x8.c4
-rw-r--r--drivers/video/console/font_mini_4x6.c4
-rw-r--r--drivers/video/console/font_pearl_8x8.c4
-rw-r--r--drivers/video/console/font_sun12x22.c4
-rw-r--r--drivers/video/console/font_sun8x16.c4
-rw-r--r--drivers/video/console/fonts.c9
-rw-r--r--drivers/video/console/vgacon.c4
-rw-r--r--drivers/video/cyblafb.c11
-rw-r--r--drivers/video/fbcvt.c8
-rw-r--r--drivers/video/i810/i810-i2c.c16
-rw-r--r--drivers/video/imxfb.c3
-rw-r--r--drivers/video/intelfb/intelfbdrv.c21
-rw-r--r--drivers/video/matrox/matroxfb_base.c13
-rw-r--r--drivers/video/nvidia/nv_i2c.c11
-rw-r--r--drivers/video/nvidia/nvidia.c5
-rw-r--r--drivers/video/pm3fb.c3
-rw-r--r--drivers/video/pxafb.c108
-rw-r--r--drivers/video/pxafb.h9
-rw-r--r--drivers/video/s3c2410fb.c4
-rw-r--r--drivers/video/savage/savagefb-i2c.c11
-rw-r--r--drivers/video/savage/savagefb.h4
-rw-r--r--drivers/video/savage/savagefb_driver.c11
331 files changed, 25418 insertions, 4194 deletions
diff --git a/drivers/acorn/char/pcf8583.c b/drivers/acorn/char/pcf8583.c
index 141b4c237a50..2b850e5860a0 100644
--- a/drivers/acorn/char/pcf8583.c
+++ b/drivers/acorn/char/pcf8583.c
@@ -23,12 +23,13 @@ static struct i2c_driver pcf8583_driver;
23 23
24static unsigned short ignore[] = { I2C_CLIENT_END }; 24static unsigned short ignore[] = { I2C_CLIENT_END };
25static unsigned short normal_addr[] = { 0x50, I2C_CLIENT_END }; 25static unsigned short normal_addr[] = { 0x50, I2C_CLIENT_END };
26static unsigned short *forces[] = { NULL };
26 27
27static struct i2c_client_address_data addr_data = { 28static struct i2c_client_address_data addr_data = {
28 .normal_i2c = normal_addr, 29 .normal_i2c = normal_addr,
29 .probe = ignore, 30 .probe = ignore,
30 .ignore = ignore, 31 .ignore = ignore,
31 .force = ignore, 32 .forces = forces,
32}; 33};
33 34
34#define DAT(x) ((unsigned int)(x->dev.driver_data)) 35#define DAT(x) ((unsigned int)(x->dev.driver_data))
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 373e7b728fa7..6b2eb6f39b4d 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -152,12 +152,13 @@ attribute_container_add_device(struct device *dev,
152 152
153 if (!cont->match(cont, dev)) 153 if (!cont->match(cont, dev))
154 continue; 154 continue;
155 ic = kmalloc(sizeof(struct internal_container), GFP_KERNEL); 155
156 ic = kzalloc(sizeof(*ic), GFP_KERNEL);
156 if (!ic) { 157 if (!ic) {
157 dev_printk(KERN_ERR, dev, "failed to allocate class container\n"); 158 dev_printk(KERN_ERR, dev, "failed to allocate class container\n");
158 continue; 159 continue;
159 } 160 }
160 memset(ic, 0, sizeof(struct internal_container)); 161
161 ic->cont = cont; 162 ic->cont = cont;
162 class_device_initialize(&ic->classdev); 163 class_device_initialize(&ic->classdev);
163 ic->classdev.dev = get_device(dev); 164 ic->classdev.dev = get_device(dev);
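
Several hunks in this series (attribute_container.c here, and class.c, firmware_class.c, map.c and platform.c below) apply the same cleanup: a kmalloc() followed by an explicit memset() collapses into a single kzalloc() call. A minimal before/after sketch of the pattern, using a stand-in structure rather than the driver's real one:

    #include <linux/slab.h>
    #include <linux/string.h>

    /* stand-in structure; the real internal_container is private to
     * attribute_container.c */
    struct example_container {
            void *cont;
            void *classdev;
    };

    /* before the patch: allocate, then zero by hand */
    static struct example_container *alloc_before(void)
    {
            struct example_container *ic;

            ic = kmalloc(sizeof(struct example_container), GFP_KERNEL);
            if (!ic)
                    return NULL;
            memset(ic, 0, sizeof(struct example_container));
            return ic;
    }

    /* after the patch: kzalloc() returns already-zeroed memory, and
     * sizeof(*ic) follows the pointer's type automatically */
    static struct example_container *alloc_after(void)
    {
            struct example_container *ic;

            ic = kzalloc(sizeof(*ic), GFP_KERNEL);
            if (!ic)
                    return NULL;
            return ic;
    }
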
diff --git a/drivers/base/class.c b/drivers/base/class.c
index d164c32a97ad..ce23dc8c18c5 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -189,12 +189,11 @@ struct class *class_create(struct module *owner, char *name)
189 struct class *cls; 189 struct class *cls;
190 int retval; 190 int retval;
191 191
192 cls = kmalloc(sizeof(struct class), GFP_KERNEL); 192 cls = kzalloc(sizeof(*cls), GFP_KERNEL);
193 if (!cls) { 193 if (!cls) {
194 retval = -ENOMEM; 194 retval = -ENOMEM;
195 goto error; 195 goto error;
196 } 196 }
197 memset(cls, 0x00, sizeof(struct class));
198 197
199 cls->name = name; 198 cls->name = name;
200 cls->owner = owner; 199 cls->owner = owner;
@@ -500,13 +499,13 @@ int class_device_add(struct class_device *class_dev)
500 /* add the needed attributes to this device */ 499 /* add the needed attributes to this device */
501 if (MAJOR(class_dev->devt)) { 500 if (MAJOR(class_dev->devt)) {
502 struct class_device_attribute *attr; 501 struct class_device_attribute *attr;
503 attr = kmalloc(sizeof(*attr), GFP_KERNEL); 502 attr = kzalloc(sizeof(*attr), GFP_KERNEL);
504 if (!attr) { 503 if (!attr) {
505 error = -ENOMEM; 504 error = -ENOMEM;
506 kobject_del(&class_dev->kobj); 505 kobject_del(&class_dev->kobj);
507 goto register_done; 506 goto register_done;
508 } 507 }
509 memset(attr, sizeof(*attr), 0x00); 508
510 attr->attr.name = "dev"; 509 attr->attr.name = "dev";
511 attr->attr.mode = S_IRUGO; 510 attr->attr.mode = S_IRUGO;
512 attr->attr.owner = parent->owner; 511 attr->attr.owner = parent->owner;
@@ -577,12 +576,11 @@ struct class_device *class_device_create(struct class *cls, dev_t devt,
577 if (cls == NULL || IS_ERR(cls)) 576 if (cls == NULL || IS_ERR(cls))
578 goto error; 577 goto error;
579 578
580 class_dev = kmalloc(sizeof(struct class_device), GFP_KERNEL); 579 class_dev = kzalloc(sizeof(*class_dev), GFP_KERNEL);
581 if (!class_dev) { 580 if (!class_dev) {
582 retval = -ENOMEM; 581 retval = -ENOMEM;
583 goto error; 582 goto error;
584 } 583 }
585 memset(class_dev, 0x00, sizeof(struct class_device));
586 584
587 class_dev->devt = devt; 585 class_dev->devt = devt;
588 class_dev->dev = device; 586 class_dev->dev = device;
@@ -671,6 +669,7 @@ void class_device_destroy(struct class *cls, dev_t devt)
671int class_device_rename(struct class_device *class_dev, char *new_name) 669int class_device_rename(struct class_device *class_dev, char *new_name)
672{ 670{
673 int error = 0; 671 int error = 0;
672 char *old_class_name = NULL, *new_class_name = NULL;
674 673
675 class_dev = class_device_get(class_dev); 674 class_dev = class_device_get(class_dev);
676 if (!class_dev) 675 if (!class_dev)
@@ -679,12 +678,24 @@ int class_device_rename(struct class_device *class_dev, char *new_name)
679 pr_debug("CLASS: renaming '%s' to '%s'\n", class_dev->class_id, 678 pr_debug("CLASS: renaming '%s' to '%s'\n", class_dev->class_id,
680 new_name); 679 new_name);
681 680
681 if (class_dev->dev)
682 old_class_name = make_class_name(class_dev);
683
682 strlcpy(class_dev->class_id, new_name, KOBJ_NAME_LEN); 684 strlcpy(class_dev->class_id, new_name, KOBJ_NAME_LEN);
683 685
684 error = kobject_rename(&class_dev->kobj, new_name); 686 error = kobject_rename(&class_dev->kobj, new_name);
685 687
688 if (class_dev->dev) {
689 new_class_name = make_class_name(class_dev);
690 sysfs_create_link(&class_dev->dev->kobj, &class_dev->kobj,
691 new_class_name);
692 sysfs_remove_link(&class_dev->dev->kobj, old_class_name);
693 }
686 class_device_put(class_dev); 694 class_device_put(class_dev);
687 695
696 kfree(old_class_name);
697 kfree(new_class_name);
698
688 return error; 699 return error;
689} 700}
690 701
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d5bbce38282f..3565e9795301 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -40,6 +40,9 @@
40 */ 40 */
41void device_bind_driver(struct device * dev) 41void device_bind_driver(struct device * dev)
42{ 42{
43 if (klist_node_attached(&dev->knode_driver))
44 return;
45
43 pr_debug("bound device '%s' to driver '%s'\n", 46 pr_debug("bound device '%s' to driver '%s'\n",
44 dev->bus_id, dev->driver->name); 47 dev->bus_id, dev->driver->name);
45 klist_add_tail(&dev->knode_driver, &dev->driver->klist_devices); 48 klist_add_tail(&dev->knode_driver, &dev->driver->klist_devices);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 5bfa2e9a7c26..4acb2c5733c3 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -301,9 +301,9 @@ fw_register_class_device(struct class_device **class_dev_p,
301 const char *fw_name, struct device *device) 301 const char *fw_name, struct device *device)
302{ 302{
303 int retval; 303 int retval;
304 struct firmware_priv *fw_priv = kmalloc(sizeof (struct firmware_priv), 304 struct firmware_priv *fw_priv = kzalloc(sizeof(*fw_priv),
305 GFP_KERNEL); 305 GFP_KERNEL);
306 struct class_device *class_dev = kmalloc(sizeof (struct class_device), 306 struct class_device *class_dev = kzalloc(sizeof(*class_dev),
307 GFP_KERNEL); 307 GFP_KERNEL);
308 308
309 *class_dev_p = NULL; 309 *class_dev_p = NULL;
@@ -313,8 +313,6 @@ fw_register_class_device(struct class_device **class_dev_p,
313 retval = -ENOMEM; 313 retval = -ENOMEM;
314 goto error_kfree; 314 goto error_kfree;
315 } 315 }
316 memset(fw_priv, 0, sizeof (*fw_priv));
317 memset(class_dev, 0, sizeof (*class_dev));
318 316
319 init_completion(&fw_priv->completion); 317 init_completion(&fw_priv->completion);
320 fw_priv->attr_data = firmware_attr_data_tmpl; 318 fw_priv->attr_data = firmware_attr_data_tmpl;
@@ -402,14 +400,13 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
402 if (!firmware_p) 400 if (!firmware_p)
403 return -EINVAL; 401 return -EINVAL;
404 402
405 *firmware_p = firmware = kmalloc(sizeof (struct firmware), GFP_KERNEL); 403 *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
406 if (!firmware) { 404 if (!firmware) {
407 printk(KERN_ERR "%s: kmalloc(struct firmware) failed\n", 405 printk(KERN_ERR "%s: kmalloc(struct firmware) failed\n",
408 __FUNCTION__); 406 __FUNCTION__);
409 retval = -ENOMEM; 407 retval = -ENOMEM;
410 goto out; 408 goto out;
411 } 409 }
412 memset(firmware, 0, sizeof (*firmware));
413 410
414 retval = fw_setup_class_device(firmware, &class_dev, name, device, 411 retval = fw_setup_class_device(firmware, &class_dev, name, device,
415 hotplug); 412 hotplug);
diff --git a/drivers/base/map.c b/drivers/base/map.c
index 2f455d86793c..b449dae6f0d3 100644
--- a/drivers/base/map.c
+++ b/drivers/base/map.c
@@ -135,7 +135,7 @@ retry:
135struct kobj_map *kobj_map_init(kobj_probe_t *base_probe, struct semaphore *sem) 135struct kobj_map *kobj_map_init(kobj_probe_t *base_probe, struct semaphore *sem)
136{ 136{
137 struct kobj_map *p = kmalloc(sizeof(struct kobj_map), GFP_KERNEL); 137 struct kobj_map *p = kmalloc(sizeof(struct kobj_map), GFP_KERNEL);
138 struct probe *base = kmalloc(sizeof(struct probe), GFP_KERNEL); 138 struct probe *base = kzalloc(sizeof(*base), GFP_KERNEL);
139 int i; 139 int i;
140 140
141 if ((p == NULL) || (base == NULL)) { 141 if ((p == NULL) || (base == NULL)) {
@@ -144,7 +144,6 @@ struct kobj_map *kobj_map_init(kobj_probe_t *base_probe, struct semaphore *sem)
144 return NULL; 144 return NULL;
145 } 145 }
146 146
147 memset(base, 0, sizeof(struct probe));
148 base->dev = 1; 147 base->dev = 1;
149 base->range = ~0; 148 base->range = ~0;
150 base->get = base_probe; 149 base->get = base_probe;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 3a5f4c991797..361e204209eb 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -225,13 +225,12 @@ struct platform_device *platform_device_register_simple(char *name, unsigned int
225 struct platform_object *pobj; 225 struct platform_object *pobj;
226 int retval; 226 int retval;
227 227
228 pobj = kmalloc(sizeof(struct platform_object) + sizeof(struct resource) * num, GFP_KERNEL); 228 pobj = kzalloc(sizeof(*pobj) + sizeof(struct resource) * num, GFP_KERNEL);
229 if (!pobj) { 229 if (!pobj) {
230 retval = -ENOMEM; 230 retval = -ENOMEM;
231 goto error; 231 goto error;
232 } 232 }
233 233
234 memset(pobj, 0, sizeof(*pobj));
235 pobj->pdev.name = name; 234 pobj->pdev.name = name;
236 pobj->pdev.id = id; 235 pobj->pdev.id = id;
237 pobj->pdev.dev.release = platform_device_release_simple; 236 pobj->pdev.dev.release = platform_device_release_simple;
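
platform_device_register_simple() sizes one allocation to hold the platform_object header plus the trailing array of num resources; the hunk above only folds the memset() into that allocation via kzalloc(). A small user-space C sketch of the single-allocation-with-trailing-array idea (structure layouts here are illustrative, not the kernel's):

    #include <stdlib.h>

    /* stand-ins for the kernel structures; field layout is illustrative */
    struct resource { unsigned long start, end, flags; };
    struct platform_object { int id; /* resources follow the header */ };

    int main(void)
    {
            unsigned int num = 4;
            struct platform_object *pobj;
            struct resource *res;

            /* one zeroed allocation holds the header plus 'num' resources,
             * mirroring kzalloc(sizeof(*pobj) + sizeof(struct resource) * num) */
            pobj = calloc(1, sizeof(*pobj) + sizeof(struct resource) * num);
            if (!pobj)
                    return 1;

            res = (struct resource *)(pobj + 1);    /* trailing array */
            res[0].start = 0x1000;                  /* illustrative values */
            res[0].end   = 0x1fff;

            free(pobj);
            return 0;
    }
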
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 28f2c177a541..486b6e1c7dfb 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -47,14 +47,14 @@
47#include <linux/completion.h> 47#include <linux/completion.h>
48 48
49#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin)) 49#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
50#define DRIVER_NAME "HP CISS Driver (v 2.6.6)" 50#define DRIVER_NAME "HP CISS Driver (v 2.6.8)"
51#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,6) 51#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,8)
52 52
53/* Embedded module documentation macros - see modules.h */ 53/* Embedded module documentation macros - see modules.h */
54MODULE_AUTHOR("Hewlett-Packard Company"); 54MODULE_AUTHOR("Hewlett-Packard Company");
55MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.6"); 55MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.8");
56MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400" 56MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
57 " SA6i P600 P800 E400 E300"); 57 " SA6i P600 P800 P400 P400i E200 E200i");
58MODULE_LICENSE("GPL"); 58MODULE_LICENSE("GPL");
59 59
60#include "cciss_cmd.h" 60#include "cciss_cmd.h"
@@ -83,12 +83,22 @@ static const struct pci_device_id cciss_pci_device_id[] = {
83 0x0E11, 0x4091, 0, 0, 0}, 83 0x0E11, 0x4091, 0, 0, 0},
84 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 84 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
85 0x103C, 0x3225, 0, 0, 0}, 85 0x103C, 0x3225, 0, 0, 0},
86 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSB, 86 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
87 0x103c, 0x3223, 0, 0, 0}, 87 0x103c, 0x3223, 0, 0, 0},
88 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 88 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
89 0x103c, 0x3231, 0, 0, 0}, 89 0x103c, 0x3234, 0, 0, 0},
90 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 90 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC,
91 0x103c, 0x3233, 0, 0, 0}, 91 0x103c, 0x3235, 0, 0, 0},
92 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
93 0x103c, 0x3211, 0, 0, 0},
94 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
95 0x103c, 0x3212, 0, 0, 0},
96 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
97 0x103c, 0x3213, 0, 0, 0},
98 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
99 0x103c, 0x3214, 0, 0, 0},
100 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD,
101 0x103c, 0x3215, 0, 0, 0},
92 {0,} 102 {0,}
93}; 103};
94MODULE_DEVICE_TABLE(pci, cciss_pci_device_id); 104MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
@@ -111,8 +121,13 @@ static struct board_type products[] = {
111 { 0x40910E11, "Smart Array 6i", &SA5_access}, 121 { 0x40910E11, "Smart Array 6i", &SA5_access},
112 { 0x3225103C, "Smart Array P600", &SA5_access}, 122 { 0x3225103C, "Smart Array P600", &SA5_access},
113 { 0x3223103C, "Smart Array P800", &SA5_access}, 123 { 0x3223103C, "Smart Array P800", &SA5_access},
114 { 0x3231103C, "Smart Array E400", &SA5_access}, 124 { 0x3234103C, "Smart Array P400", &SA5_access},
115 { 0x3233103C, "Smart Array E300", &SA5_access}, 125 { 0x3235103C, "Smart Array P400i", &SA5_access},
126 { 0x3211103C, "Smart Array E200i", &SA5_access},
127 { 0x3212103C, "Smart Array E200", &SA5_access},
128 { 0x3213103C, "Smart Array E200i", &SA5_access},
129 { 0x3214103C, "Smart Array E200i", &SA5_access},
130 { 0x3215103C, "Smart Array E200i", &SA5_access},
116}; 131};
117 132
 118/* How long to wait (in milliseconds) for board to go into simple mode */ 133
@@ -140,15 +155,26 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
140 155
141static int revalidate_allvol(ctlr_info_t *host); 156static int revalidate_allvol(ctlr_info_t *host);
142static int cciss_revalidate(struct gendisk *disk); 157static int cciss_revalidate(struct gendisk *disk);
143static int deregister_disk(struct gendisk *disk); 158static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk);
144static int register_new_disk(ctlr_info_t *h); 159static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, int clear_all);
145 160
161static void cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
162 int withirq, unsigned int *total_size, unsigned int *block_size);
163static void cciss_geometry_inquiry(int ctlr, int logvol,
164 int withirq, unsigned int total_size,
165 unsigned int block_size, InquiryData_struct *inq_buff,
166 drive_info_struct *drv);
146static void cciss_getgeometry(int cntl_num); 167static void cciss_getgeometry(int cntl_num);
147 168
148static void start_io( ctlr_info_t *h); 169static void start_io( ctlr_info_t *h);
149static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size, 170static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size,
150 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code, 171 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
151 unsigned char *scsi3addr, int cmd_type); 172 unsigned char *scsi3addr, int cmd_type);
173static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
174 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
175 int cmd_type);
176
177static void fail_all_cmds(unsigned long ctlr);
152 178
153#ifdef CONFIG_PROC_FS 179#ifdef CONFIG_PROC_FS
154static int cciss_proc_get_info(char *buffer, char **start, off_t offset, 180static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
@@ -265,7 +291,7 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
265 for(i=0; i<=h->highest_lun; i++) { 291 for(i=0; i<=h->highest_lun; i++) {
266 292
267 drv = &h->drv[i]; 293 drv = &h->drv[i];
268 if (drv->block_size == 0) 294 if (drv->heads == 0)
269 continue; 295 continue;
270 296
271 vol_sz = drv->nr_blocks; 297 vol_sz = drv->nr_blocks;
@@ -363,6 +389,8 @@ static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
363 return NULL; 389 return NULL;
364 memset(c, 0, sizeof(CommandList_struct)); 390 memset(c, 0, sizeof(CommandList_struct));
365 391
392 c->cmdindex = -1;
393
366 c->err_info = (ErrorInfo_struct *)pci_alloc_consistent( 394 c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
367 h->pdev, sizeof(ErrorInfo_struct), 395 h->pdev, sizeof(ErrorInfo_struct),
368 &err_dma_handle); 396 &err_dma_handle);
@@ -393,6 +421,8 @@ static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
393 err_dma_handle = h->errinfo_pool_dhandle 421 err_dma_handle = h->errinfo_pool_dhandle
394 + i*sizeof(ErrorInfo_struct); 422 + i*sizeof(ErrorInfo_struct);
395 h->nr_allocs++; 423 h->nr_allocs++;
424
425 c->cmdindex = i;
396 } 426 }
397 427
398 c->busaddr = (__u32) cmd_dma_handle; 428 c->busaddr = (__u32) cmd_dma_handle;
@@ -453,6 +483,8 @@ static int cciss_open(struct inode *inode, struct file *filep)
453 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name); 483 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
454#endif /* CCISS_DEBUG */ 484#endif /* CCISS_DEBUG */
455 485
486 if (host->busy_initializing || drv->busy_configuring)
487 return -EBUSY;
456 /* 488 /*
457 * Root is allowed to open raw volume zero even if it's not configured 489 * Root is allowed to open raw volume zero even if it's not configured
458 * so array config can still work. Root is also allowed to open any 490 * so array config can still work. Root is also allowed to open any
@@ -796,10 +828,10 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
796 return(0); 828 return(0);
797 } 829 }
798 case CCISS_DEREGDISK: 830 case CCISS_DEREGDISK:
799 return deregister_disk(disk); 831 return rebuild_lun_table(host, disk);
800 832
801 case CCISS_REGNEWD: 833 case CCISS_REGNEWD:
802 return register_new_disk(host); 834 return rebuild_lun_table(host, NULL);
803 835
804 case CCISS_PASSTHRU: 836 case CCISS_PASSTHRU:
805 { 837 {
@@ -1143,48 +1175,323 @@ static int revalidate_allvol(ctlr_info_t *host)
1143 return 0; 1175 return 0;
1144} 1176}
1145 1177
1146static int deregister_disk(struct gendisk *disk) 1178/* This function will check the usage_count of the drive to be updated/added.
1179 * If the usage_count is zero then the drive information will be updated and
1180 * the disk will be re-registered with the kernel. If not then it will be
1181 * left alone for the next reboot. The exception to this is disk 0 which
1182 * will always be left registered with the kernel since it is also the
1183 * controller node. Any changes to disk 0 will show up on the next
1184 * reboot.
1185*/
1186static void cciss_update_drive_info(int ctlr, int drv_index)
1187 {
1188 ctlr_info_t *h = hba[ctlr];
1189 struct gendisk *disk;
1190 ReadCapdata_struct *size_buff = NULL;
1191 InquiryData_struct *inq_buff = NULL;
1192 unsigned int block_size;
1193 unsigned int total_size;
1194 unsigned long flags = 0;
1195 int ret = 0;
1196
1197 /* if the disk already exists then deregister it before proceeding*/
1198 if (h->drv[drv_index].raid_level != -1){
1199 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1200 h->drv[drv_index].busy_configuring = 1;
1201 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1202 ret = deregister_disk(h->gendisk[drv_index],
1203 &h->drv[drv_index], 0);
1204 h->drv[drv_index].busy_configuring = 0;
1205 }
1206
1207 /* If the disk is in use return */
1208 if (ret)
1209 return;
1210
1211
 1212 /* Get information about the disk and modify the driver structure */
1213 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1214 if (size_buff == NULL)
1215 goto mem_msg;
1216 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1217 if (inq_buff == NULL)
1218 goto mem_msg;
1219
1220 cciss_read_capacity(ctlr, drv_index, size_buff, 1,
1221 &total_size, &block_size);
1222 cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
1223 inq_buff, &h->drv[drv_index]);
1224
1225 ++h->num_luns;
1226 disk = h->gendisk[drv_index];
1227 set_capacity(disk, h->drv[drv_index].nr_blocks);
1228
1229
1230 /* if it's the controller it's already added */
1231 if (drv_index){
1232 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1233
1234 /* Set up queue information */
1235 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1236 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1237
1238 /* This is a hardware imposed limit. */
1239 blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES);
1240
1241 /* This is a limit in the driver and could be eliminated. */
1242 blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
1243
1244 blk_queue_max_sectors(disk->queue, 512);
1245
1246 disk->queue->queuedata = hba[ctlr];
1247
1248 blk_queue_hardsect_size(disk->queue,
1249 hba[ctlr]->drv[drv_index].block_size);
1250
1251 h->drv[drv_index].queue = disk->queue;
1252 add_disk(disk);
1253 }
1254
1255freeret:
1256 kfree(size_buff);
1257 kfree(inq_buff);
1258 return;
1259mem_msg:
1260 printk(KERN_ERR "cciss: out of memory\n");
1261 goto freeret;
1262}
1263
 1264/* This function will find the first index of the controller's drive array
1265 * that has a -1 for the raid_level and will return that index. This is
1266 * where new drives will be added. If the index to be returned is greater
1267 * than the highest_lun index for the controller then highest_lun is set
1268 * to this new index. If there are no available indexes then -1 is returned.
1269*/
1270static int cciss_find_free_drive_index(int ctlr)
1147{ 1271{
1272 int i;
1273
1274 for (i=0; i < CISS_MAX_LUN; i++){
1275 if (hba[ctlr]->drv[i].raid_level == -1){
1276 if (i > hba[ctlr]->highest_lun)
1277 hba[ctlr]->highest_lun = i;
1278 return i;
1279 }
1280 }
1281 return -1;
1282}
1283
1284/* This function will add and remove logical drives from the Logical
 1285 * drive array of the controller and maintain persistence of ordering
1286 * so that mount points are preserved until the next reboot. This allows
1287 * for the removal of logical drives in the middle of the drive array
1288 * without a re-ordering of those drives.
1289 * INPUT
1290 * h = The controller to perform the operations on
1291 * del_disk = The disk to remove if specified. If the value given
1292 * is NULL then no disk is removed.
1293*/
1294static int rebuild_lun_table(ctlr_info_t *h, struct gendisk *del_disk)
1295{
1296 int ctlr = h->ctlr;
1297 int num_luns;
1298 ReportLunData_struct *ld_buff = NULL;
1299 drive_info_struct *drv = NULL;
1300 int return_code;
1301 int listlength = 0;
1302 int i;
1303 int drv_found;
1304 int drv_index = 0;
1305 __u32 lunid = 0;
1148 unsigned long flags; 1306 unsigned long flags;
1307
1308 /* Set busy_configuring flag for this operation */
1309 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1310 if (h->num_luns >= CISS_MAX_LUN){
1311 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1312 return -EINVAL;
1313 }
1314
1315 if (h->busy_configuring){
1316 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1317 return -EBUSY;
1318 }
1319 h->busy_configuring = 1;
1320
1321 /* if del_disk is NULL then we are being called to add a new disk
1322 * and update the logical drive table. If it is not NULL then
1323 * we will check if the disk is in use or not.
1324 */
1325 if (del_disk != NULL){
1326 drv = get_drv(del_disk);
1327 drv->busy_configuring = 1;
1328 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1329 return_code = deregister_disk(del_disk, drv, 1);
1330 drv->busy_configuring = 0;
1331 h->busy_configuring = 0;
1332 return return_code;
1333 } else {
1334 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1335 if (!capable(CAP_SYS_RAWIO))
1336 return -EPERM;
1337
1338 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1339 if (ld_buff == NULL)
1340 goto mem_msg;
1341
1342 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1343 sizeof(ReportLunData_struct), 0, 0, 0,
1344 TYPE_CMD);
1345
1346 if (return_code == IO_OK){
1347 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
1348 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
1349 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
1350 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1351 } else{ /* reading number of logical volumes failed */
1352 printk(KERN_WARNING "cciss: report logical volume"
1353 " command failed\n");
1354 listlength = 0;
1355 goto freeret;
1356 }
1357
1358 num_luns = listlength / 8; /* 8 bytes per entry */
1359 if (num_luns > CISS_MAX_LUN){
1360 num_luns = CISS_MAX_LUN;
1361 printk(KERN_WARNING "cciss: more luns configured"
1362 " on controller than can be handled by"
1363 " this driver.\n");
1364 }
1365
 1366 /* Compare controller drive array to the driver's drive array.
1367 * Check for updates in the drive information and any new drives
1368 * on the controller.
1369 */
1370 for (i=0; i < num_luns; i++){
1371 int j;
1372
1373 drv_found = 0;
1374
1375 lunid = (0xff &
1376 (unsigned int)(ld_buff->LUN[i][3])) << 24;
1377 lunid |= (0xff &
1378 (unsigned int)(ld_buff->LUN[i][2])) << 16;
1379 lunid |= (0xff &
1380 (unsigned int)(ld_buff->LUN[i][1])) << 8;
1381 lunid |= 0xff &
1382 (unsigned int)(ld_buff->LUN[i][0]);
1383
1384 /* Find if the LUN is already in the drive array
1385 * of the controller. If so then update its info
1386 * if not is use. If it does not exist then find
1387 * the first free index and add it.
1388 */
1389 for (j=0; j <= h->highest_lun; j++){
1390 if (h->drv[j].LunID == lunid){
1391 drv_index = j;
1392 drv_found = 1;
1393 }
1394 }
1395
1396 /* check if the drive was found already in the array */
1397 if (!drv_found){
1398 drv_index = cciss_find_free_drive_index(ctlr);
1399 if (drv_index == -1)
1400 goto freeret;
1401
1402 }
1403 h->drv[drv_index].LunID = lunid;
1404 cciss_update_drive_info(ctlr, drv_index);
1405 } /* end for */
1406 } /* end else */
1407
1408freeret:
1409 kfree(ld_buff);
1410 h->busy_configuring = 0;
1411 /* We return -1 here to tell the ACU that we have registered/updated
1412 * all of the drives that we can and to keep it from calling us
1413 * additional times.
1414 */
1415 return -1;
1416mem_msg:
1417 printk(KERN_ERR "cciss: out of memory\n");
1418 goto freeret;
1419}
1420
 1421/* This function will deregister the disk and its queue from the
 1422 * kernel. It must be called with the controller lock held and the
 1423 * drv structure's busy_configuring flag set. Its parameters are:
1424 *
1425 * disk = This is the disk to be deregistered
1426 * drv = This is the drive_info_struct associated with the disk to be
1427 * deregistered. It contains information about the disk used
1428 * by the driver.
1429 * clear_all = This flag determines whether or not the disk information
1430 * is going to be completely cleared out and the highest_lun
1431 * reset. Sometimes we want to clear out information about
 1432 * the disk in preparation for re-adding it. In this case
1433 * the highest_lun should be left unchanged and the LunID
1434 * should not be cleared.
1435*/
1436static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
1437 int clear_all)
1438{
1149 ctlr_info_t *h = get_host(disk); 1439 ctlr_info_t *h = get_host(disk);
1150 drive_info_struct *drv = get_drv(disk);
1151 int ctlr = h->ctlr;
1152 1440
1153 if (!capable(CAP_SYS_RAWIO)) 1441 if (!capable(CAP_SYS_RAWIO))
1154 return -EPERM; 1442 return -EPERM;
1155 1443
1156 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
 1157 /* make sure logical volume is NOT in use */ 1444 /* make sure logical volume is NOT in use */
1158 if( drv->usage_count > 1) { 1445 if(clear_all || (h->gendisk[0] == disk)) {
1159 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1446 if (drv->usage_count > 1)
1160 return -EBUSY; 1447 return -EBUSY;
1161 } 1448 }
1162 drv->usage_count++; 1449 else
1163 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1450 if( drv->usage_count > 0 )
1451 return -EBUSY;
1164 1452
1165 /* invalidate the devices and deregister the disk */ 1453 /* invalidate the devices and deregister the disk. If it is disk
1166 if (disk->flags & GENHD_FL_UP) 1454 * zero do not deregister it but just zero out it's values. This
1455 * allows us to delete disk zero but keep the controller registered.
1456 */
1457 if (h->gendisk[0] != disk){
1458 if (disk->flags & GENHD_FL_UP){
1459 blk_cleanup_queue(disk->queue);
1167 del_gendisk(disk); 1460 del_gendisk(disk);
1461 drv->queue = NULL;
1462 }
1463 }
1464
1465 --h->num_luns;
1466 /* zero out the disk size info */
1467 drv->nr_blocks = 0;
1468 drv->block_size = 0;
1469 drv->heads = 0;
1470 drv->sectors = 0;
1471 drv->cylinders = 0;
1472 drv->raid_level = -1; /* This can be used as a flag variable to
1473 * indicate that this element of the drive
1474 * array is free.
1475 */
1476
1477 if (clear_all){
1168 /* check to see if it was the last disk */ 1478 /* check to see if it was the last disk */
1169 if (drv == h->drv + h->highest_lun) { 1479 if (drv == h->drv + h->highest_lun) {
 1170 /* if so, find the new highest lun */ 1480 /* if so, find the new highest lun */
1171 int i, newhighest =-1; 1481 int i, newhighest =-1;
1172 for(i=0; i<h->highest_lun; i++) { 1482 for(i=0; i<h->highest_lun; i++) {
1173 /* if the disk has size > 0, it is available */ 1483 /* if the disk has size > 0, it is available */
1174 if (h->drv[i].nr_blocks) 1484 if (h->drv[i].heads)
1175 newhighest = i; 1485 newhighest = i;
1176 } 1486 }
1177 h->highest_lun = newhighest; 1487 h->highest_lun = newhighest;
1178
1179 } 1488 }
1180 --h->num_luns; 1489
1181 /* zero out the disk size info */
1182 drv->nr_blocks = 0;
1183 drv->block_size = 0;
1184 drv->cylinders = 0;
1185 drv->LunID = 0; 1490 drv->LunID = 0;
1491 }
1186 return(0); 1492 return(0);
1187} 1493}
1494
1188static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, 1495static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1189 size_t size, 1496 size_t size,
1190 unsigned int use_unit_num, /* 0: address the controller, 1497 unsigned int use_unit_num, /* 0: address the controller,
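
rebuild_lun_table() above assembles two 32-bit values a byte at a time from the REPORT LUNS response: the list length with LUNListLength[0] as the most significant byte, and each LUN ID with LUN[i][0] as the least significant byte, then divides the length by the 8-byte entry size. A stand-alone sketch of that arithmetic, with hypothetical response bytes rather than real controller data:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical response bytes, not taken from real hardware */
            unsigned char lun_list_length[4] = { 0x00, 0x00, 0x00, 0x18 };
            unsigned char lun_entry[8]       = { 0x02, 0x00, 0x00, 0x00, 0, 0, 0, 0 };

            /* list length: byte 0 is the most significant byte */
            unsigned int listlength =
                    (lun_list_length[0] << 24) | (lun_list_length[1] << 16) |
                    (lun_list_length[2] << 8)  |  lun_list_length[3];

            /* each entry is 8 bytes, so the number of logical drives is: */
            unsigned int num_luns = listlength / 8;

            /* LUN ID: byte 0 is the least significant byte */
            unsigned int lunid =
                    (lun_entry[3] << 24) | (lun_entry[2] << 16) |
                    (lun_entry[1] << 8)  |  lun_entry[0];

            printf("listlength=%u num_luns=%u lunid=0x%08x\n",
                   listlength, num_luns, lunid);
            return 0;
    }
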
@@ -1420,8 +1727,10 @@ case CMD_HARDWARE_ERR:
1420 } 1727 }
1421 } 1728 }
1422 /* unlock the buffers from DMA */ 1729 /* unlock the buffers from DMA */
1730 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
1731 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1423 pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val, 1732 pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
1424 size, PCI_DMA_BIDIRECTIONAL); 1733 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1425 cmd_free(h, c, 0); 1734 cmd_free(h, c, 0);
1426 return(return_status); 1735 return(return_status);
1427 1736
@@ -1495,164 +1804,6 @@ cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
1495 return; 1804 return;
1496} 1805}
1497 1806
1498static int register_new_disk(ctlr_info_t *h)
1499{
1500 struct gendisk *disk;
1501 int ctlr = h->ctlr;
1502 int i;
1503 int num_luns;
1504 int logvol;
1505 int new_lun_found = 0;
1506 int new_lun_index = 0;
1507 int free_index_found = 0;
1508 int free_index = 0;
1509 ReportLunData_struct *ld_buff = NULL;
1510 ReadCapdata_struct *size_buff = NULL;
1511 InquiryData_struct *inq_buff = NULL;
1512 int return_code;
1513 int listlength = 0;
1514 __u32 lunid = 0;
1515 unsigned int block_size;
1516 unsigned int total_size;
1517
1518 if (!capable(CAP_SYS_RAWIO))
1519 return -EPERM;
1520 /* if we have no space in our disk array left to add anything */
1521 if( h->num_luns >= CISS_MAX_LUN)
1522 return -EINVAL;
1523
1524 ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1525 if (ld_buff == NULL)
1526 goto mem_msg;
1527 memset(ld_buff, 0, sizeof(ReportLunData_struct));
1528 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1529 if (size_buff == NULL)
1530 goto mem_msg;
1531 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1532 if (inq_buff == NULL)
1533 goto mem_msg;
1534
1535 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1536 sizeof(ReportLunData_struct), 0, 0, 0, TYPE_CMD);
1537
1538 if( return_code == IO_OK)
1539 {
1540
1541 // printk("LUN Data\n--------------------------\n");
1542
1543 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
1544 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
1545 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
1546 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1547 } else /* reading number of logical volumes failed */
1548 {
1549 printk(KERN_WARNING "cciss: report logical volume"
1550 " command failed\n");
1551 listlength = 0;
1552 goto free_err;
1553 }
1554 num_luns = listlength / 8; // 8 bytes pre entry
1555 if (num_luns > CISS_MAX_LUN)
1556 {
1557 num_luns = CISS_MAX_LUN;
1558 }
1559#ifdef CCISS_DEBUG
1560 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
1561 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
1562 ld_buff->LUNListLength[3], num_luns);
1563#endif
1564 for(i=0; i< num_luns; i++)
1565 {
1566 int j;
1567 int lunID_found = 0;
1568
1569 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
1570 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
1571 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
1572 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1573
1574 /* check to see if this is a new lun */
1575 for(j=0; j <= h->highest_lun; j++)
1576 {
1577#ifdef CCISS_DEBUG
1578 printk("Checking %d %x against %x\n", j,h->drv[j].LunID,
1579 lunid);
1580#endif /* CCISS_DEBUG */
1581 if (h->drv[j].LunID == lunid)
1582 {
1583 lunID_found = 1;
1584 break;
1585 }
1586
1587 }
1588 if( lunID_found == 1)
1589 continue;
1590 else
1591 { /* It is the new lun we have been looking for */
1592#ifdef CCISS_DEBUG
1593 printk("new lun found at %d\n", i);
1594#endif /* CCISS_DEBUG */
1595 new_lun_index = i;
1596 new_lun_found = 1;
1597 break;
1598 }
1599 }
1600 if (!new_lun_found)
1601 {
1602 printk(KERN_WARNING "cciss: New Logical Volume not found\n");
1603 goto free_err;
1604 }
1605 /* Now find the free index */
1606 for(i=0; i <CISS_MAX_LUN; i++)
1607 {
1608#ifdef CCISS_DEBUG
1609 printk("Checking Index %d\n", i);
1610#endif /* CCISS_DEBUG */
1611 if(h->drv[i].LunID == 0)
1612 {
1613#ifdef CCISS_DEBUG
1614 printk("free index found at %d\n", i);
1615#endif /* CCISS_DEBUG */
1616 free_index_found = 1;
1617 free_index = i;
1618 break;
1619 }
1620 }
1621 if (!free_index_found)
1622 {
1623 printk(KERN_WARNING "cciss: unable to find free slot for disk\n");
1624 goto free_err;
1625 }
1626
1627 logvol = free_index;
1628 h->drv[logvol].LunID = lunid;
1629 /* there could be gaps in lun numbers, track hightest */
1630 if(h->highest_lun < lunid)
1631 h->highest_lun = logvol;
1632 cciss_read_capacity(ctlr, logvol, size_buff, 1,
1633 &total_size, &block_size);
1634 cciss_geometry_inquiry(ctlr, logvol, 1, total_size, block_size,
1635 inq_buff, &h->drv[logvol]);
1636 h->drv[logvol].usage_count = 0;
1637 ++h->num_luns;
1638 /* setup partitions per disk */
1639 disk = h->gendisk[logvol];
1640 set_capacity(disk, h->drv[logvol].nr_blocks);
1641 /* if it's the controller it's already added */
1642 if(logvol)
1643 add_disk(disk);
1644freeret:
1645 kfree(ld_buff);
1646 kfree(size_buff);
1647 kfree(inq_buff);
1648 return (logvol);
1649mem_msg:
1650 printk(KERN_ERR "cciss: out of memory\n");
1651free_err:
1652 logvol = -1;
1653 goto freeret;
1654}
1655
1656static int cciss_revalidate(struct gendisk *disk) 1807static int cciss_revalidate(struct gendisk *disk)
1657{ 1808{
1658 ctlr_info_t *h = get_host(disk); 1809 ctlr_info_t *h = get_host(disk);
@@ -1859,8 +2010,10 @@ resend_cmd1:
1859 2010
1860cleanup1: 2011cleanup1:
1861 /* unlock the data buffer from DMA */ 2012 /* unlock the data buffer from DMA */
2013 buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
2014 buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
1862 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val, 2015 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
1863 size, PCI_DMA_BIDIRECTIONAL); 2016 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
1864 cmd_free(info_p, c, 1); 2017 cmd_free(info_p, c, 1);
1865 return (status); 2018 return (status);
1866} 2019}
@@ -2111,7 +2264,11 @@ queue:
2111 /* fill in the request */ 2264 /* fill in the request */
2112 drv = creq->rq_disk->private_data; 2265 drv = creq->rq_disk->private_data;
2113 c->Header.ReplyQueue = 0; // unused in simple mode 2266 c->Header.ReplyQueue = 0; // unused in simple mode
2114 c->Header.Tag.lower = c->busaddr; // use the physical address the cmd block for tag 2267 /* got command from pool, so use the command block index instead */
2268 /* for direct lookups. */
2269 /* The first 2 bits are reserved for controller error reporting. */
2270 c->Header.Tag.lower = (c->cmdindex << 3);
2271 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
2115 c->Header.LUN.LogDev.VolId= drv->LunID; 2272 c->Header.LUN.LogDev.VolId= drv->LunID;
2116 c->Header.LUN.LogDev.Mode = 1; 2273 c->Header.LUN.LogDev.Mode = 1;
2117 c->Request.CDBLen = 10; // 12 byte commands not in FW yet; 2274 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
@@ -2186,7 +2343,7 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2186 ctlr_info_t *h = dev_id; 2343 ctlr_info_t *h = dev_id;
2187 CommandList_struct *c; 2344 CommandList_struct *c;
2188 unsigned long flags; 2345 unsigned long flags;
2189 __u32 a, a1; 2346 __u32 a, a1, a2;
2190 int j; 2347 int j;
2191 int start_queue = h->next_to_run; 2348 int start_queue = h->next_to_run;
2192 2349
@@ -2204,10 +2361,21 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2204 while((a = h->access.command_completed(h)) != FIFO_EMPTY) 2361 while((a = h->access.command_completed(h)) != FIFO_EMPTY)
2205 { 2362 {
2206 a1 = a; 2363 a1 = a;
2364 if ((a & 0x04)) {
2365 a2 = (a >> 3);
2366 if (a2 >= NR_CMDS) {
2367 printk(KERN_WARNING "cciss: controller cciss%d failed, stopping.\n", h->ctlr);
2368 fail_all_cmds(h->ctlr);
2369 return IRQ_HANDLED;
2370 }
2371
2372 c = h->cmd_pool + a2;
2373 a = c->busaddr;
2374
2375 } else {
2207 a &= ~3; 2376 a &= ~3;
2208 if ((c = h->cmpQ) == NULL) 2377 if ((c = h->cmpQ) == NULL) {
2209 { 2378 printk(KERN_WARNING "cciss: Completion of %08x ignored\n", a1);
2210 printk(KERN_WARNING "cciss: Completion of %08lx ignored\n", (unsigned long)a1);
2211 continue; 2379 continue;
2212 } 2380 }
2213 while(c->busaddr != a) { 2381 while(c->busaddr != a) {
@@ -2215,6 +2383,7 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2215 if (c == h->cmpQ) 2383 if (c == h->cmpQ)
2216 break; 2384 break;
2217 } 2385 }
2386 }
2218 /* 2387 /*
2219 * If we've found the command, take it off the 2388 * If we've found the command, take it off the
2220 * completion Q and free it 2389 * completion Q and free it
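
The tag scheme introduced in these two hunks packs the command's pool index into the completion tag: the two lowest bits stay reserved for controller error reporting, bit 2 (0x04) marks the tag as a direct-lookup tag, and the index occupies the bits above that, so the interrupt handler can go straight to h->cmd_pool + index instead of walking the completion queue. A stand-alone sketch of the encode/decode arithmetic (illustrative values only):

    #include <assert.h>
    #include <stdio.h>

    #define DIRECT_LOOKUP_BIT 0x04u   /* bit 2: tag carries a pool index */

    static unsigned int encode_tag(unsigned int cmdindex)
    {
            /* shift past the two reserved error bits and the lookup flag */
            return (cmdindex << 3) | DIRECT_LOOKUP_BIT;
    }

    static int decode_tag(unsigned int tag, unsigned int *cmdindex)
    {
            if (!(tag & DIRECT_LOOKUP_BIT))
                    return 0;       /* old-style tag: physical address */
            /* dropping the low three bits recovers the index; the driver
             * additionally range-checks it against the command pool size */
            *cmdindex = tag >> 3;
            return 1;
    }

    int main(void)
    {
            unsigned int idx, tag = encode_tag(42);

            assert(decode_tag(tag, &idx) && idx == 42);
            printf("tag=0x%x index=%u\n", tag, idx);
            return 0;
    }
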
@@ -2634,12 +2803,16 @@ static void cciss_getgeometry(int cntl_num)
2634#endif /* CCISS_DEBUG */ 2803#endif /* CCISS_DEBUG */
2635 2804
2636 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1; 2805 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
2637 for(i=0; i< hba[cntl_num]->num_luns; i++) 2806// for(i=0; i< hba[cntl_num]->num_luns; i++)
2807 for(i=0; i < CISS_MAX_LUN; i++)
2638 { 2808 {
2639 2809 if (i < hba[cntl_num]->num_luns){
2640 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24; 2810 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3]))
2641 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16; 2811 << 24;
2642 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8; 2812 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2]))
2813 << 16;
2814 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1]))
2815 << 8;
2643 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]); 2816 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
2644 2817
2645 hba[cntl_num]->drv[i].LunID = lunid; 2818 hba[cntl_num]->drv[i].LunID = lunid;
@@ -2647,13 +2820,18 @@ static void cciss_getgeometry(int cntl_num)
2647 2820
2648#ifdef CCISS_DEBUG 2821#ifdef CCISS_DEBUG
2649 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i, 2822 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
2650 ld_buff->LUN[i][0], ld_buff->LUN[i][1],ld_buff->LUN[i][2], 2823 ld_buff->LUN[i][0], ld_buff->LUN[i][1],
2651 ld_buff->LUN[i][3], hba[cntl_num]->drv[i].LunID); 2824 ld_buff->LUN[i][2], ld_buff->LUN[i][3],
2825 hba[cntl_num]->drv[i].LunID);
2652#endif /* CCISS_DEBUG */ 2826#endif /* CCISS_DEBUG */
2653 cciss_read_capacity(cntl_num, i, size_buff, 0, 2827 cciss_read_capacity(cntl_num, i, size_buff, 0,
2654 &total_size, &block_size); 2828 &total_size, &block_size);
2655 cciss_geometry_inquiry(cntl_num, i, 0, total_size, block_size, 2829 cciss_geometry_inquiry(cntl_num, i, 0, total_size,
2656 inq_buff, &hba[cntl_num]->drv[i]); 2830 block_size, inq_buff, &hba[cntl_num]->drv[i]);
2831 } else {
2832 /* initialize raid_level to indicate a free space */
2833 hba[cntl_num]->drv[i].raid_level = -1;
2834 }
2657 } 2835 }
2658 kfree(ld_buff); 2836 kfree(ld_buff);
2659 kfree(size_buff); 2837 kfree(size_buff);
@@ -2727,6 +2905,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
2727 i = alloc_cciss_hba(); 2905 i = alloc_cciss_hba();
2728 if(i < 0) 2906 if(i < 0)
2729 return (-1); 2907 return (-1);
2908
2909 hba[i]->busy_initializing = 1;
2910
2730 if (cciss_pci_init(hba[i], pdev) != 0) 2911 if (cciss_pci_init(hba[i], pdev) != 0)
2731 goto clean1; 2912 goto clean1;
2732 2913
@@ -2807,6 +2988,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
2807 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON); 2988 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
2808 2989
2809 cciss_procinit(i); 2990 cciss_procinit(i);
2991 hba[i]->busy_initializing = 0;
2810 2992
2811 for(j=0; j < NWD; j++) { /* mfm */ 2993 for(j=0; j < NWD; j++) { /* mfm */
2812 drive_info_struct *drv = &(hba[i]->drv[j]); 2994 drive_info_struct *drv = &(hba[i]->drv[j]);
@@ -2869,6 +3051,7 @@ clean2:
2869clean1: 3051clean1:
2870 release_io_mem(hba[i]); 3052 release_io_mem(hba[i]);
2871 free_hba(i); 3053 free_hba(i);
3054 hba[i]->busy_initializing = 0;
2872 return(-1); 3055 return(-1);
2873} 3056}
2874 3057
@@ -2913,9 +3096,10 @@ static void __devexit cciss_remove_one (struct pci_dev *pdev)
2913 /* remove it from the disk list */ 3096 /* remove it from the disk list */
2914 for (j = 0; j < NWD; j++) { 3097 for (j = 0; j < NWD; j++) {
2915 struct gendisk *disk = hba[i]->gendisk[j]; 3098 struct gendisk *disk = hba[i]->gendisk[j];
2916 if (disk->flags & GENHD_FL_UP) 3099 if (disk->flags & GENHD_FL_UP) {
2917 blk_cleanup_queue(disk->queue);
2918 del_gendisk(disk); 3100 del_gendisk(disk);
3101 blk_cleanup_queue(disk->queue);
3102 }
2919 } 3103 }
2920 3104
2921 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct), 3105 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
@@ -2964,5 +3148,43 @@ static void __exit cciss_cleanup(void)
2964 remove_proc_entry("cciss", proc_root_driver); 3148 remove_proc_entry("cciss", proc_root_driver);
2965} 3149}
2966 3150
3151static void fail_all_cmds(unsigned long ctlr)
3152{
3153 /* If we get here, the board is apparently dead. */
3154 ctlr_info_t *h = hba[ctlr];
3155 CommandList_struct *c;
3156 unsigned long flags;
3157
3158 printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
3159 h->alive = 0; /* the controller apparently died... */
3160
3161 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
3162
3163 pci_disable_device(h->pdev); /* Make sure it is really dead. */
3164
3165 /* move everything off the request queue onto the completed queue */
3166 while( (c = h->reqQ) != NULL ) {
3167 removeQ(&(h->reqQ), c);
3168 h->Qdepth--;
3169 addQ (&(h->cmpQ), c);
3170 }
3171
3172 /* Now, fail everything on the completed queue with a HW error */
3173 while( (c = h->cmpQ) != NULL ) {
3174 removeQ(&h->cmpQ, c);
3175 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
3176 if (c->cmd_type == CMD_RWREQ) {
3177 complete_command(h, c, 0);
3178 } else if (c->cmd_type == CMD_IOCTL_PEND)
3179 complete(c->waiting);
3180#ifdef CONFIG_CISS_SCSI_TAPE
3181 else if (c->cmd_type == CMD_SCSI)
3182 complete_scsi_command(c, 0, 0);
3183#endif
3184 }
3185 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
3186 return;
3187}
3188
2967module_init(cciss_init); 3189module_init(cciss_init);
2968module_exit(cciss_cleanup); 3190module_exit(cciss_cleanup);
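
The new fail_all_cmds() above drains the controller's request queue onto the completed queue and then fails every queued command with a hardware-error status. The user-space sketch below replays that two-pass drain on a plain singly-linked list; the list helpers and the toy command type are invented stand-ins, not the driver's addQ()/removeQ() or CommandList_struct.

#include <stdio.h>

/* Simplified stand-in for the reqQ/cmpQ drain in fail_all_cmds(). */
struct cmd {
	int tag;
	struct cmd *next;
};

static struct cmd *pop(struct cmd **q)
{
	struct cmd *c = *q;
	if (c)
		*q = c->next;
	return c;
}

static void push(struct cmd **q, struct cmd *c)
{
	c->next = *q;
	*q = c;
}

int main(void)
{
	struct cmd cmds[3] = { { 1, &cmds[1] }, { 2, &cmds[2] }, { 3, NULL } };
	struct cmd *reqQ = &cmds[0], *cmpQ = NULL, *c;

	while ((c = pop(&reqQ)) != NULL)	/* move pending work to cmpQ */
		push(&cmpQ, c);

	while ((c = pop(&cmpQ)) != NULL)	/* fail everything still queued */
		printf("cmd %d completed with hardware error\n", c->tag);

	return 0;
}
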
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 566587d0a500..ef277baee9fd 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -35,7 +35,13 @@ typedef struct _drive_info_struct
35 int heads; 35 int heads;
36 int sectors; 36 int sectors;
37 int cylinders; 37 int cylinders;
38 int raid_level; 38 int raid_level; /* set to -1 to indicate that
39 * the drive is not in use/configured
40 */
41 int busy_configuring; /*This is set when the drive is being removed
42 *to prevent it from being opened or it's queue
43 *from being started.
44 */
39} drive_info_struct; 45} drive_info_struct;
40 46
41struct ctlr_info 47struct ctlr_info
@@ -83,6 +89,7 @@ struct ctlr_info
83 int nr_allocs; 89 int nr_allocs;
84 int nr_frees; 90 int nr_frees;
85 int busy_configuring; 91 int busy_configuring;
92 int busy_initializing;
86 93
87 /* This element holds the zero based queue number of the last 94 /* This element holds the zero based queue number of the last
88 * queue to be started. It is used for fairness. 95 * queue to be started. It is used for fairness.
@@ -94,6 +101,7 @@ struct ctlr_info
94#ifdef CONFIG_CISS_SCSI_TAPE 101#ifdef CONFIG_CISS_SCSI_TAPE
95 void *scsi_ctlr; /* ptr to structure containing scsi related stuff */ 102 void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
96#endif 103#endif
104 unsigned char alive;
97}; 105};
98 106
99/* Defining the diffent access_menthods */ 107/* Defining the diffent access_menthods */
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index a88a88817623..53fea549ba8b 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -226,6 +226,10 @@ typedef struct _ErrorInfo_struct {
226#define CMD_MSG_DONE 0x04 226#define CMD_MSG_DONE 0x04
227#define CMD_MSG_TIMEOUT 0x05 227#define CMD_MSG_TIMEOUT 0x05
228 228
229/* This structure needs to be divisible by 8 for new
230 * indexing method.
231 */
232#define PADSIZE (sizeof(long) - 4)
229typedef struct _CommandList_struct { 233typedef struct _CommandList_struct {
230 CommandListHeader_struct Header; 234 CommandListHeader_struct Header;
231 RequestBlock_struct Request; 235 RequestBlock_struct Request;
@@ -236,14 +240,14 @@ typedef struct _CommandList_struct {
236 ErrorInfo_struct * err_info; /* pointer to the allocated mem */ 240 ErrorInfo_struct * err_info; /* pointer to the allocated mem */
237 int ctlr; 241 int ctlr;
238 int cmd_type; 242 int cmd_type;
243 long cmdindex;
239 struct _CommandList_struct *prev; 244 struct _CommandList_struct *prev;
240 struct _CommandList_struct *next; 245 struct _CommandList_struct *next;
241 struct request * rq; 246 struct request * rq;
242 struct completion *waiting; 247 struct completion *waiting;
243 int retry_count; 248 int retry_count;
244#ifdef CONFIG_CISS_SCSI_TAPE
245 void * scsi_cmd; 249 void * scsi_cmd;
246#endif 250 char pad[PADSIZE];
247} CommandList_struct; 251} CommandList_struct;
248 252
249//Configuration Table Structure 253//Configuration Table Structure
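
The comment above ties the new pad[] member to an index-based completion scheme: commands live in one contiguous, DMA-mapped pool, so a command can be located again from its offset, which only works if every element has the same, 8-byte-divisible size. The toy program below illustrates the offset-to-index step; the struct layout is invented for the example and is not the driver's real CommandList_struct.

#include <stdio.h>
#include <stddef.h>

/* Invented command type; what matters is only that its size is fixed
 * and a multiple of 8, so offsets divide cleanly into indices. */
struct toy_cmd {
	unsigned long busaddr;
	long cmdindex;
	char payload[48];	/* stand-in for header, request block, ... */
};

int main(void)
{
	struct toy_cmd pool[8];
	size_t i;

	for (i = 0; i < 8; i++)
		pool[i].cmdindex = (long)i;

	/* given only a pointer into the pool, recover the index */
	struct toy_cmd *c = &pool[5];
	size_t idx = (size_t)((char *)c - (char *)pool) / sizeof(struct toy_cmd);

	printf("sizeof(toy_cmd) = %zu (multiple of 8: %s), idx = %zu\n",
	       sizeof(struct toy_cmd),
	       sizeof(struct toy_cmd) % 8 == 0 ? "yes" : "no", idx);
	return 0;
}
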
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index f16e3caed58a..e183a3ef7839 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -93,6 +93,7 @@ struct cciss_scsi_cmd_stack_elem_t {
93 CommandList_struct cmd; 93 CommandList_struct cmd;
94 ErrorInfo_struct Err; 94 ErrorInfo_struct Err;
95 __u32 busaddr; 95 __u32 busaddr;
96 __u32 pad;
96}; 97};
97 98
98#pragma pack() 99#pragma pack()
@@ -877,7 +878,7 @@ cciss_scsi_interpret_error(CommandList_struct *cp)
877 878
878static int 879static int
879cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr, 880cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
880 InquiryData_struct *buf) 881 unsigned char *buf, unsigned char bufsize)
881{ 882{
882 int rc; 883 int rc;
883 CommandList_struct *cp; 884 CommandList_struct *cp;
@@ -900,11 +901,10 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
900 cdb[1] = 0; 901 cdb[1] = 0;
901 cdb[2] = 0; 902 cdb[2] = 0;
902 cdb[3] = 0; 903 cdb[3] = 0;
903 cdb[4] = sizeof(*buf) & 0xff; 904 cdb[4] = bufsize;
904 cdb[5] = 0; 905 cdb[5] = 0;
905 rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, cdb, 906 rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, cdb,
906 6, (unsigned char *) buf, 907 6, buf, bufsize, XFER_READ);
907 sizeof(*buf), XFER_READ);
908 908
909 if (rc != 0) return rc; /* something went wrong */ 909 if (rc != 0) return rc; /* something went wrong */
910 910
@@ -1000,9 +1000,10 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
1000 that though. 1000 that though.
1001 1001
1002 */ 1002 */
1003 1003#define OBDR_TAPE_INQ_SIZE 49
1004#define OBDR_TAPE_SIG "$DR-10"
1004 ReportLunData_struct *ld_buff; 1005 ReportLunData_struct *ld_buff;
1005 InquiryData_struct *inq_buff; 1006 unsigned char *inq_buff;
1006 unsigned char scsi3addr[8]; 1007 unsigned char scsi3addr[8];
1007 ctlr_info_t *c; 1008 ctlr_info_t *c;
1008 __u32 num_luns=0; 1009 __u32 num_luns=0;
@@ -1020,7 +1021,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
1020 return; 1021 return;
1021 } 1022 }
1022 memset(ld_buff, 0, reportlunsize); 1023 memset(ld_buff, 0, reportlunsize);
1023 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL); 1024 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
1024 if (inq_buff == NULL) { 1025 if (inq_buff == NULL) {
1025 printk(KERN_ERR "cciss: out of memory\n"); 1026 printk(KERN_ERR "cciss: out of memory\n");
1026 kfree(ld_buff); 1027 kfree(ld_buff);
@@ -1051,19 +1052,36 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
1051 1052
1052 /* for each physical lun, do an inquiry */ 1053 /* for each physical lun, do an inquiry */
1053 if (ld_buff->LUN[i][3] & 0xC0) continue; 1054 if (ld_buff->LUN[i][3] & 0xC0) continue;
1054 memset(inq_buff, 0, sizeof(InquiryData_struct)); 1055 memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
1055 memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8); 1056 memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
1056 1057
1057 if (cciss_scsi_do_inquiry(hba[cntl_num], 1058 if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, inq_buff,
1058 scsi3addr, inq_buff) != 0) 1059 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
1059 {
1060 /* Inquiry failed (msg printed already) */ 1060 /* Inquiry failed (msg printed already) */
1061 devtype = 0; /* so we will skip this device. */ 1061 devtype = 0; /* so we will skip this device. */
1062 } else /* what kind of device is this? */ 1062 } else /* what kind of device is this? */
1063 devtype = (inq_buff->data_byte[0] & 0x1f); 1063 devtype = (inq_buff[0] & 0x1f);
1064 1064
1065 switch (devtype) 1065 switch (devtype)
1066 { 1066 {
1067 case 0x05: /* CD-ROM */ {
1068
1069 /* We don't *really* support actual CD-ROM devices,
1070 * just this "One Button Disaster Recovery" tape drive
1071 * which temporarily pretends to be a CD-ROM drive.
1072 * So we check that the device is really an OBDR tape
1073 * device by checking for "$DR-10" in bytes 43-48 of
1074 * the inquiry data.
1075 */
1076 char obdr_sig[7];
1077
1078 strncpy(obdr_sig, &inq_buff[43], 6);
1079 obdr_sig[6] = '\0';
1080 if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
1081 /* Not OBDR device, ignore it. */
1082 break;
1083 }
1084 /* fall through . . . */
1067 case 0x01: /* sequential access, (tape) */ 1085 case 0x01: /* sequential access, (tape) */
1068 case 0x08: /* medium changer */ 1086 case 0x08: /* medium changer */
1069 if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) { 1087 if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
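
The CD-ROM branch added above only accepts a device that is really an HP One Button Disaster Recovery tape drive, identified by the "$DR-10" signature in bytes 43-48 of its inquiry data. A stand-alone version of that check, with a fabricated inquiry buffer, looks like this:

#include <stdio.h>
#include <string.h>

#define OBDR_TAPE_INQ_SIZE 49
#define OBDR_TAPE_SIG "$DR-10"

/* A device reporting peripheral type 0x05 (CD-ROM) is treated as an
 * OBDR tape only when the signature is present. */
static int is_obdr_tape(const unsigned char *inq)
{
	char sig[7];

	strncpy(sig, (const char *)&inq[43], 6);
	sig[6] = '\0';
	return strncmp(sig, OBDR_TAPE_SIG, 6) == 0;
}

int main(void)
{
	unsigned char inq[OBDR_TAPE_INQ_SIZE] = { 0x05 };	/* CD-ROM type */

	memcpy(&inq[43], "$DR-10", 6);	/* pretend the signature is present */
	printf("OBDR tape: %s\n", is_obdr_tape(inq) ? "yes" : "no");
	return 0;
}
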
@@ -1126,6 +1144,7 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
1126 1144
1127 int buflen, datalen; 1145 int buflen, datalen;
1128 ctlr_info_t *ci; 1146 ctlr_info_t *ci;
1147 int i;
1129 int cntl_num; 1148 int cntl_num;
1130 1149
1131 1150
@@ -1136,8 +1155,28 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
1136 cntl_num = ci->ctlr; /* Get our index into the hba[] array */ 1155 cntl_num = ci->ctlr; /* Get our index into the hba[] array */
1137 1156
1138 if (func == 0) { /* User is reading from /proc/scsi/ciss*?/?* */ 1157 if (func == 0) { /* User is reading from /proc/scsi/ciss*?/?* */
1139 buflen = sprintf(buffer, "hostnum=%d\n", sh->host_no); 1158 buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n",
1140 1159 cntl_num, sh->host_no);
1160
1161 /* this information is needed by apps to know which cciss
1162 device corresponds to which scsi host number without
1163 having to open a scsi target device node. The device
1164 information is not a duplicate of /proc/scsi/scsi because
1165 the two may be out of sync due to scsi hotplug, rather
1166 this info is for an app to be able to use to know how to
1167 get them back in sync. */
1168
1169 for (i=0;i<ccissscsi[cntl_num].ndevices;i++) {
1170 struct cciss_scsi_dev_t *sd = &ccissscsi[cntl_num].dev[i];
1171 buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d "
1172 "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
1173 sh->host_no, sd->bus, sd->target, sd->lun,
1174 sd->devtype,
1175 sd->scsi3addr[0], sd->scsi3addr[1],
1176 sd->scsi3addr[2], sd->scsi3addr[3],
1177 sd->scsi3addr[4], sd->scsi3addr[5],
1178 sd->scsi3addr[6], sd->scsi3addr[7]);
1179 }
1141 datalen = buflen - offset; 1180 datalen = buflen - offset;
1142 if (datalen < 0) { /* they're reading past EOF. */ 1181 if (datalen < 0) { /* they're reading past EOF. */
1143 datalen = 0; 1182 datalen = 0;
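
The proc handler now prints one header line naming the SCSI host and one line per attached tape or media changer, so a management tool can map a cciss controller to its SCSI host number without opening a target device node. A short sketch of the output format, with made-up values standing in for what really comes from ccissscsi[]:

#include <stdio.h>

int main(void)
{
	int ctlr = 0, host_no = 2;
	int bus = 0, target = 1, lun = 0, devtype = 0x01;	/* hypothetical tape */
	unsigned char addr[8] = { 0, 0, 0, 0, 0, 0, 0, 1 };

	printf("cciss%d: SCSI host: %d\n", ctlr, host_no);
	printf("c%db%dt%dl%d %02d 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
	       host_no, bus, target, lun, devtype,
	       addr[0], addr[1], addr[2], addr[3],
	       addr[4], addr[5], addr[6], addr[7]);
	return 0;
}
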
@@ -1399,7 +1438,7 @@ cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len)
1399 1438
1400 CPQ_TAPE_LOCK(ctlr, flags); 1439 CPQ_TAPE_LOCK(ctlr, flags);
1401 size = sprintf(buffer + *len, 1440 size = sprintf(buffer + *len,
1402 " Sequential access devices: %d\n\n", 1441 "Sequential access devices: %d\n\n",
1403 ccissscsi[ctlr].ndevices); 1442 ccissscsi[ctlr].ndevices);
1404 CPQ_TAPE_UNLOCK(ctlr, flags); 1443 CPQ_TAPE_UNLOCK(ctlr, flags);
1405 *pos += size; *len += size; 1444 *pos += size; *len += size;
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 483d71b10cf9..baedac522945 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -2373,44 +2373,6 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
2373 2373
2374EXPORT_SYMBOL(blkdev_issue_flush); 2374EXPORT_SYMBOL(blkdev_issue_flush);
2375 2375
2376/**
2377 * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
2378 * @q: device queue
2379 * @disk: gendisk
2380 * @error_sector: error offset
2381 *
2382 * Description:
2383 * Devices understanding the SCSI command set, can use this function as
2384 * a helper for issuing a cache flush. Note: driver is required to store
2385 * the error offset (in case of error flushing) in ->sector of struct
2386 * request.
2387 */
2388int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
2389 sector_t *error_sector)
2390{
2391 struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
2392 int ret;
2393
2394 rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
2395 rq->sector = 0;
2396 memset(rq->cmd, 0, sizeof(rq->cmd));
2397 rq->cmd[0] = 0x35;
2398 rq->cmd_len = 12;
2399 rq->data = NULL;
2400 rq->data_len = 0;
2401 rq->timeout = 60 * HZ;
2402
2403 ret = blk_execute_rq(q, disk, rq, 0);
2404
2405 if (ret && error_sector)
2406 *error_sector = rq->sector;
2407
2408 blk_put_request(rq);
2409 return ret;
2410}
2411
2412EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
2413
2414static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io) 2376static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
2415{ 2377{
2416 int rw = rq_data_dir(rq); 2378 int rw = rq_data_dir(rq);
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 711d2f314ac3..94af920465b5 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -750,6 +750,14 @@ static int pf_ready(void)
750 750
751static struct request_queue *pf_queue; 751static struct request_queue *pf_queue;
752 752
753static void pf_end_request(int uptodate)
754{
755 if (pf_req) {
756 end_request(pf_req, uptodate);
757 pf_req = NULL;
758 }
759}
760
753static void do_pf_request(request_queue_t * q) 761static void do_pf_request(request_queue_t * q)
754{ 762{
755 if (pf_busy) 763 if (pf_busy)
@@ -765,7 +773,7 @@ repeat:
765 pf_count = pf_req->current_nr_sectors; 773 pf_count = pf_req->current_nr_sectors;
766 774
767 if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { 775 if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
768 end_request(pf_req, 0); 776 pf_end_request(0);
769 goto repeat; 777 goto repeat;
770 } 778 }
771 779
@@ -780,7 +788,7 @@ repeat:
780 pi_do_claimed(pf_current->pi, do_pf_write); 788 pi_do_claimed(pf_current->pi, do_pf_write);
781 else { 789 else {
782 pf_busy = 0; 790 pf_busy = 0;
783 end_request(pf_req, 0); 791 pf_end_request(0);
784 goto repeat; 792 goto repeat;
785 } 793 }
786} 794}
@@ -798,9 +806,11 @@ static int pf_next_buf(void)
798 if (!pf_count) 806 if (!pf_count)
799 return 1; 807 return 1;
800 spin_lock_irqsave(&pf_spin_lock, saved_flags); 808 spin_lock_irqsave(&pf_spin_lock, saved_flags);
801 end_request(pf_req, 1); 809 pf_end_request(1);
802 pf_count = pf_req->current_nr_sectors; 810 if (pf_req) {
803 pf_buf = pf_req->buffer; 811 pf_count = pf_req->current_nr_sectors;
812 pf_buf = pf_req->buffer;
813 }
804 spin_unlock_irqrestore(&pf_spin_lock, saved_flags); 814 spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
805 return 1; 815 return 1;
806} 816}
@@ -810,7 +820,7 @@ static inline void next_request(int success)
810 unsigned long saved_flags; 820 unsigned long saved_flags;
811 821
812 spin_lock_irqsave(&pf_spin_lock, saved_flags); 822 spin_lock_irqsave(&pf_spin_lock, saved_flags);
813 end_request(pf_req, success); 823 pf_end_request(success);
814 pf_busy = 0; 824 pf_busy = 0;
815 do_pf_request(pf_queue); 825 do_pf_request(pf_queue);
816 spin_unlock_irqrestore(&pf_spin_lock, saved_flags); 826 spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
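
pf_end_request() exists because end_request() can finish the whole request, after which the global pf_req pointer must not be dereferenced again; the wrapper completes at most once and clears the pointer, and pf_next_buf() now re-checks it before reloading pf_count and pf_buf. The sketch below replays that guard with a toy request type (everything here is invented except the shape of the pattern):

#include <stdio.h>
#include <stdlib.h>

struct toy_req {
	int sectors_left;
};

static struct toy_req *cur_req;

static void toy_end_request(int uptodate)
{
	if (cur_req) {
		printf("request finished, uptodate=%d\n", uptodate);
		free(cur_req);
		cur_req = NULL;	/* never touch it again */
	}
}

int main(void)
{
	cur_req = malloc(sizeof(*cur_req));
	cur_req->sectors_left = 0;

	toy_end_request(1);	/* completes and clears the pointer */
	toy_end_request(1);	/* safe no-op: pointer already NULL */

	if (cur_req)		/* caller re-checks before reuse */
		printf("%d sectors left\n", cur_req->sectors_left);
	else
		printf("no current request\n");
	return 0;
}
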
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 7b838342f0a3..7e22a58926b8 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -5,29 +5,41 @@
5 * May be copied or modified under the terms of the GNU General Public 5 * May be copied or modified under the terms of the GNU General Public
6 * License. See linux/COPYING for more information. 6 * License. See linux/COPYING for more information.
7 * 7 *
8 * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and 8 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
9 * DVD-RW devices (aka an exercise in block layer masturbation) 9 * DVD-RAM devices.
10 * 10 *
11 * Theory of operation:
11 * 12 *
12 * TODO: (circa order of when I will fix it) 13 * At the lowest level, there is the standard driver for the CD/DVD device,
13 * - Only able to write on CD-RW media right now. 14 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
14 * - check host application code on media and set it in write page 15 * but it doesn't know anything about the special restrictions that apply to
15 * - interface for UDF <-> packet to negotiate a new location when a write 16 * packet writing. One restriction is that write requests must be aligned to
16 * fails. 17 * packet boundaries on the physical media, and the size of a write request
17 * - handle OPC, especially for -RW media 18 * must be equal to the packet size. Another restriction is that a
19 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
20 * command, if the previous command was a write.
18 * 21 *
19 * Theory of operation: 22 * The purpose of the packet writing driver is to hide these restrictions from
23 * higher layers, such as file systems, and present a block device that can be
24 * randomly read and written using 2kB-sized blocks.
25 *
26 * The lowest layer in the packet writing driver is the packet I/O scheduler.
27 * Its data is defined by the struct packet_iosched and includes two bio
28 * queues with pending read and write requests. These queues are processed
29 * by the pkt_iosched_process_queue() function. The write requests in this
30 * queue are already properly aligned and sized. This layer is responsible for
31 * issuing the flush cache commands and scheduling the I/O in a good order.
20 * 32 *
21 * We use a custom make_request_fn function that forwards reads directly to 33 * The next layer transforms unaligned write requests to aligned writes. This
22 * the underlying CD device. Write requests are either attached directly to 34 * transformation requires reading missing pieces of data from the underlying
23 * a live packet_data object, or simply stored sequentially in a list for 35 * block device, assembling the pieces to full packets and queuing them to the
24 * later processing by the kcdrwd kernel thread. This driver doesn't use 36 * packet I/O scheduler.
25 * any elevator functionally as defined by the elevator_s struct, but the
26 * underlying CD device uses a standard elevator.
27 * 37 *
28 * This strategy makes it possible to do very late merging of IO requests. 38 * At the top layer there is a custom make_request_fn function that forwards
29 * A new bio sent to pkt_make_request can be merged with a live packet_data 39 * read requests directly to the iosched queue and puts write requests in the
30 * object even if the object is in the data gathering state. 40 * unaligned write queue. A kernel thread performs the necessary read
41 * gathering to convert the unaligned writes to aligned writes and then feeds
42 * them to the packet I/O scheduler.
31 * 43 *
32 *************************************************************************/ 44 *************************************************************************/
33 45
@@ -100,10 +112,9 @@ static struct bio *pkt_bio_alloc(int nr_iovecs)
100 goto no_bio; 112 goto no_bio;
101 bio_init(bio); 113 bio_init(bio);
102 114
103 bvl = kmalloc(nr_iovecs * sizeof(struct bio_vec), GFP_KERNEL); 115 bvl = kcalloc(nr_iovecs, sizeof(struct bio_vec), GFP_KERNEL);
104 if (!bvl) 116 if (!bvl)
105 goto no_bvl; 117 goto no_bvl;
106 memset(bvl, 0, nr_iovecs * sizeof(struct bio_vec));
107 118
108 bio->bi_max_vecs = nr_iovecs; 119 bio->bi_max_vecs = nr_iovecs;
109 bio->bi_io_vec = bvl; 120 bio->bi_io_vec = bvl;
@@ -125,10 +136,9 @@ static struct packet_data *pkt_alloc_packet_data(void)
125 int i; 136 int i;
126 struct packet_data *pkt; 137 struct packet_data *pkt;
127 138
128 pkt = kmalloc(sizeof(struct packet_data), GFP_KERNEL); 139 pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
129 if (!pkt) 140 if (!pkt)
130 goto no_pkt; 141 goto no_pkt;
131 memset(pkt, 0, sizeof(struct packet_data));
132 142
133 pkt->w_bio = pkt_bio_alloc(PACKET_MAX_SIZE); 143 pkt->w_bio = pkt_bio_alloc(PACKET_MAX_SIZE);
134 if (!pkt->w_bio) 144 if (!pkt->w_bio)
@@ -659,7 +669,6 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct page **pages, in
659 } 669 }
660 offs += CD_FRAMESIZE; 670 offs += CD_FRAMESIZE;
661 if (offs >= PAGE_SIZE) { 671 if (offs >= PAGE_SIZE) {
662 BUG_ON(offs > PAGE_SIZE);
663 offs = 0; 672 offs = 0;
664 p++; 673 p++;
665 } 674 }
@@ -724,12 +733,6 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
724 atomic_set(&pkt->io_wait, 0); 733 atomic_set(&pkt->io_wait, 0);
725 atomic_set(&pkt->io_errors, 0); 734 atomic_set(&pkt->io_errors, 0);
726 735
727 if (pkt->cache_valid) {
728 VPRINTK("pkt_gather_data: zone %llx cached\n",
729 (unsigned long long)pkt->sector);
730 goto out_account;
731 }
732
733 /* 736 /*
734 * Figure out which frames we need to read before we can write. 737 * Figure out which frames we need to read before we can write.
735 */ 738 */
@@ -738,6 +741,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
738 for (bio = pkt->orig_bios; bio; bio = bio->bi_next) { 741 for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
739 int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); 742 int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
740 int num_frames = bio->bi_size / CD_FRAMESIZE; 743 int num_frames = bio->bi_size / CD_FRAMESIZE;
744 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
741 BUG_ON(first_frame < 0); 745 BUG_ON(first_frame < 0);
742 BUG_ON(first_frame + num_frames > pkt->frames); 746 BUG_ON(first_frame + num_frames > pkt->frames);
743 for (f = first_frame; f < first_frame + num_frames; f++) 747 for (f = first_frame; f < first_frame + num_frames; f++)
@@ -745,6 +749,12 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
745 } 749 }
746 spin_unlock(&pkt->lock); 750 spin_unlock(&pkt->lock);
747 751
752 if (pkt->cache_valid) {
753 VPRINTK("pkt_gather_data: zone %llx cached\n",
754 (unsigned long long)pkt->sector);
755 goto out_account;
756 }
757
748 /* 758 /*
749 * Schedule reads for missing parts of the packet. 759 * Schedule reads for missing parts of the packet.
750 */ 760 */
@@ -778,7 +788,6 @@ out_account:
778 frames_read, (unsigned long long)pkt->sector); 788 frames_read, (unsigned long long)pkt->sector);
779 pd->stats.pkt_started++; 789 pd->stats.pkt_started++;
780 pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9); 790 pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
781 pd->stats.secs_w += pd->settings.size;
782} 791}
783 792
784/* 793/*
@@ -794,10 +803,11 @@ static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zo
794 list_del_init(&pkt->list); 803 list_del_init(&pkt->list);
795 if (pkt->sector != zone) 804 if (pkt->sector != zone)
796 pkt->cache_valid = 0; 805 pkt->cache_valid = 0;
797 break; 806 return pkt;
798 } 807 }
799 } 808 }
800 return pkt; 809 BUG();
810 return NULL;
801} 811}
802 812
803static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt) 813static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
@@ -941,12 +951,10 @@ try_next_bio:
941 } 951 }
942 952
943 pkt = pkt_get_packet_data(pd, zone); 953 pkt = pkt_get_packet_data(pd, zone);
944 BUG_ON(!pkt);
945 954
946 pd->current_sector = zone + pd->settings.size; 955 pd->current_sector = zone + pd->settings.size;
947 pkt->sector = zone; 956 pkt->sector = zone;
948 pkt->frames = pd->settings.size >> 2; 957 pkt->frames = pd->settings.size >> 2;
949 BUG_ON(pkt->frames > PACKET_MAX_SIZE);
950 pkt->write_size = 0; 958 pkt->write_size = 0;
951 959
952 /* 960 /*
@@ -1636,6 +1644,10 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
1636 printk("pktcdvd: detected zero packet size!\n"); 1644 printk("pktcdvd: detected zero packet size!\n");
1637 pd->settings.size = 128; 1645 pd->settings.size = 128;
1638 } 1646 }
1647 if (pd->settings.size > PACKET_MAX_SECTORS) {
1648 printk("pktcdvd: packet size is too big\n");
1649 return -ENXIO;
1650 }
1639 pd->settings.fp = ti.fp; 1651 pd->settings.fp = ti.fp;
1640 pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1); 1652 pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
1641 1653
@@ -2198,7 +2210,6 @@ static int pkt_make_request(request_queue_t *q, struct bio *bio)
2198 * No matching packet found. Store the bio in the work queue. 2210 * No matching packet found. Store the bio in the work queue.
2199 */ 2211 */
2200 node = mempool_alloc(pd->rb_pool, GFP_NOIO); 2212 node = mempool_alloc(pd->rb_pool, GFP_NOIO);
2201 BUG_ON(!node);
2202 node->bio = bio; 2213 node->bio = bio;
2203 spin_lock(&pd->lock); 2214 spin_lock(&pd->lock);
2204 BUG_ON(pd->bio_queue_size < 0); 2215 BUG_ON(pd->bio_queue_size < 0);
@@ -2406,7 +2417,6 @@ static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u
2406 struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data; 2417 struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
2407 2418
2408 VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode)); 2419 VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode));
2409 BUG_ON(!pd);
2410 2420
2411 switch (cmd) { 2421 switch (cmd) {
2412 /* 2422 /*
@@ -2477,10 +2487,9 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
2477 return -EBUSY; 2487 return -EBUSY;
2478 } 2488 }
2479 2489
2480 pd = kmalloc(sizeof(struct pktcdvd_device), GFP_KERNEL); 2490 pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
2481 if (!pd) 2491 if (!pd)
2482 return ret; 2492 return ret;
2483 memset(pd, 0, sizeof(struct pktcdvd_device));
2484 2493
2485 pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL); 2494 pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL);
2486 if (!pd->rb_pool) 2495 if (!pd->rb_pool)
diff --git a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c
index 856c2278e9d0..079ec344eb47 100644
--- a/drivers/block/scsi_ioctl.c
+++ b/drivers/block/scsi_ioctl.c
@@ -168,6 +168,7 @@ static int verify_command(struct file *file, unsigned char *cmd)
168 safe_for_write(WRITE_VERIFY_12), 168 safe_for_write(WRITE_VERIFY_12),
169 safe_for_write(WRITE_16), 169 safe_for_write(WRITE_16),
170 safe_for_write(WRITE_LONG), 170 safe_for_write(WRITE_LONG),
171 safe_for_write(WRITE_LONG_2),
171 safe_for_write(ERASE), 172 safe_for_write(ERASE),
172 safe_for_write(GPCMD_MODE_SELECT_10), 173 safe_for_write(GPCMD_MODE_SELECT_10),
173 safe_for_write(MODE_SELECT), 174 safe_for_write(MODE_SELECT),
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index aa0bf7ee008d..ed4d5006fe62 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -172,7 +172,7 @@ struct bulk_cs_wrap {
172 */ 172 */
173struct ub_dev; 173struct ub_dev;
174 174
175#define UB_MAX_REQ_SG 4 175#define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */
176#define UB_MAX_SECTORS 64 176#define UB_MAX_SECTORS 64
177 177
178/* 178/*
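
The new UB_MAX_REQ_SG value of 9 follows from the comment: a 32kB cdrecord burst split into page-sized scatter/gather segments needs eight entries with 4kB pages (an assumed page size; it differs on some architectures), plus one more for a header or an unaligned start. The arithmetic, spelled out:

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;		/* assumed page size */
	unsigned int xfer = 32 * 1024;		/* what cdrecord issues */

	unsigned int sg = xfer / page_size + 1;	/* extra entry for header/unaligned start */
	printf("worst-case s/g entries: %u\n", sg);	/* prints 9 */
	return 0;
}
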
@@ -387,7 +387,7 @@ struct ub_dev {
387 struct bulk_cs_wrap work_bcs; 387 struct bulk_cs_wrap work_bcs;
388 struct usb_ctrlrequest work_cr; 388 struct usb_ctrlrequest work_cr;
389 389
390 int sg_stat[UB_MAX_REQ_SG+1]; 390 int sg_stat[6];
391 struct ub_scsi_trace tr; 391 struct ub_scsi_trace tr;
392}; 392};
393 393
@@ -525,12 +525,13 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
525 "qlen %d qmax %d\n", 525 "qlen %d qmax %d\n",
526 sc->cmd_queue.qlen, sc->cmd_queue.qmax); 526 sc->cmd_queue.qlen, sc->cmd_queue.qmax);
527 cnt += sprintf(page + cnt, 527 cnt += sprintf(page + cnt,
528 "sg %d %d %d %d %d\n", 528 "sg %d %d %d %d %d .. %d\n",
529 sc->sg_stat[0], 529 sc->sg_stat[0],
530 sc->sg_stat[1], 530 sc->sg_stat[1],
531 sc->sg_stat[2], 531 sc->sg_stat[2],
532 sc->sg_stat[3], 532 sc->sg_stat[3],
533 sc->sg_stat[4]); 533 sc->sg_stat[4],
534 sc->sg_stat[5]);
534 535
535 list_for_each (p, &sc->luns) { 536 list_for_each (p, &sc->luns) {
536 lun = list_entry(p, struct ub_lun, link); 537 lun = list_entry(p, struct ub_lun, link);
@@ -835,7 +836,7 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
835 return -1; 836 return -1;
836 } 837 }
837 cmd->nsg = n_elem; 838 cmd->nsg = n_elem;
838 sc->sg_stat[n_elem]++; 839 sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
839 840
840 /* 841 /*
841 * build the command 842 * build the command
@@ -891,7 +892,7 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
891 return -1; 892 return -1;
892 } 893 }
893 cmd->nsg = n_elem; 894 cmd->nsg = n_elem;
894 sc->sg_stat[n_elem]++; 895 sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
895 896
896 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); 897 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
897 cmd->cdb_len = rq->cmd_len; 898 cmd->cdb_len = rq->cmd_len;
@@ -1010,7 +1011,6 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1010 sc->last_pipe = sc->send_bulk_pipe; 1011 sc->last_pipe = sc->send_bulk_pipe;
1011 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe, 1012 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
1012 bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc); 1013 bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
1013 sc->work_urb.transfer_flags = 0;
1014 1014
1015 /* Fill what we shouldn't be filling, because usb-storage did so. */ 1015 /* Fill what we shouldn't be filling, because usb-storage did so. */
1016 sc->work_urb.actual_length = 0; 1016 sc->work_urb.actual_length = 0;
@@ -1019,7 +1019,6 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1019 1019
1020 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { 1020 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1021 /* XXX Clear stalls */ 1021 /* XXX Clear stalls */
1022 printk("ub: cmd #%d start failed (%d)\n", cmd->tag, rc); /* P3 */
1023 ub_complete(&sc->work_done); 1022 ub_complete(&sc->work_done);
1024 return rc; 1023 return rc;
1025 } 1024 }
@@ -1190,11 +1189,9 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1190 return; 1189 return;
1191 } 1190 }
1192 if (urb->status != 0) { 1191 if (urb->status != 0) {
1193 printk("ub: cmd #%d cmd status (%d)\n", cmd->tag, urb->status); /* P3 */
1194 goto Bad_End; 1192 goto Bad_End;
1195 } 1193 }
1196 if (urb->actual_length != US_BULK_CB_WRAP_LEN) { 1194 if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
1197 printk("ub: cmd #%d xferred %d\n", cmd->tag, urb->actual_length); /* P3 */
1198 /* XXX Must do reset here to unconfuse the device */ 1195 /* XXX Must do reset here to unconfuse the device */
1199 goto Bad_End; 1196 goto Bad_End;
1200 } 1197 }
@@ -1395,14 +1392,12 @@ static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1395 usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, 1392 usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
1396 page_address(sg->page) + sg->offset, sg->length, 1393 page_address(sg->page) + sg->offset, sg->length,
1397 ub_urb_complete, sc); 1394 ub_urb_complete, sc);
1398 sc->work_urb.transfer_flags = 0;
1399 sc->work_urb.actual_length = 0; 1395 sc->work_urb.actual_length = 0;
1400 sc->work_urb.error_count = 0; 1396 sc->work_urb.error_count = 0;
1401 sc->work_urb.status = 0; 1397 sc->work_urb.status = 0;
1402 1398
1403 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { 1399 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1404 /* XXX Clear stalls */ 1400 /* XXX Clear stalls */
1405 printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
1406 ub_complete(&sc->work_done); 1401 ub_complete(&sc->work_done);
1407 ub_state_done(sc, cmd, rc); 1402 ub_state_done(sc, cmd, rc);
1408 return; 1403 return;
@@ -1442,7 +1437,6 @@ static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1442 sc->last_pipe = sc->recv_bulk_pipe; 1437 sc->last_pipe = sc->recv_bulk_pipe;
1443 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe, 1438 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
1444 &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc); 1439 &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
1445 sc->work_urb.transfer_flags = 0;
1446 sc->work_urb.actual_length = 0; 1440 sc->work_urb.actual_length = 0;
1447 sc->work_urb.error_count = 0; 1441 sc->work_urb.error_count = 0;
1448 sc->work_urb.status = 0; 1442 sc->work_urb.status = 0;
@@ -1563,7 +1557,6 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
1563 1557
1564 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, 1558 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1565 (unsigned char*) cr, NULL, 0, ub_urb_complete, sc); 1559 (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
1566 sc->work_urb.transfer_flags = 0;
1567 sc->work_urb.actual_length = 0; 1560 sc->work_urb.actual_length = 0;
1568 sc->work_urb.error_count = 0; 1561 sc->work_urb.error_count = 0;
1569 sc->work_urb.status = 0; 1562 sc->work_urb.status = 0;
@@ -2000,17 +1993,16 @@ static int ub_sync_getmaxlun(struct ub_dev *sc)
2000 1993
2001 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe, 1994 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
2002 (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl); 1995 (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
2003 sc->work_urb.transfer_flags = 0;
2004 sc->work_urb.actual_length = 0; 1996 sc->work_urb.actual_length = 0;
2005 sc->work_urb.error_count = 0; 1997 sc->work_urb.error_count = 0;
2006 sc->work_urb.status = 0; 1998 sc->work_urb.status = 0;
2007 1999
2008 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { 2000 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
2009 if (rc == -EPIPE) { 2001 if (rc == -EPIPE) {
2010 printk("%s: Stall at GetMaxLUN, using 1 LUN\n", 2002 printk("%s: Stall submitting GetMaxLUN, using 1 LUN\n",
2011 sc->name); /* P3 */ 2003 sc->name); /* P3 */
2012 } else { 2004 } else {
2013 printk(KERN_WARNING 2005 printk(KERN_NOTICE
2014 "%s: Unable to submit GetMaxLUN (%d)\n", 2006 "%s: Unable to submit GetMaxLUN (%d)\n",
2015 sc->name, rc); 2007 sc->name, rc);
2016 } 2008 }
@@ -2028,6 +2020,18 @@ static int ub_sync_getmaxlun(struct ub_dev *sc)
2028 del_timer_sync(&timer); 2020 del_timer_sync(&timer);
2029 usb_kill_urb(&sc->work_urb); 2021 usb_kill_urb(&sc->work_urb);
2030 2022
2023 if ((rc = sc->work_urb.status) < 0) {
2024 if (rc == -EPIPE) {
2025 printk("%s: Stall at GetMaxLUN, using 1 LUN\n",
2026 sc->name); /* P3 */
2027 } else {
2028 printk(KERN_NOTICE
2029 "%s: Error at GetMaxLUN (%d)\n",
2030 sc->name, rc);
2031 }
2032 goto err_io;
2033 }
2034
2031 if (sc->work_urb.actual_length != 1) { 2035 if (sc->work_urb.actual_length != 1) {
2032 printk("%s: GetMaxLUN returned %d bytes\n", sc->name, 2036 printk("%s: GetMaxLUN returned %d bytes\n", sc->name,
2033 sc->work_urb.actual_length); /* P3 */ 2037 sc->work_urb.actual_length); /* P3 */
@@ -2048,6 +2052,7 @@ static int ub_sync_getmaxlun(struct ub_dev *sc)
2048 kfree(p); 2052 kfree(p);
2049 return nluns; 2053 return nluns;
2050 2054
2055err_io:
2051err_submit: 2056err_submit:
2052 kfree(p); 2057 kfree(p);
2053err_alloc: 2058err_alloc:
@@ -2080,7 +2085,6 @@ static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
2080 2085
2081 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, 2086 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
2082 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl); 2087 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
2083 sc->work_urb.transfer_flags = 0;
2084 sc->work_urb.actual_length = 0; 2088 sc->work_urb.actual_length = 0;
2085 sc->work_urb.error_count = 0; 2089 sc->work_urb.error_count = 0;
2086 sc->work_urb.status = 0; 2090 sc->work_urb.status = 0;
@@ -2213,8 +2217,10 @@ static int ub_probe(struct usb_interface *intf,
2213 * This is needed to clear toggles. It is a problem only if we do 2217 * This is needed to clear toggles. It is a problem only if we do
2214 * `rmmod ub && modprobe ub` without disconnects, but we like that. 2218 * `rmmod ub && modprobe ub` without disconnects, but we like that.
2215 */ 2219 */
2220#if 0 /* iPod Mini fails if we do this (big white iPod works) */
2216 ub_probe_clear_stall(sc, sc->recv_bulk_pipe); 2221 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
2217 ub_probe_clear_stall(sc, sc->send_bulk_pipe); 2222 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
2223#endif
2218 2224
2219 /* 2225 /*
2220 * The way this is used by the startup code is a little specific. 2226 * The way this is used by the startup code is a little specific.
@@ -2241,10 +2247,10 @@ static int ub_probe(struct usb_interface *intf,
2241 for (i = 0; i < 3; i++) { 2247 for (i = 0; i < 3; i++) {
2242 if ((rc = ub_sync_getmaxlun(sc)) < 0) { 2248 if ((rc = ub_sync_getmaxlun(sc)) < 0) {
2243 /* 2249 /*
2244 * Some devices (i.e. Iomega Zip100) need this -- 2250 * This segment is taken from usb-storage. They say
2245 * apparently the bulk pipes get STALLed when the 2251 * that ZIP-100 needs this, but my own ZIP-100 works
2246 * GetMaxLUN request is processed. 2252 * fine without this.
2247 * XXX I have a ZIP-100, verify it does this. 2253 * Still, it does not seem to hurt anything.
2248 */ 2254 */
2249 if (rc == -EPIPE) { 2255 if (rc == -EPIPE) {
2250 ub_probe_clear_stall(sc, sc->recv_bulk_pipe); 2256 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
@@ -2313,7 +2319,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
2313 disk->first_minor = lun->id * UB_MINORS_PER_MAJOR; 2319 disk->first_minor = lun->id * UB_MINORS_PER_MAJOR;
2314 disk->fops = &ub_bd_fops; 2320 disk->fops = &ub_bd_fops;
2315 disk->private_data = lun; 2321 disk->private_data = lun;
2316 disk->driverfs_dev = &sc->intf->dev; /* XXX Many to one ok? */ 2322 disk->driverfs_dev = &sc->intf->dev;
2317 2323
2318 rc = -ENOMEM; 2324 rc = -ENOMEM;
2319 if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL) 2325 if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL)
@@ -2466,9 +2472,6 @@ static int __init ub_init(void)
2466{ 2472{
2467 int rc; 2473 int rc;
2468 2474
2469 /* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu ub_lun %zu\n",
2470 sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev), sizeof(struct ub_lun));
2471
2472 if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0) 2475 if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2473 goto err_regblkdev; 2476 goto err_regblkdev;
2474 devfs_mk_dir(DEVFS_NAME); 2477 devfs_mk_dir(DEVFS_NAME);
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 67d96b5cbb96..57c48bbf6fe6 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -65,13 +65,15 @@
65#endif 65#endif
66 66
67static int ignore = 0; 67static int ignore = 0;
68static int ignore_csr = 0;
69static int ignore_sniffer = 0;
68static int reset = 0; 70static int reset = 0;
69 71
70#ifdef CONFIG_BT_HCIUSB_SCO 72#ifdef CONFIG_BT_HCIUSB_SCO
71static int isoc = 2; 73static int isoc = 2;
72#endif 74#endif
73 75
74#define VERSION "2.8" 76#define VERSION "2.9"
75 77
76static struct usb_driver hci_usb_driver; 78static struct usb_driver hci_usb_driver;
77 79
@@ -98,6 +100,9 @@ static struct usb_device_id bluetooth_ids[] = {
98MODULE_DEVICE_TABLE (usb, bluetooth_ids); 100MODULE_DEVICE_TABLE (usb, bluetooth_ids);
99 101
100static struct usb_device_id blacklist_ids[] = { 102static struct usb_device_id blacklist_ids[] = {
103 /* CSR BlueCore devices */
104 { USB_DEVICE(0x0a12, 0x0001), .driver_info = HCI_CSR },
105
101 /* Broadcom BCM2033 without firmware */ 106 /* Broadcom BCM2033 without firmware */
102 { USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE }, 107 { USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE },
103 108
@@ -836,6 +841,12 @@ static int hci_usb_probe(struct usb_interface *intf, const struct usb_device_id
836 if (ignore || id->driver_info & HCI_IGNORE) 841 if (ignore || id->driver_info & HCI_IGNORE)
837 return -ENODEV; 842 return -ENODEV;
838 843
844 if (ignore_csr && id->driver_info & HCI_CSR)
845 return -ENODEV;
846
847 if (ignore_sniffer && id->driver_info & HCI_SNIFFER)
848 return -ENODEV;
849
839 if (intf->cur_altsetting->desc.bInterfaceNumber > 0) 850 if (intf->cur_altsetting->desc.bInterfaceNumber > 0)
840 return -ENODEV; 851 return -ENODEV;
841 852
@@ -1061,6 +1072,12 @@ module_exit(hci_usb_exit);
1061module_param(ignore, bool, 0644); 1072module_param(ignore, bool, 0644);
1062MODULE_PARM_DESC(ignore, "Ignore devices from the matching table"); 1073MODULE_PARM_DESC(ignore, "Ignore devices from the matching table");
1063 1074
1075module_param(ignore_csr, bool, 0644);
1076MODULE_PARM_DESC(ignore_csr, "Ignore devices with id 0a12:0001");
1077
1078module_param(ignore_sniffer, bool, 0644);
1079MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002");
1080
1064module_param(reset, bool, 0644); 1081module_param(reset, bool, 0644);
1065MODULE_PARM_DESC(reset, "Send HCI reset command on initialization"); 1082MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
1066 1083
diff --git a/drivers/bluetooth/hci_usb.h b/drivers/bluetooth/hci_usb.h
index 29936b43d4f8..37100a6ea1a8 100644
--- a/drivers/bluetooth/hci_usb.h
+++ b/drivers/bluetooth/hci_usb.h
@@ -31,9 +31,10 @@
31#define HCI_IGNORE 0x01 31#define HCI_IGNORE 0x01
32#define HCI_RESET 0x02 32#define HCI_RESET 0x02
33#define HCI_DIGIANSWER 0x04 33#define HCI_DIGIANSWER 0x04
34#define HCI_SNIFFER 0x08 34#define HCI_CSR 0x08
35#define HCI_BROKEN_ISOC 0x10 35#define HCI_SNIFFER 0x10
36#define HCI_BCM92035 0x20 36#define HCI_BCM92035 0x20
37#define HCI_BROKEN_ISOC 0x40
37 38
38#define HCI_MAX_IFACE_NUM 3 39#define HCI_MAX_IFACE_NUM 3
39 40
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 99762b6c19ae..de5d6d212674 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -252,7 +252,7 @@ hp_zx1_configure (void)
252 readl(hp->ioc_regs+HP_ZX1_PDIR_BASE); 252 readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
253 writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG); 253 writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
254 readl(hp->ioc_regs+HP_ZX1_TCNFG); 254 readl(hp->ioc_regs+HP_ZX1_TCNFG);
255 writel(~(HP_ZX1_IOVA_SIZE-1), hp->ioc_regs+HP_ZX1_IMASK); 255 writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK);
256 readl(hp->ioc_regs+HP_ZX1_IMASK); 256 readl(hp->ioc_regs+HP_ZX1_IMASK);
257 writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE); 257 writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
258 readl(hp->ioc_regs+HP_ZX1_IBASE); 258 readl(hp->ioc_regs+HP_ZX1_IBASE);
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 2a36561eec68..a124f8c5d062 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -2053,10 +2053,6 @@ static int __init rs_init(void)
2053 state->icount.rx = state->icount.tx = 0; 2053 state->icount.rx = state->icount.tx = 0;
2054 state->icount.frame = state->icount.parity = 0; 2054 state->icount.frame = state->icount.parity = 0;
2055 state->icount.overrun = state->icount.brk = 0; 2055 state->icount.overrun = state->icount.brk = 0;
2056 /*
2057 if(state->port && check_region(state->port,REGION_LENGTH(state)))
2058 continue;
2059 */
2060 2056
2061 printk(KERN_INFO "ttyS%d is the amiga builtin serial port\n", 2057 printk(KERN_INFO "ttyS%d is the amiga builtin serial port\n",
2062 state->line); 2058 state->line);
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 6ba48f346fcf..041bb47b5c39 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -376,7 +376,7 @@ static int __init drm_core_init(void)
376 goto err_p2; 376 goto err_p2;
377 } 377 }
378 378
379 drm_proc_root = create_proc_entry("dri", S_IFDIR, NULL); 379 drm_proc_root = proc_mkdir("dri", NULL);
380 if (!drm_proc_root) { 380 if (!drm_proc_root) {
381 DRM_ERROR("Cannot create /proc/dri\n"); 381 DRM_ERROR("Cannot create /proc/dri\n");
382 ret = -1; 382 ret = -1;
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 32d2bb99462c..977961002488 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -95,7 +95,7 @@ int drm_proc_init(drm_device_t *dev, int minor,
95 char name[64]; 95 char name[64];
96 96
97 sprintf(name, "%d", minor); 97 sprintf(name, "%d", minor);
98 *dev_root = create_proc_entry(name, S_IFDIR, root); 98 *dev_root = proc_mkdir(name, root);
99 if (!*dev_root) { 99 if (!*dev_root) {
100 DRM_ERROR("Cannot create /proc/dri/%s\n", name); 100 DRM_ERROR("Cannot create /proc/dri/%s\n", name);
101 return -1; 101 return -1;
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 58d3738a2b7f..407708a001e4 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -534,7 +534,7 @@ static void shutdown(struct channel *ch)
534 534
535 unsigned long flags; 535 unsigned long flags;
536 struct tty_struct *tty; 536 struct tty_struct *tty;
537 struct board_chan *bc; 537 struct board_chan __iomem *bc;
538 538
539 if (!(ch->asyncflags & ASYNC_INITIALIZED)) 539 if (!(ch->asyncflags & ASYNC_INITIALIZED))
540 return; 540 return;
@@ -618,7 +618,7 @@ static int pc_write(struct tty_struct * tty,
618 struct channel *ch; 618 struct channel *ch;
619 unsigned long flags; 619 unsigned long flags;
620 int remain; 620 int remain;
621 struct board_chan *bc; 621 struct board_chan __iomem *bc;
622 622
623 /* ---------------------------------------------------------------- 623 /* ----------------------------------------------------------------
624 pc_write is primarily called directly by the kernel routine 624 pc_write is primarily called directly by the kernel routine
@@ -685,7 +685,7 @@ static int pc_write(struct tty_struct * tty,
685 ------------------------------------------------------------------- */ 685 ------------------------------------------------------------------- */
686 686
687 dataLen = min(bytesAvailable, dataLen); 687 dataLen = min(bytesAvailable, dataLen);
688 memcpy(ch->txptr + head, buf, dataLen); 688 memcpy_toio(ch->txptr + head, buf, dataLen);
689 buf += dataLen; 689 buf += dataLen;
690 head += dataLen; 690 head += dataLen;
691 amountCopied += dataLen; 691 amountCopied += dataLen;
@@ -726,7 +726,7 @@ static int pc_write_room(struct tty_struct *tty)
726 struct channel *ch; 726 struct channel *ch;
727 unsigned long flags; 727 unsigned long flags;
728 unsigned int head, tail; 728 unsigned int head, tail;
729 struct board_chan *bc; 729 struct board_chan __iomem *bc;
730 730
731 remain = 0; 731 remain = 0;
732 732
@@ -773,7 +773,7 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
773 int remain; 773 int remain;
774 unsigned long flags; 774 unsigned long flags;
775 struct channel *ch; 775 struct channel *ch;
776 struct board_chan *bc; 776 struct board_chan __iomem *bc;
777 777
778 /* --------------------------------------------------------- 778 /* ---------------------------------------------------------
779 verifyChannel returns the channel from the tty struct 779 verifyChannel returns the channel from the tty struct
@@ -830,7 +830,7 @@ static void pc_flush_buffer(struct tty_struct *tty)
830 unsigned int tail; 830 unsigned int tail;
831 unsigned long flags; 831 unsigned long flags;
832 struct channel *ch; 832 struct channel *ch;
833 struct board_chan *bc; 833 struct board_chan __iomem *bc;
834 /* --------------------------------------------------------- 834 /* ---------------------------------------------------------
835 verifyChannel returns the channel from the tty struct 835 verifyChannel returns the channel from the tty struct
836 if it is valid. This serves as a sanity check. 836 if it is valid. This serves as a sanity check.
@@ -976,7 +976,7 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
976 struct channel *ch; 976 struct channel *ch;
977 unsigned long flags; 977 unsigned long flags;
978 int line, retval, boardnum; 978 int line, retval, boardnum;
979 struct board_chan *bc; 979 struct board_chan __iomem *bc;
980 unsigned int head; 980 unsigned int head;
981 981
982 line = tty->index; 982 line = tty->index;
@@ -1041,7 +1041,7 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
1041 ch->statusflags = 0; 1041 ch->statusflags = 0;
1042 1042
1043 /* Save boards current modem status */ 1043 /* Save boards current modem status */
1044 ch->imodem = bc->mstat; 1044 ch->imodem = readb(&bc->mstat);
1045 1045
1046 /* ---------------------------------------------------------------- 1046 /* ----------------------------------------------------------------
1047 Set receive head and tail ptrs to each other. This indicates 1047 Set receive head and tail ptrs to each other. This indicates
@@ -1399,10 +1399,10 @@ static void post_fep_init(unsigned int crd)
1399{ /* Begin post_fep_init */ 1399{ /* Begin post_fep_init */
1400 1400
1401 int i; 1401 int i;
1402 unsigned char *memaddr; 1402 void __iomem *memaddr;
1403 struct global_data *gd; 1403 struct global_data __iomem *gd;
1404 struct board_info *bd; 1404 struct board_info *bd;
1405 struct board_chan *bc; 1405 struct board_chan __iomem *bc;
1406 struct channel *ch; 1406 struct channel *ch;
1407 int shrinkmem = 0, lowwater ; 1407 int shrinkmem = 0, lowwater ;
1408 1408
@@ -1461,7 +1461,7 @@ static void post_fep_init(unsigned int crd)
1461 8 and 64 of these structures. 1461 8 and 64 of these structures.
1462 -------------------------------------------------------------------- */ 1462 -------------------------------------------------------------------- */
1463 1463
1464 bc = (struct board_chan *)(memaddr + CHANSTRUCT); 1464 bc = memaddr + CHANSTRUCT;
1465 1465
1466 /* ------------------------------------------------------------------- 1466 /* -------------------------------------------------------------------
1467 The below assignment will set gd to point at the BEGINING of 1467 The below assignment will set gd to point at the BEGINING of
@@ -1470,7 +1470,7 @@ static void post_fep_init(unsigned int crd)
1470 pointer begins at 0xd10. 1470 pointer begins at 0xd10.
1471 ---------------------------------------------------------------------- */ 1471 ---------------------------------------------------------------------- */
1472 1472
1473 gd = (struct global_data *)(memaddr + GLOBAL); 1473 gd = memaddr + GLOBAL;
1474 1474
1475 /* -------------------------------------------------------------------- 1475 /* --------------------------------------------------------------------
1476 XEPORTS (address 0xc22) points at the number of channels the 1476 XEPORTS (address 0xc22) points at the number of channels the
@@ -1493,6 +1493,7 @@ static void post_fep_init(unsigned int crd)
1493 1493
1494 for (i = 0; i < bd->numports; i++, ch++, bc++) { /* Begin for each port */ 1494 for (i = 0; i < bd->numports; i++, ch++, bc++) { /* Begin for each port */
1495 unsigned long flags; 1495 unsigned long flags;
1496 u16 tseg, rseg;
1496 1497
1497 ch->brdchan = bc; 1498 ch->brdchan = bc;
1498 ch->mailbox = gd; 1499 ch->mailbox = gd;
@@ -1553,50 +1554,53 @@ static void post_fep_init(unsigned int crd)
1553 shrinkmem = 0; 1554 shrinkmem = 0;
1554 } 1555 }
1555 1556
1557 tseg = readw(&bc->tseg);
1558 rseg = readw(&bc->rseg);
1559
1556 switch (bd->type) { 1560 switch (bd->type) {
1557 1561
1558 case PCIXEM: 1562 case PCIXEM:
1559 case PCIXRJ: 1563 case PCIXRJ:
1560 case PCIXR: 1564 case PCIXR:
1561 /* Cover all the 2MEG cards */ 1565 /* Cover all the 2MEG cards */
1562 ch->txptr = memaddr + (((bc->tseg) << 4) & 0x1fffff); 1566 ch->txptr = memaddr + ((tseg << 4) & 0x1fffff);
1563 ch->rxptr = memaddr + (((bc->rseg) << 4) & 0x1fffff); 1567 ch->rxptr = memaddr + ((rseg << 4) & 0x1fffff);
1564 ch->txwin = FEPWIN | ((bc->tseg) >> 11); 1568 ch->txwin = FEPWIN | (tseg >> 11);
1565 ch->rxwin = FEPWIN | ((bc->rseg) >> 11); 1569 ch->rxwin = FEPWIN | (rseg >> 11);
1566 break; 1570 break;
1567 1571
1568 case PCXEM: 1572 case PCXEM:
1569 case EISAXEM: 1573 case EISAXEM:
1570 /* Cover all the 32K windowed cards */ 1574 /* Cover all the 32K windowed cards */
1571 /* Mask equal to window size - 1 */ 1575 /* Mask equal to window size - 1 */
1572 ch->txptr = memaddr + (((bc->tseg) << 4) & 0x7fff); 1576 ch->txptr = memaddr + ((tseg << 4) & 0x7fff);
1573 ch->rxptr = memaddr + (((bc->rseg) << 4) & 0x7fff); 1577 ch->rxptr = memaddr + ((rseg << 4) & 0x7fff);
1574 ch->txwin = FEPWIN | ((bc->tseg) >> 11); 1578 ch->txwin = FEPWIN | (tseg >> 11);
1575 ch->rxwin = FEPWIN | ((bc->rseg) >> 11); 1579 ch->rxwin = FEPWIN | (rseg >> 11);
1576 break; 1580 break;
1577 1581
1578 case PCXEVE: 1582 case PCXEVE:
1579 case PCXE: 1583 case PCXE:
1580 ch->txptr = memaddr + (((bc->tseg - bd->memory_seg) << 4) & 0x1fff); 1584 ch->txptr = memaddr + (((tseg - bd->memory_seg) << 4) & 0x1fff);
1581 ch->txwin = FEPWIN | ((bc->tseg - bd->memory_seg) >> 9); 1585 ch->txwin = FEPWIN | ((tseg - bd->memory_seg) >> 9);
1582 ch->rxptr = memaddr + (((bc->rseg - bd->memory_seg) << 4) & 0x1fff); 1586 ch->rxptr = memaddr + (((rseg - bd->memory_seg) << 4) & 0x1fff);
1583 ch->rxwin = FEPWIN | ((bc->rseg - bd->memory_seg) >>9 ); 1587 ch->rxwin = FEPWIN | ((rseg - bd->memory_seg) >>9 );
1584 break; 1588 break;
1585 1589
1586 case PCXI: 1590 case PCXI:
1587 case PC64XE: 1591 case PC64XE:
1588 ch->txptr = memaddr + ((bc->tseg - bd->memory_seg) << 4); 1592 ch->txptr = memaddr + ((tseg - bd->memory_seg) << 4);
1589 ch->rxptr = memaddr + ((bc->rseg - bd->memory_seg) << 4); 1593 ch->rxptr = memaddr + ((rseg - bd->memory_seg) << 4);
1590 ch->txwin = ch->rxwin = 0; 1594 ch->txwin = ch->rxwin = 0;
1591 break; 1595 break;
1592 1596
1593 } /* End switch bd->type */ 1597 } /* End switch bd->type */
1594 1598
1595 ch->txbufhead = 0; 1599 ch->txbufhead = 0;
1596 ch->txbufsize = bc->tmax + 1; 1600 ch->txbufsize = readw(&bc->tmax) + 1;
1597 1601
1598 ch->rxbufhead = 0; 1602 ch->rxbufhead = 0;
1599 ch->rxbufsize = bc->rmax + 1; 1603 ch->rxbufsize = readw(&bc->rmax) + 1;
1600 1604
1601 lowwater = ch->txbufsize >= 2000 ? 1024 : (ch->txbufsize / 2); 1605 lowwater = ch->txbufsize >= 2000 ? 1024 : (ch->txbufsize / 2);
1602 1606
@@ -1718,11 +1722,11 @@ static void epcapoll(unsigned long ignored)
1718static void doevent(int crd) 1722static void doevent(int crd)
1719{ /* Begin doevent */ 1723{ /* Begin doevent */
1720 1724
1721 void *eventbuf; 1725 void __iomem *eventbuf;
1722 struct channel *ch, *chan0; 1726 struct channel *ch, *chan0;
1723 static struct tty_struct *tty; 1727 static struct tty_struct *tty;
1724 struct board_info *bd; 1728 struct board_info *bd;
1725 struct board_chan *bc; 1729 struct board_chan __iomem *bc;
1726 unsigned int tail, head; 1730 unsigned int tail, head;
1727 int event, channel; 1731 int event, channel;
1728 int mstat, lstat; 1732 int mstat, lstat;
@@ -1817,7 +1821,7 @@ static void doevent(int crd)
1817static void fepcmd(struct channel *ch, int cmd, int word_or_byte, 1821static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
1818 int byte2, int ncmds, int bytecmd) 1822 int byte2, int ncmds, int bytecmd)
1819{ /* Begin fepcmd */ 1823{ /* Begin fepcmd */
1820 unchar *memaddr; 1824 unchar __iomem *memaddr;
1821 unsigned int head, cmdTail, cmdStart, cmdMax; 1825 unsigned int head, cmdTail, cmdStart, cmdMax;
1822 long count; 1826 long count;
1823 int n; 1827 int n;
@@ -2000,7 +2004,7 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
2000 2004
2001 unsigned int cmdHead; 2005 unsigned int cmdHead;
2002 struct termios *ts; 2006 struct termios *ts;
2003 struct board_chan *bc; 2007 struct board_chan __iomem *bc;
2004 unsigned mval, hflow, cflag, iflag; 2008 unsigned mval, hflow, cflag, iflag;
2005 2009
2006 bc = ch->brdchan; 2010 bc = ch->brdchan;
@@ -2010,7 +2014,7 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
2010 ts = tty->termios; 2014 ts = tty->termios;
2011 if ((ts->c_cflag & CBAUD) == 0) { /* Begin CBAUD detected */ 2015 if ((ts->c_cflag & CBAUD) == 0) { /* Begin CBAUD detected */
2012 cmdHead = readw(&bc->rin); 2016 cmdHead = readw(&bc->rin);
2013 bc->rout = cmdHead; 2017 writew(cmdHead, &bc->rout);
2014 cmdHead = readw(&bc->tin); 2018 cmdHead = readw(&bc->tin);
2015 /* Changing baud in mid-stream transmission can be wonderful */ 2019 /* Changing baud in mid-stream transmission can be wonderful */
2016 /* --------------------------------------------------------------- 2020 /* ---------------------------------------------------------------
@@ -2116,7 +2120,7 @@ static void receive_data(struct channel *ch)
2116 unchar *rptr; 2120 unchar *rptr;
2117 struct termios *ts = NULL; 2121 struct termios *ts = NULL;
2118 struct tty_struct *tty; 2122 struct tty_struct *tty;
2119 struct board_chan *bc; 2123 struct board_chan __iomem *bc;
2120 int dataToRead, wrapgap, bytesAvailable; 2124 int dataToRead, wrapgap, bytesAvailable;
2121 unsigned int tail, head; 2125 unsigned int tail, head;
2122 unsigned int wrapmask; 2126 unsigned int wrapmask;
@@ -2154,7 +2158,7 @@ static void receive_data(struct channel *ch)
2154 --------------------------------------------------------------------- */ 2158 --------------------------------------------------------------------- */
2155 2159
2156 if (!tty || !ts || !(ts->c_cflag & CREAD)) { 2160 if (!tty || !ts || !(ts->c_cflag & CREAD)) {
2157 bc->rout = head; 2161 writew(head, &bc->rout);
2158 return; 2162 return;
2159 } 2163 }
2160 2164
@@ -2270,7 +2274,7 @@ static int info_ioctl(struct tty_struct *tty, struct file * file,
2270static int pc_tiocmget(struct tty_struct *tty, struct file *file) 2274static int pc_tiocmget(struct tty_struct *tty, struct file *file)
2271{ 2275{
2272 struct channel *ch = (struct channel *) tty->driver_data; 2276 struct channel *ch = (struct channel *) tty->driver_data;
2273 struct board_chan *bc; 2277 struct board_chan __iomem *bc;
2274 unsigned int mstat, mflag = 0; 2278 unsigned int mstat, mflag = 0;
2275 unsigned long flags; 2279 unsigned long flags;
2276 2280
@@ -2351,7 +2355,7 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2351 unsigned long flags; 2355 unsigned long flags;
2352 unsigned int mflag, mstat; 2356 unsigned int mflag, mstat;
2353 unsigned char startc, stopc; 2357 unsigned char startc, stopc;
2354 struct board_chan *bc; 2358 struct board_chan __iomem *bc;
2355 struct channel *ch = (struct channel *) tty->driver_data; 2359 struct channel *ch = (struct channel *) tty->driver_data;
2356 void __user *argp = (void __user *)arg; 2360 void __user *argp = (void __user *)arg;
2357 2361
@@ -2633,7 +2637,7 @@ static void pc_start(struct tty_struct *tty)
2633 spin_lock_irqsave(&epca_lock, flags); 2637 spin_lock_irqsave(&epca_lock, flags);
2634 /* Just in case output was resumed because of a change in Digi-flow */ 2638 /* Just in case output was resumed because of a change in Digi-flow */
2635 if (ch->statusflags & TXSTOPPED) { /* Begin transmit resume requested */ 2639 if (ch->statusflags & TXSTOPPED) { /* Begin transmit resume requested */
2636 struct board_chan *bc; 2640 struct board_chan __iomem *bc;
2637 globalwinon(ch); 2641 globalwinon(ch);
2638 bc = ch->brdchan; 2642 bc = ch->brdchan;
2639 if (ch->statusflags & LOWWAIT) 2643 if (ch->statusflags & LOWWAIT)
@@ -2727,7 +2731,7 @@ void digi_send_break(struct channel *ch, int msec)
2727static void setup_empty_event(struct tty_struct *tty, struct channel *ch) 2731static void setup_empty_event(struct tty_struct *tty, struct channel *ch)
2728{ /* Begin setup_empty_event */ 2732{ /* Begin setup_empty_event */
2729 2733
2730 struct board_chan *bc = ch->brdchan; 2734 struct board_chan __iomem *bc = ch->brdchan;
2731 2735
2732 globalwinon(ch); 2736 globalwinon(ch);
2733 ch->statusflags |= EMPTYWAIT; 2737 ch->statusflags |= EMPTYWAIT;
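
The epca changes above all serve one purpose: pointers into the card's memory window now carry the __iomem annotation and are dereferenced only through readw()/writew() rather than plain loads and stores. A minimal sketch of that access pattern, using a hypothetical register layout rather than the real epca board_chan structure:

    #include <linux/io.h>
    #include <linux/types.h>

    /* hypothetical two-register window, not the epca layout */
    struct demo_chan_regs {
            u16 tin;
            u16 tout;
    };

    static u16 demo_sync_tx(struct demo_chan_regs __iomem *bc)
    {
            u16 head = readw(&bc->tin);     /* MMIO read through the accessor */

            writew(head, &bc->tout);        /* MMIO write, never a direct store */
            return head;
    }

With the annotation in place, sparse (make C=1) flags any direct dereference of such a pointer, which is how plain assignments like the ones replaced above get caught.
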
diff --git a/drivers/char/epca.h b/drivers/char/epca.h
index 20eeb5a70e1a..456d6c8f94a8 100644
--- a/drivers/char/epca.h
+++ b/drivers/char/epca.h
@@ -128,17 +128,17 @@ struct channel
128 unsigned long c_cflag; 128 unsigned long c_cflag;
129 unsigned long c_lflag; 129 unsigned long c_lflag;
130 unsigned long c_oflag; 130 unsigned long c_oflag;
131 unsigned char *txptr; 131 unsigned char __iomem *txptr;
132 unsigned char *rxptr; 132 unsigned char __iomem *rxptr;
133 unsigned char *tmp_buf; 133 unsigned char *tmp_buf;
134 struct board_info *board; 134 struct board_info *board;
135 struct board_chan *brdchan; 135 struct board_chan __iomem *brdchan;
136 struct digi_struct digiext; 136 struct digi_struct digiext;
137 struct tty_struct *tty; 137 struct tty_struct *tty;
138 wait_queue_head_t open_wait; 138 wait_queue_head_t open_wait;
139 wait_queue_head_t close_wait; 139 wait_queue_head_t close_wait;
140 struct work_struct tqueue; 140 struct work_struct tqueue;
141 struct global_data *mailbox; 141 struct global_data __iomem *mailbox;
142}; 142};
143 143
144struct board_info 144struct board_info
@@ -149,8 +149,8 @@ struct board_info
149 unsigned short numports; 149 unsigned short numports;
150 unsigned long port; 150 unsigned long port;
151 unsigned long membase; 151 unsigned long membase;
152 unsigned char __iomem *re_map_port; 152 void __iomem *re_map_port;
153 unsigned char *re_map_membase; 153 void __iomem *re_map_membase;
154 unsigned long memory_seg; 154 unsigned long memory_seg;
155 void ( * memwinon ) (struct board_info *, unsigned int) ; 155 void ( * memwinon ) (struct board_info *, unsigned int) ;
156 void ( * memwinoff ) (struct board_info *, unsigned int) ; 156 void ( * memwinoff ) (struct board_info *, unsigned int) ;
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index de0379b6d502..c055bb630ffc 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -273,7 +273,6 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
273 273
274 vma->vm_flags |= VM_IO; 274 vma->vm_flags |= VM_IO;
275 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 275 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
276 addr = __pa(addr);
277 276
278 if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, 277 if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
279 PAGE_SIZE, vma->vm_page_prot)) { 278 PAGE_SIZE, vma->vm_page_prot)) {
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index cddb789902db..f92177634677 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -839,9 +839,6 @@ int __init hvc_init(void)
839 hvc_driver->flags = TTY_DRIVER_REAL_RAW; 839 hvc_driver->flags = TTY_DRIVER_REAL_RAW;
840 tty_set_operations(hvc_driver, &hvc_ops); 840 tty_set_operations(hvc_driver, &hvc_ops);
841 841
842 if (tty_register_driver(hvc_driver))
843 panic("Couldn't register hvc console driver\n");
844
845 /* Always start the kthread because there can be hotplug vty adapters 842 /* Always start the kthread because there can be hotplug vty adapters
846 * added later. */ 843 * added later. */
847 hvc_task = kthread_run(khvcd, NULL, "khvcd"); 844 hvc_task = kthread_run(khvcd, NULL, "khvcd");
@@ -851,6 +848,9 @@ int __init hvc_init(void)
851 return -EIO; 848 return -EIO;
852 } 849 }
853 850
851 if (tty_register_driver(hvc_driver))
852 panic("Couldn't register hvc console driver\n");
853
854 return 0; 854 return 0;
855} 855}
856module_init(hvc_init); 856module_init(hvc_init);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 463351d4f942..32fa82c78c73 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2620,7 +2620,7 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
2620 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags); 2620 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
2621 if (!list_empty(&(intf->waiting_msgs))) { 2621 if (!list_empty(&(intf->waiting_msgs))) {
2622 list_add_tail(&(msg->link), &(intf->waiting_msgs)); 2622 list_add_tail(&(msg->link), &(intf->waiting_msgs));
2623 spin_unlock(&(intf->waiting_msgs_lock)); 2623 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
2624 goto out_unlock; 2624 goto out_unlock;
2625 } 2625 }
2626 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); 2626 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
@@ -2629,9 +2629,9 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
2629 if (rv > 0) { 2629 if (rv > 0) {
2630 /* Could not handle the message now, just add it to a 2630 /* Could not handle the message now, just add it to a
2631 list to handle later. */ 2631 list to handle later. */
2632 spin_lock(&(intf->waiting_msgs_lock)); 2632 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
2633 list_add_tail(&(msg->link), &(intf->waiting_msgs)); 2633 list_add_tail(&(msg->link), &(intf->waiting_msgs));
2634 spin_unlock(&(intf->waiting_msgs_lock)); 2634 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
2635 } else if (rv == 0) { 2635 } else if (rv == 0) {
2636 ipmi_free_smi_msg(msg); 2636 ipmi_free_smi_msg(msg);
2637 } 2637 }
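
The ipmi_msghandler fix above makes every acquisition of waiting_msgs_lock use the irqsave/irqrestore pair, so the interrupt state saved when the lock is taken is exactly what gets restored when it is released. The old code unlocked an irqsave-acquired lock with plain spin_unlock() on the early-exit path (leaving interrupts disabled) and took the lock without disabling interrupts on the other (risking deadlock against the interrupt path). A minimal sketch of the corrected pattern, with a stand-in lock and list in place of the intf fields:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    static DEFINE_SPINLOCK(demo_lock);      /* stand-in for intf->waiting_msgs_lock */
    static LIST_HEAD(demo_queue);           /* stand-in for intf->waiting_msgs */

    static void demo_queue_msg(struct list_head *link)
    {
            unsigned long flags;

            spin_lock_irqsave(&demo_lock, flags);           /* disable local IRQs, save state */
            list_add_tail(link, &demo_queue);
            spin_unlock_irqrestore(&demo_lock, flags);      /* restore exactly what was saved */
    }
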
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index e82a96ba396b..f66947722e12 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -55,7 +55,7 @@ extern void (*pm_power_off)(void);
55static int poweroff_powercycle; 55static int poweroff_powercycle;
56 56
57/* parameter definition to allow user to flag power cycle */ 57/* parameter definition to allow user to flag power cycle */
58module_param(poweroff_powercycle, int, 0); 58module_param(poweroff_powercycle, int, 0644);
59MODULE_PARM_DESC(poweroff_powercycles, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down."); 59MODULE_PARM_DESC(poweroff_powercycles, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down.");
60 60
61/* Stuff from the get device id command. */ 61/* Stuff from the get device id command. */
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index 2291a87e8ada..97d6dc24b800 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -229,8 +229,8 @@ static int __init r3964_init(void)
229 TRACE_L("line discipline %d registered", N_R3964); 229 TRACE_L("line discipline %d registered", N_R3964);
230 TRACE_L("flags=%x num=%x", tty_ldisc_N_R3964.flags, 230 TRACE_L("flags=%x num=%x", tty_ldisc_N_R3964.flags,
231 tty_ldisc_N_R3964.num); 231 tty_ldisc_N_R3964.num);
232 TRACE_L("open=%x", (int)tty_ldisc_N_R3964.open); 232 TRACE_L("open=%p", tty_ldisc_N_R3964.open);
233 TRACE_L("tty_ldisc_N_R3964 = %x", (int)&tty_ldisc_N_R3964); 233 TRACE_L("tty_ldisc_N_R3964 = %p", &tty_ldisc_N_R3964);
234 } 234 }
235 else 235 else
236 { 236 {
@@ -267,8 +267,8 @@ static void add_tx_queue(struct r3964_info *pInfo, struct r3964_block_header *pH
267 267
268 spin_unlock_irqrestore(&pInfo->lock, flags); 268 spin_unlock_irqrestore(&pInfo->lock, flags);
269 269
270 TRACE_Q("add_tx_queue %x, length %d, tx_first = %x", 270 TRACE_Q("add_tx_queue %p, length %d, tx_first = %p",
271 (int)pHeader, pHeader->length, (int)pInfo->tx_first ); 271 pHeader, pHeader->length, pInfo->tx_first );
272} 272}
273 273
274static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code) 274static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code)
@@ -285,10 +285,10 @@ static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code)
285 return; 285 return;
286 286
287#ifdef DEBUG_QUEUE 287#ifdef DEBUG_QUEUE
288 printk("r3964: remove_from_tx_queue: %x, length %d - ", 288 printk("r3964: remove_from_tx_queue: %p, length %u - ",
289 (int)pHeader, (int)pHeader->length ); 289 pHeader, pHeader->length );
290 for(pDump=pHeader;pDump;pDump=pDump->next) 290 for(pDump=pHeader;pDump;pDump=pDump->next)
291 printk("%x ", (int)pDump); 291 printk("%p ", pDump);
292 printk("\n"); 292 printk("\n");
293#endif 293#endif
294 294
@@ -319,10 +319,10 @@ static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code)
319 spin_unlock_irqrestore(&pInfo->lock, flags); 319 spin_unlock_irqrestore(&pInfo->lock, flags);
320 320
321 kfree(pHeader); 321 kfree(pHeader);
322 TRACE_M("remove_from_tx_queue - kfree %x",(int)pHeader); 322 TRACE_M("remove_from_tx_queue - kfree %p",pHeader);
323 323
324 TRACE_Q("remove_from_tx_queue: tx_first = %x, tx_last = %x", 324 TRACE_Q("remove_from_tx_queue: tx_first = %p, tx_last = %p",
325 (int)pInfo->tx_first, (int)pInfo->tx_last ); 325 pInfo->tx_first, pInfo->tx_last );
326} 326}
327 327
328static void add_rx_queue(struct r3964_info *pInfo, struct r3964_block_header *pHeader) 328static void add_rx_queue(struct r3964_info *pInfo, struct r3964_block_header *pHeader)
@@ -346,9 +346,9 @@ static void add_rx_queue(struct r3964_info *pInfo, struct r3964_block_header *pH
346 346
347 spin_unlock_irqrestore(&pInfo->lock, flags); 347 spin_unlock_irqrestore(&pInfo->lock, flags);
348 348
349 TRACE_Q("add_rx_queue: %x, length = %d, rx_first = %x, count = %d", 349 TRACE_Q("add_rx_queue: %p, length = %d, rx_first = %p, count = %d",
350 (int)pHeader, pHeader->length, 350 pHeader, pHeader->length,
351 (int)pInfo->rx_first, pInfo->blocks_in_rx_queue); 351 pInfo->rx_first, pInfo->blocks_in_rx_queue);
352} 352}
353 353
354static void remove_from_rx_queue(struct r3964_info *pInfo, 354static void remove_from_rx_queue(struct r3964_info *pInfo,
@@ -360,10 +360,10 @@ static void remove_from_rx_queue(struct r3964_info *pInfo,
360 if(pHeader==NULL) 360 if(pHeader==NULL)
361 return; 361 return;
362 362
363 TRACE_Q("remove_from_rx_queue: rx_first = %x, rx_last = %x, count = %d", 363 TRACE_Q("remove_from_rx_queue: rx_first = %p, rx_last = %p, count = %d",
364 (int)pInfo->rx_first, (int)pInfo->rx_last, pInfo->blocks_in_rx_queue ); 364 pInfo->rx_first, pInfo->rx_last, pInfo->blocks_in_rx_queue );
365 TRACE_Q("remove_from_rx_queue: %x, length %d", 365 TRACE_Q("remove_from_rx_queue: %p, length %u",
366 (int)pHeader, (int)pHeader->length ); 366 pHeader, pHeader->length );
367 367
368 spin_lock_irqsave(&pInfo->lock, flags); 368 spin_lock_irqsave(&pInfo->lock, flags);
369 369
@@ -401,10 +401,10 @@ static void remove_from_rx_queue(struct r3964_info *pInfo,
401 spin_unlock_irqrestore(&pInfo->lock, flags); 401 spin_unlock_irqrestore(&pInfo->lock, flags);
402 402
403 kfree(pHeader); 403 kfree(pHeader);
404 TRACE_M("remove_from_rx_queue - kfree %x",(int)pHeader); 404 TRACE_M("remove_from_rx_queue - kfree %p",pHeader);
405 405
406 TRACE_Q("remove_from_rx_queue: rx_first = %x, rx_last = %x, count = %d", 406 TRACE_Q("remove_from_rx_queue: rx_first = %p, rx_last = %p, count = %d",
407 (int)pInfo->rx_first, (int)pInfo->rx_last, pInfo->blocks_in_rx_queue ); 407 pInfo->rx_first, pInfo->rx_last, pInfo->blocks_in_rx_queue );
408} 408}
409 409
410static void put_char(struct r3964_info *pInfo, unsigned char ch) 410static void put_char(struct r3964_info *pInfo, unsigned char ch)
@@ -506,8 +506,8 @@ static void transmit_block(struct r3964_info *pInfo)
506 if(tty->driver->write_room) 506 if(tty->driver->write_room)
507 room=tty->driver->write_room(tty); 507 room=tty->driver->write_room(tty);
508 508
509 TRACE_PS("transmit_block %x, room %d, length %d", 509 TRACE_PS("transmit_block %p, room %d, length %d",
510 (int)pBlock, room, pBlock->length); 510 pBlock, room, pBlock->length);
511 511
512 while(pInfo->tx_position < pBlock->length) 512 while(pInfo->tx_position < pBlock->length)
513 { 513 {
@@ -588,7 +588,7 @@ static void on_receive_block(struct r3964_info *pInfo)
588 588
589 /* prepare struct r3964_block_header: */ 589 /* prepare struct r3964_block_header: */
590 pBlock = kmalloc(length+sizeof(struct r3964_block_header), GFP_KERNEL); 590 pBlock = kmalloc(length+sizeof(struct r3964_block_header), GFP_KERNEL);
591 TRACE_M("on_receive_block - kmalloc %x",(int)pBlock); 591 TRACE_M("on_receive_block - kmalloc %p",pBlock);
592 592
593 if(pBlock==NULL) 593 if(pBlock==NULL)
594 return; 594 return;
@@ -868,11 +868,11 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg)
868 if(pMsg) 868 if(pMsg)
869 { 869 {
870 kfree(pMsg); 870 kfree(pMsg);
871 TRACE_M("enable_signals - msg kfree %x",(int)pMsg); 871 TRACE_M("enable_signals - msg kfree %p",pMsg);
872 } 872 }
873 } 873 }
874 kfree(pClient); 874 kfree(pClient);
875 TRACE_M("enable_signals - kfree %x",(int)pClient); 875 TRACE_M("enable_signals - kfree %p",pClient);
876 return 0; 876 return 0;
877 } 877 }
878 } 878 }
@@ -890,7 +890,7 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg)
890 { 890 {
891 /* add client to client list */ 891 /* add client to client list */
892 pClient=kmalloc(sizeof(struct r3964_client_info), GFP_KERNEL); 892 pClient=kmalloc(sizeof(struct r3964_client_info), GFP_KERNEL);
893 TRACE_M("enable_signals - kmalloc %x",(int)pClient); 893 TRACE_M("enable_signals - kmalloc %p",pClient);
894 if(pClient==NULL) 894 if(pClient==NULL)
895 return -ENOMEM; 895 return -ENOMEM;
896 896
@@ -954,7 +954,7 @@ static void add_msg(struct r3964_client_info *pClient, int msg_id, int arg,
954queue_the_message: 954queue_the_message:
955 955
956 pMsg = kmalloc(sizeof(struct r3964_message), GFP_KERNEL); 956 pMsg = kmalloc(sizeof(struct r3964_message), GFP_KERNEL);
957 TRACE_M("add_msg - kmalloc %x",(int)pMsg); 957 TRACE_M("add_msg - kmalloc %p",pMsg);
958 if(pMsg==NULL) { 958 if(pMsg==NULL) {
959 return; 959 return;
960 } 960 }
@@ -1067,11 +1067,11 @@ static int r3964_open(struct tty_struct *tty)
1067 struct r3964_info *pInfo; 1067 struct r3964_info *pInfo;
1068 1068
1069 TRACE_L("open"); 1069 TRACE_L("open");
1070 TRACE_L("tty=%x, PID=%d, disc_data=%x", 1070 TRACE_L("tty=%p, PID=%d, disc_data=%p",
1071 (int)tty, current->pid, (int)tty->disc_data); 1071 tty, current->pid, tty->disc_data);
1072 1072
1073 pInfo=kmalloc(sizeof(struct r3964_info), GFP_KERNEL); 1073 pInfo=kmalloc(sizeof(struct r3964_info), GFP_KERNEL);
1074 TRACE_M("r3964_open - info kmalloc %x",(int)pInfo); 1074 TRACE_M("r3964_open - info kmalloc %p",pInfo);
1075 1075
1076 if(!pInfo) 1076 if(!pInfo)
1077 { 1077 {
@@ -1080,26 +1080,26 @@ static int r3964_open(struct tty_struct *tty)
1080 } 1080 }
1081 1081
1082 pInfo->rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL); 1082 pInfo->rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
1083 TRACE_M("r3964_open - rx_buf kmalloc %x",(int)pInfo->rx_buf); 1083 TRACE_M("r3964_open - rx_buf kmalloc %p",pInfo->rx_buf);
1084 1084
1085 if(!pInfo->rx_buf) 1085 if(!pInfo->rx_buf)
1086 { 1086 {
1087 printk(KERN_ERR "r3964: failed to alloc receive buffer\n"); 1087 printk(KERN_ERR "r3964: failed to alloc receive buffer\n");
1088 kfree(pInfo); 1088 kfree(pInfo);
1089 TRACE_M("r3964_open - info kfree %x",(int)pInfo); 1089 TRACE_M("r3964_open - info kfree %p",pInfo);
1090 return -ENOMEM; 1090 return -ENOMEM;
1091 } 1091 }
1092 1092
1093 pInfo->tx_buf = kmalloc(TX_BUF_SIZE, GFP_KERNEL); 1093 pInfo->tx_buf = kmalloc(TX_BUF_SIZE, GFP_KERNEL);
1094 TRACE_M("r3964_open - tx_buf kmalloc %x",(int)pInfo->tx_buf); 1094 TRACE_M("r3964_open - tx_buf kmalloc %p",pInfo->tx_buf);
1095 1095
1096 if(!pInfo->tx_buf) 1096 if(!pInfo->tx_buf)
1097 { 1097 {
1098 printk(KERN_ERR "r3964: failed to alloc transmit buffer\n"); 1098 printk(KERN_ERR "r3964: failed to alloc transmit buffer\n");
1099 kfree(pInfo->rx_buf); 1099 kfree(pInfo->rx_buf);
1100 TRACE_M("r3964_open - rx_buf kfree %x",(int)pInfo->rx_buf); 1100 TRACE_M("r3964_open - rx_buf kfree %p",pInfo->rx_buf);
1101 kfree(pInfo); 1101 kfree(pInfo);
1102 TRACE_M("r3964_open - info kfree %x",(int)pInfo); 1102 TRACE_M("r3964_open - info kfree %p",pInfo);
1103 return -ENOMEM; 1103 return -ENOMEM;
1104 } 1104 }
1105 1105
@@ -1154,11 +1154,11 @@ static void r3964_close(struct tty_struct *tty)
1154 if(pMsg) 1154 if(pMsg)
1155 { 1155 {
1156 kfree(pMsg); 1156 kfree(pMsg);
1157 TRACE_M("r3964_close - msg kfree %x",(int)pMsg); 1157 TRACE_M("r3964_close - msg kfree %p",pMsg);
1158 } 1158 }
1159 } 1159 }
1160 kfree(pClient); 1160 kfree(pClient);
1161 TRACE_M("r3964_close - client kfree %x",(int)pClient); 1161 TRACE_M("r3964_close - client kfree %p",pClient);
1162 pClient=pNext; 1162 pClient=pNext;
1163 } 1163 }
1164 /* Remove jobs from tx_queue: */ 1164 /* Remove jobs from tx_queue: */
@@ -1177,11 +1177,11 @@ static void r3964_close(struct tty_struct *tty)
1177 /* Free buffers: */ 1177 /* Free buffers: */
1178 wake_up_interruptible(&pInfo->read_wait); 1178 wake_up_interruptible(&pInfo->read_wait);
1179 kfree(pInfo->rx_buf); 1179 kfree(pInfo->rx_buf);
1180 TRACE_M("r3964_close - rx_buf kfree %x",(int)pInfo->rx_buf); 1180 TRACE_M("r3964_close - rx_buf kfree %p",pInfo->rx_buf);
1181 kfree(pInfo->tx_buf); 1181 kfree(pInfo->tx_buf);
1182 TRACE_M("r3964_close - tx_buf kfree %x",(int)pInfo->tx_buf); 1182 TRACE_M("r3964_close - tx_buf kfree %p",pInfo->tx_buf);
1183 kfree(pInfo); 1183 kfree(pInfo);
1184 TRACE_M("r3964_close - info kfree %x",(int)pInfo); 1184 TRACE_M("r3964_close - info kfree %p",pInfo);
1185} 1185}
1186 1186
1187static ssize_t r3964_read(struct tty_struct *tty, struct file *file, 1187static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
@@ -1234,7 +1234,7 @@ repeat:
1234 count = sizeof(struct r3964_client_message); 1234 count = sizeof(struct r3964_client_message);
1235 1235
1236 kfree(pMsg); 1236 kfree(pMsg);
1237 TRACE_M("r3964_read - msg kfree %x",(int)pMsg); 1237 TRACE_M("r3964_read - msg kfree %p",pMsg);
1238 1238
1239 if (copy_to_user(buf,&theMsg, count)) 1239 if (copy_to_user(buf,&theMsg, count))
1240 return -EFAULT; 1240 return -EFAULT;
@@ -1279,7 +1279,7 @@ static ssize_t r3964_write(struct tty_struct * tty, struct file * file,
1279 * Allocate a buffer for the data and copy it from the buffer with header prepended 1279 * Allocate a buffer for the data and copy it from the buffer with header prepended
1280 */ 1280 */
1281 new_data = kmalloc (count+sizeof(struct r3964_block_header), GFP_KERNEL); 1281 new_data = kmalloc (count+sizeof(struct r3964_block_header), GFP_KERNEL);
1282 TRACE_M("r3964_write - kmalloc %x",(int)new_data); 1282 TRACE_M("r3964_write - kmalloc %p",new_data);
1283 if (new_data == NULL) { 1283 if (new_data == NULL) {
1284 if (pInfo->flags & R3964_DEBUG) 1284 if (pInfo->flags & R3964_DEBUG)
1285 { 1285 {
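
Every n_r3964 trace change above is the same substitution: pointers were cast to int and printed with %x, which truncates on 64-bit targets and provokes cast warnings, whereas %p prints the pointer value directly. A minimal illustration with a hypothetical allocation:

    #include <linux/kernel.h>
    #include <linux/slab.h>

    static void demo_trace_alloc(void)
    {
            void *p = kmalloc(64, GFP_KERNEL);

            /* old style, truncates on 64-bit: printk("%x", (int)p); */
            printk(KERN_DEBUG "demo alloc %p\n", p);        /* portable on 32- and 64-bit */
            kfree(p);
    }
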
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 1e33cb032e07..e91268e86833 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -810,13 +810,14 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
810 * from the top and bottom of cursor position 810 * from the top and bottom of cursor position
811 */ 811 */
812 old_origin += (vc->vc_y - new_rows/2) * old_row_size; 812 old_origin += (vc->vc_y - new_rows/2) * old_row_size;
813 end = old_origin + new_screen_size; 813 end = old_origin + (old_row_size * new_rows);
814 } 814 }
815 } else 815 } else
816 /* 816 /*
817 * Cursor near the top, copy contents from the top of buffer 817 * Cursor near the top, copy contents from the top of buffer
818 */ 818 */
819 end = (old_rows > new_rows) ? old_origin + new_screen_size : 819 end = (old_rows > new_rows) ? old_origin +
820 (old_row_size * new_rows) :
820 vc->vc_scr_end; 821 vc->vc_scr_end;
821 822
822 update_attr(vc); 823 update_attr(vc);
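
The vc_resize change above fixes how far the copy window extends into the old screen buffer: the data being copied still has the old line width, so a window of new_rows lines spans old_row_size * new_rows bytes, not new_screen_size (which is built from the new row size). As a rough worked example, assuming the usual two bytes per character cell: resizing an 80-column console to 132 columns while keeping 25 visible rows gives old_row_size * new_rows = 160 * 25 = 4000 bytes of old data to copy, whereas new_screen_size would be about 264 * 25 = 6600 bytes and would read past the rows actually present in the old layout.
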
diff --git a/drivers/char/watchdog/Kconfig b/drivers/char/watchdog/Kconfig
index fa789ea36bbe..344001b45af9 100644
--- a/drivers/char/watchdog/Kconfig
+++ b/drivers/char/watchdog/Kconfig
@@ -84,6 +84,17 @@ config 977_WATCHDOG
84 84
85 Not sure? It's safe to say N. 85 Not sure? It's safe to say N.
86 86
87config IXP2000_WATCHDOG
88 tristate "IXP2000 Watchdog"
89 depends on WATCHDOG && ARCH_IXP2000
90 help
 91	  Say Y here if you want to include support for the watchdog timer
92 in the Intel IXP2000(2400, 2800, 2850) network processors.
93 This driver can be built as a module by choosing M. The module
94 will be called ixp2000_wdt.
95
96 Say N if you are unsure.
97
87config IXP4XX_WATCHDOG 98config IXP4XX_WATCHDOG
88 tristate "IXP4xx Watchdog" 99 tristate "IXP4xx Watchdog"
89 depends on WATCHDOG && ARCH_IXP4XX 100 depends on WATCHDOG && ARCH_IXP4XX
@@ -100,17 +111,6 @@ config IXP4XX_WATCHDOG
100 111
101 Say N if you are unsure. 112 Say N if you are unsure.
102 113
103config IXP2000_WATCHDOG
104 tristate "IXP2000 Watchdog"
105 depends on WATCHDOG && ARCH_IXP2000
106 help
107 Say Y here if to include support for the watchdog timer
108 in the Intel IXP2000(2400, 2800, 2850) network processors.
109 This driver can be built as a module by choosing M. The module
110 will be called ixp2000_wdt.
111
112 Say N if you are unsure.
113
114config S3C2410_WATCHDOG 114config S3C2410_WATCHDOG
115 tristate "S3C2410 Watchdog" 115 tristate "S3C2410 Watchdog"
116 depends on WATCHDOG && ARCH_S3C2410 116 depends on WATCHDOG && ARCH_S3C2410
@@ -233,6 +233,16 @@ config IB700_WDT
233 233
234 Most people will say N. 234 Most people will say N.
235 235
236config IBMASR
237 tristate "IBM Automatic Server Restart"
238 depends on WATCHDOG && X86
239 help
240 This is the driver for the IBM Automatic Server Restart watchdog
241	  timer built into some eServer xSeries machines.
242
243 To compile this driver as a module, choose M here: the
244 module will be called ibmasr.
245
236config WAFER_WDT 246config WAFER_WDT
237 tristate "ICP Wafer 5823 Single Board Computer Watchdog" 247 tristate "ICP Wafer 5823 Single Board Computer Watchdog"
238 depends on WATCHDOG && X86 248 depends on WATCHDOG && X86
@@ -243,6 +253,16 @@ config WAFER_WDT
243 To compile this driver as a module, choose M here: the 253 To compile this driver as a module, choose M here: the
244 module will be called wafer5823wdt. 254 module will be called wafer5823wdt.
245 255
256config I6300ESB_WDT
257 tristate "Intel 6300ESB Timer/Watchdog"
258 depends on WATCHDOG && X86 && PCI
259 ---help---
260 Hardware driver for the watchdog timer built into the Intel
261 6300ESB controller hub.
262
263 To compile this driver as a module, choose M here: the
264 module will be called i6300esb.
265
246config I8XX_TCO 266config I8XX_TCO
247 tristate "Intel i8xx TCO Timer/Watchdog" 267 tristate "Intel i8xx TCO Timer/Watchdog"
248 depends on WATCHDOG && (X86 || IA64) && PCI 268 depends on WATCHDOG && (X86 || IA64) && PCI
@@ -298,6 +318,19 @@ config 60XX_WDT
298 You can compile this driver directly into the kernel, or use 318 You can compile this driver directly into the kernel, or use
299 it as a module. The module will be called sbc60xxwdt. 319 it as a module. The module will be called sbc60xxwdt.
300 320
321config SBC8360_WDT
322 tristate "SBC8360 Watchdog Timer"
323 depends on WATCHDOG && X86
324 ---help---
325
326 This is the driver for the hardware watchdog on the SBC8360 Single
327 Board Computer produced by Axiomtek Co., Ltd. (www.axiomtek.com).
328
329 To compile this driver as a module, choose M here: the
330 module will be called sbc8360.ko.
331
332 Most people will say N.
333
301config CPU5_WDT 334config CPU5_WDT
302 tristate "SMA CPU5 Watchdog" 335 tristate "SMA CPU5 Watchdog"
303 depends on WATCHDOG && X86 336 depends on WATCHDOG && X86
@@ -336,6 +369,19 @@ config W83877F_WDT
336 369
337 Most people will say N. 370 Most people will say N.
338 371
372config W83977F_WDT
373 tristate "W83977F (PCM-5335) Watchdog Timer"
374 depends on WATCHDOG && X86
375 ---help---
376 This is the driver for the hardware watchdog on the W83977F I/O chip
377 as used in AAEON's PCM-5335 SBC (and likely others). This
378 watchdog simply watches your kernel to make sure it doesn't freeze,
379 and if it does, it reboots your computer after a certain amount of
380 time.
381
382 To compile this driver as a module, choose M here: the
383 module will be called w83977f_wdt.
384
339config MACHZ_WDT 385config MACHZ_WDT
340 tristate "ZF MachZ Watchdog" 386 tristate "ZF MachZ Watchdog"
341 depends on WATCHDOG && X86 387 depends on WATCHDOG && X86
@@ -355,6 +401,10 @@ config 8xx_WDT
355 tristate "MPC8xx Watchdog Timer" 401 tristate "MPC8xx Watchdog Timer"
356 depends on WATCHDOG && 8xx 402 depends on WATCHDOG && 8xx
357 403
404config MV64X60_WDT
405 tristate "MV64X60 (Marvell Discovery) Watchdog Timer"
406 depends on WATCHDOG && MV64X60
407
358config BOOKE_WDT 408config BOOKE_WDT
359 tristate "PowerPC Book-E Watchdog Timer" 409 tristate "PowerPC Book-E Watchdog Timer"
360 depends on WATCHDOG && (BOOKE || 4xx) 410 depends on WATCHDOG && (BOOKE || 4xx)
@@ -362,6 +412,17 @@ config BOOKE_WDT
362 Please see Documentation/watchdog/watchdog-api.txt for 412 Please see Documentation/watchdog/watchdog-api.txt for
363 more information. 413 more information.
364 414
415# PPC64 Architecture
416
417config WATCHDOG_RTAS
418 tristate "RTAS watchdog"
419 depends on WATCHDOG && PPC_RTAS
420 help
421 This driver adds watchdog support for the RTAS watchdog.
422
423 To compile this driver as a module, choose M here. The module
424 will be called wdrtas.
425
365# MIPS Architecture 426# MIPS Architecture
366 427
367config INDYDOG 428config INDYDOG
@@ -430,16 +491,6 @@ config WATCHDOG_RIO
430 machines. The watchdog timeout period is normally one minute but 491 machines. The watchdog timeout period is normally one minute but
431 can be changed with a boot-time parameter. 492 can be changed with a boot-time parameter.
432 493
433# ppc64 RTAS watchdog
434config WATCHDOG_RTAS
435 tristate "RTAS watchdog"
436 depends on WATCHDOG && PPC_RTAS
437 help
438 This driver adds watchdog support for the RTAS watchdog.
439
440 To compile this driver as a module, choose M here. The module
441 will be called wdrtas.
442
443# 494#
444# ISA-based Watchdog Cards 495# ISA-based Watchdog Cards
445# 496#
diff --git a/drivers/char/watchdog/Makefile b/drivers/char/watchdog/Makefile
index bc6f5fe88c8c..cfd0a3987710 100644
--- a/drivers/char/watchdog/Makefile
+++ b/drivers/char/watchdog/Makefile
@@ -39,22 +39,27 @@ obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o
39obj-$(CONFIG_SC520_WDT) += sc520_wdt.o 39obj-$(CONFIG_SC520_WDT) += sc520_wdt.o
40obj-$(CONFIG_EUROTECH_WDT) += eurotechwdt.o 40obj-$(CONFIG_EUROTECH_WDT) += eurotechwdt.o
41obj-$(CONFIG_IB700_WDT) += ib700wdt.o 41obj-$(CONFIG_IB700_WDT) += ib700wdt.o
42obj-$(CONFIG_IBMASR) += ibmasr.o
42obj-$(CONFIG_WAFER_WDT) += wafer5823wdt.o 43obj-$(CONFIG_WAFER_WDT) += wafer5823wdt.o
44obj-$(CONFIG_I6300ESB_WDT) += i6300esb.o
43obj-$(CONFIG_I8XX_TCO) += i8xx_tco.o 45obj-$(CONFIG_I8XX_TCO) += i8xx_tco.o
44obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o 46obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o
45obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o 47obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o
46obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o 48obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o
49obj-$(CONFIG_SBC8360_WDT) += sbc8360.o
47obj-$(CONFIG_CPU5_WDT) += cpu5wdt.o 50obj-$(CONFIG_CPU5_WDT) += cpu5wdt.o
48obj-$(CONFIG_W83627HF_WDT) += w83627hf_wdt.o 51obj-$(CONFIG_W83627HF_WDT) += w83627hf_wdt.o
49obj-$(CONFIG_W83877F_WDT) += w83877f_wdt.o 52obj-$(CONFIG_W83877F_WDT) += w83877f_wdt.o
53obj-$(CONFIG_W83977F_WDT) += w83977f_wdt.o
50obj-$(CONFIG_MACHZ_WDT) += machzwd.o 54obj-$(CONFIG_MACHZ_WDT) += machzwd.o
51 55
52# PowerPC Architecture 56# PowerPC Architecture
53obj-$(CONFIG_8xx_WDT) += mpc8xx_wdt.o 57obj-$(CONFIG_8xx_WDT) += mpc8xx_wdt.o
58obj-$(CONFIG_MV64X60_WDT) += mv64x60_wdt.o
59obj-$(CONFIG_BOOKE_WDT) += booke_wdt.o
54 60
55# PPC64 Architecture 61# PPC64 Architecture
56obj-$(CONFIG_WATCHDOG_RTAS) += wdrtas.o 62obj-$(CONFIG_WATCHDOG_RTAS) += wdrtas.o
57obj-$(CONFIG_BOOKE_WDT) += booke_wdt.o
58 63
59# MIPS Architecture 64# MIPS Architecture
60obj-$(CONFIG_INDYDOG) += indydog.o 65obj-$(CONFIG_INDYDOG) += indydog.o
diff --git a/drivers/char/watchdog/i6300esb.c b/drivers/char/watchdog/i6300esb.c
new file mode 100644
index 000000000000..93785f13242e
--- /dev/null
+++ b/drivers/char/watchdog/i6300esb.c
@@ -0,0 +1,527 @@
1/*
2 * i6300esb: Watchdog timer driver for Intel 6300ESB chipset
3 *
4 * (c) Copyright 2004 Google Inc.
5 * (c) Copyright 2005 David Härdeman <david@2gen.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * based on i810-tco.c which is in turn based on softdog.c
13 *
14 * The timer is implemented in the following I/O controller hubs:
15 * (See the intel documentation on http://developer.intel.com.)
16 * 6300ESB chip : document number 300641-003
17 *
18 * 2004YYZZ Ross Biro
19 * Initial version 0.01
20 * 2004YYZZ Ross Biro
21 * Version 0.02
22 * 20050210 David Härdeman <david@2gen.com>
23 * Ported driver to kernel 2.6
24 */
25
26/*
27 * Includes, defines, variables, module parameters, ...
28 */
29
30#include <linux/module.h>
31#include <linux/types.h>
32#include <linux/kernel.h>
33#include <linux/fs.h>
34#include <linux/mm.h>
35#include <linux/miscdevice.h>
36#include <linux/watchdog.h>
37#include <linux/reboot.h>
38#include <linux/init.h>
39#include <linux/pci.h>
40#include <linux/ioport.h>
41
42#include <asm/uaccess.h>
43#include <asm/io.h>
44
45/* Module and version information */
46#define ESB_VERSION "0.03"
47#define ESB_MODULE_NAME "i6300ESB timer"
48#define ESB_DRIVER_NAME ESB_MODULE_NAME ", v" ESB_VERSION
49#define PFX ESB_MODULE_NAME ": "
50
51/* PCI configuration registers */
52#define ESB_CONFIG_REG 0x60 /* Config register */
53#define ESB_LOCK_REG 0x68 /* WDT lock register */
54
55/* Memory mapped registers */
56#define ESB_TIMER1_REG BASEADDR + 0x00 /* Timer1 value after each reset */
57#define ESB_TIMER2_REG BASEADDR + 0x04 /* Timer2 value after each reset */
58#define ESB_GINTSR_REG BASEADDR + 0x08 /* General Interrupt Status Register */
59#define ESB_RELOAD_REG BASEADDR + 0x0c /* Reload register */
60
61/* Lock register bits */
62#define ESB_WDT_FUNC ( 0x01 << 2 ) /* Watchdog functionality */
63#define ESB_WDT_ENABLE ( 0x01 << 1 ) /* Enable WDT */
64#define ESB_WDT_LOCK ( 0x01 << 0 ) /* Lock (nowayout) */
65
66/* Config register bits */
67#define ESB_WDT_REBOOT ( 0x01 << 5 ) /* Enable reboot on timeout */
68#define ESB_WDT_FREQ ( 0x01 << 2 ) /* Decrement frequency */
69#define ESB_WDT_INTTYPE ( 0x11 << 0 ) /* Interrupt type on timer1 timeout */
70
71/* Reload register bits */
72#define ESB_WDT_RELOAD ( 0x01 << 8 ) /* prevent timeout */
73
74/* Magic constants */
75#define ESB_UNLOCK1 0x80 /* Step 1 to unlock reset registers */
76#define ESB_UNLOCK2 0x86 /* Step 2 to unlock reset registers */
77
78/* internal variables */
79static void __iomem *BASEADDR;
80static spinlock_t esb_lock; /* Guards the hardware */
81static unsigned long timer_alive;
82static struct pci_dev *esb_pci;
83static unsigned short triggered; /* The status of the watchdog upon boot */
84static char esb_expect_close;
85
86/* module parameters */
87#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat (1<heartbeat<2*1023) */
88static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
89module_param(heartbeat, int, 0);
90MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (1<heartbeat<2046, default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
91
92static int nowayout = WATCHDOG_NOWAYOUT;
93module_param(nowayout, int, 0);
94MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
95
96/*
97 * Some i6300ESB specific functions
98 */
99
100/*
101 * Prepare for reloading the timer by unlocking the proper registers.
102 * This is performed by first writing 0x80 followed by 0x86 to the
103 * reload register. After this the appropriate registers can be written
104 * to once before they need to be unlocked again.
105 */
106static inline void esb_unlock_registers(void) {
107 writeb(ESB_UNLOCK1, ESB_RELOAD_REG);
108 writeb(ESB_UNLOCK2, ESB_RELOAD_REG);
109}
110
111static void esb_timer_start(void)
112{
113 u8 val;
114
115 /* Enable or Enable + Lock? */
116 val = 0x02 | (nowayout ? 0x01 : 0x00);
117
118 pci_write_config_byte(esb_pci, ESB_LOCK_REG, val);
119}
120
121static int esb_timer_stop(void)
122{
123 u8 val;
124
125 spin_lock(&esb_lock);
126 /* First, reset timers as suggested by the docs */
127 esb_unlock_registers();
128 writew(ESB_WDT_RELOAD, ESB_RELOAD_REG);
129 /* Then disable the WDT */
130 pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x0);
131 pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val);
132 spin_unlock(&esb_lock);
133
134 /* Returns 0 if the timer was disabled, non-zero otherwise */
135 return (val & 0x01);
136}
137
138static void esb_timer_keepalive(void)
139{
140 spin_lock(&esb_lock);
141 esb_unlock_registers();
142 writew(ESB_WDT_RELOAD, ESB_RELOAD_REG);
143 /* FIXME: Do we need to flush anything here? */
144 spin_unlock(&esb_lock);
145}
146
147static int esb_timer_set_heartbeat(int time)
148{
149 u32 val;
150
151 if (time < 0x1 || time > (2 * 0x03ff))
152 return -EINVAL;
153
154 spin_lock(&esb_lock);
155
156 /* We shift by 9, so if we are passed a value of 1 sec,
157 * val will be 1 << 9 = 512, then write that to two
158 * timers => 2 * 512 = 1024 (which is decremented at 1KHz)
159 */
160 val = time << 9;
161
162 /* Write timer 1 */
163 esb_unlock_registers();
164 writel(val, ESB_TIMER1_REG);
165
166 /* Write timer 2 */
167 esb_unlock_registers();
168 writel(val, ESB_TIMER2_REG);
169
170 /* Reload */
171 esb_unlock_registers();
172 writew(ESB_WDT_RELOAD, ESB_RELOAD_REG);
173
174 /* FIXME: Do we need to flush everything out? */
175
176 /* Done */
177 heartbeat = time;
178 spin_unlock(&esb_lock);
179 return 0;
180}
181
182static int esb_timer_read (void)
183{
184 u32 count;
185
186 /* This isn't documented, and doesn't take into
 187	 * account which stage is running, but it looks
 188	 * like a 20-bit countdown, so we might as well report it.
189 */
190 pci_read_config_dword(esb_pci, 0x64, &count);
191 return (int)count;
192}
193
194/*
195 * /dev/watchdog handling
196 */
197
198static int esb_open (struct inode *inode, struct file *file)
199{
200 /* /dev/watchdog can only be opened once */
201 if (test_and_set_bit(0, &timer_alive))
202 return -EBUSY;
203
204 /* Reload and activate timer */
205 esb_timer_keepalive ();
206 esb_timer_start ();
207
208 return nonseekable_open(inode, file);
209}
210
211static int esb_release (struct inode *inode, struct file *file)
212{
213 /* Shut off the timer. */
214 if (esb_expect_close == 42) {
215 esb_timer_stop ();
216 } else {
217 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n");
218 esb_timer_keepalive ();
219 }
220 clear_bit(0, &timer_alive);
221 esb_expect_close = 0;
222 return 0;
223}
224
225static ssize_t esb_write (struct file *file, const char __user *data,
226 size_t len, loff_t * ppos)
227{
228 /* See if we got the magic character 'V' and reload the timer */
229 if (len) {
230 if (!nowayout) {
231 size_t i;
232
233 /* note: just in case someone wrote the magic character
234 * five months ago... */
235 esb_expect_close = 0;
236
237 /* scan to see whether or not we got the magic character */
238 for (i = 0; i != len; i++) {
239 char c;
240 if(get_user(c, data+i))
241 return -EFAULT;
242 if (c == 'V')
243 esb_expect_close = 42;
244 }
245 }
246
247 /* someone wrote to us, we should reload the timer */
248 esb_timer_keepalive ();
249 }
250 return len;
251}
252
253static int esb_ioctl (struct inode *inode, struct file *file,
254 unsigned int cmd, unsigned long arg)
255{
256 int new_options, retval = -EINVAL;
257 int new_heartbeat;
258 void __user *argp = (void __user *)arg;
259 int __user *p = argp;
260 static struct watchdog_info ident = {
261 .options = WDIOF_SETTIMEOUT |
262 WDIOF_KEEPALIVEPING |
263 WDIOF_MAGICCLOSE,
264 .firmware_version = 0,
265 .identity = ESB_MODULE_NAME,
266 };
267
268 switch (cmd) {
269 case WDIOC_GETSUPPORT:
270 return copy_to_user(argp, &ident,
271 sizeof (ident)) ? -EFAULT : 0;
272
273 case WDIOC_GETSTATUS:
274 return put_user (esb_timer_read(), p);
275
276 case WDIOC_GETBOOTSTATUS:
277 return put_user (triggered, p);
278
279 case WDIOC_KEEPALIVE:
280 esb_timer_keepalive ();
281 return 0;
282
283 case WDIOC_SETOPTIONS:
284 {
285 if (get_user (new_options, p))
286 return -EFAULT;
287
288 if (new_options & WDIOS_DISABLECARD) {
289 esb_timer_stop ();
290 retval = 0;
291 }
292
293 if (new_options & WDIOS_ENABLECARD) {
294 esb_timer_keepalive ();
295 esb_timer_start ();
296 retval = 0;
297 }
298
299 return retval;
300 }
301
302 case WDIOC_SETTIMEOUT:
303 {
304 if (get_user(new_heartbeat, p))
305 return -EFAULT;
306
307 if (esb_timer_set_heartbeat(new_heartbeat))
308 return -EINVAL;
309
310 esb_timer_keepalive ();
311 /* Fall */
312 }
313
314 case WDIOC_GETTIMEOUT:
315 return put_user(heartbeat, p);
316
317 default:
318 return -ENOIOCTLCMD;
319 }
320}
321
322/*
323 * Notify system
324 */
325
326static int esb_notify_sys (struct notifier_block *this, unsigned long code, void *unused)
327{
328 if (code==SYS_DOWN || code==SYS_HALT) {
329 /* Turn the WDT off */
330 esb_timer_stop ();
331 }
332
333 return NOTIFY_DONE;
334}
335
336/*
337 * Kernel Interfaces
338 */
339
340static struct file_operations esb_fops = {
341 .owner = THIS_MODULE,
342 .llseek = no_llseek,
343 .write = esb_write,
344 .ioctl = esb_ioctl,
345 .open = esb_open,
346 .release = esb_release,
347};
348
349static struct miscdevice esb_miscdev = {
350 .minor = WATCHDOG_MINOR,
351 .name = "watchdog",
352 .fops = &esb_fops,
353};
354
355static struct notifier_block esb_notifier = {
356 .notifier_call = esb_notify_sys,
357};
358
359/*
360 * Data for PCI driver interface
361 *
362 * This data only exists for exporting the supported
363 * PCI ids via MODULE_DEVICE_TABLE. We do not actually
364 * register a pci_driver, because someone else might one day
365 * want to register another driver on the same PCI id.
366 */
367static struct pci_device_id esb_pci_tbl[] = {
368 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9), },
369 { 0, }, /* End of list */
370};
371MODULE_DEVICE_TABLE (pci, esb_pci_tbl);
372
373/*
374 * Init & exit routines
375 */
376
377static unsigned char __init esb_getdevice (void)
378{
379 u8 val1;
380 unsigned short val2;
381
382 struct pci_dev *dev = NULL;
383 /*
384 * Find the PCI device
385 */
386
387 for_each_pci_dev(dev) {
388 if (pci_match_id(esb_pci_tbl, dev)) {
389 esb_pci = dev;
390 break;
391 }
392 }
393
394 if (esb_pci) {
395 if (pci_enable_device(esb_pci)) {
396 printk (KERN_ERR PFX "failed to enable device\n");
397 goto err_devput;
398 }
399
400 if (pci_request_region(esb_pci, 0, ESB_MODULE_NAME)) {
401 printk (KERN_ERR PFX "failed to request region\n");
402 goto err_disable;
403 }
404
405 BASEADDR = ioremap(pci_resource_start(esb_pci, 0),
406 pci_resource_len(esb_pci, 0));
407 if (BASEADDR == NULL) {
408 /* Something's wrong here, BASEADDR has to be set */
409 printk (KERN_ERR PFX "failed to get BASEADDR\n");
410 goto err_release;
411 }
412
413 /*
414 * The watchdog has two timers, it can be setup so that the
415 * expiry of timer1 results in an interrupt and the expiry of
416 * timer2 results in a reboot. We set it to not generate
417 * any interrupts as there is not much we can do with it
418 * right now.
419 *
420 * We also enable reboots and set the timer frequency to
421 * the PCI clock divided by 2^15 (approx 1KHz).
422 */
423 pci_write_config_word(esb_pci, ESB_CONFIG_REG, 0x0003);
424
425 /* Check that the WDT isn't already locked */
426 pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val1);
427 if (val1 & ESB_WDT_LOCK)
428 printk (KERN_WARNING PFX "nowayout already set\n");
429
430 /* Set the timer to watchdog mode and disable it for now */
431 pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x00);
432
433 /* Check if the watchdog was previously triggered */
434 esb_unlock_registers();
435 val2 = readw(ESB_RELOAD_REG);
436 triggered = (val2 & (0x01 << 9) >> 9);
437
438 /* Reset trigger flag and timers */
439 esb_unlock_registers();
440 writew((0x11 << 8), ESB_RELOAD_REG);
441
442 /* Done */
443 return 1;
444
445err_release:
446 pci_release_region(esb_pci, 0);
447err_disable:
448 pci_disable_device(esb_pci);
449err_devput:
450 pci_dev_put(esb_pci);
451 }
452 return 0;
453}
454
455static int __init watchdog_init (void)
456{
457 int ret;
458
459 spin_lock_init(&esb_lock);
460
461 /* Check whether or not the hardware watchdog is there */
462 if (!esb_getdevice () || esb_pci == NULL)
463 return -ENODEV;
464
 465	/* Check that the heartbeat value is within its range; if not, reset to the default */
466 if (esb_timer_set_heartbeat (heartbeat)) {
467 esb_timer_set_heartbeat (WATCHDOG_HEARTBEAT);
468 printk(KERN_INFO PFX "heartbeat value must be 1<heartbeat<2046, using %d\n",
469 heartbeat);
470 }
471
472 ret = register_reboot_notifier(&esb_notifier);
473 if (ret != 0) {
474 printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n",
475 ret);
476 goto err_unmap;
477 }
478
479 ret = misc_register(&esb_miscdev);
480 if (ret != 0) {
481 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n",
482 WATCHDOG_MINOR, ret);
483 goto err_notifier;
484 }
485
486 esb_timer_stop ();
487
488 printk (KERN_INFO PFX "initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
489 BASEADDR, heartbeat, nowayout);
490
491 return 0;
492
493err_notifier:
494 unregister_reboot_notifier(&esb_notifier);
495err_unmap:
496 iounmap(BASEADDR);
497/* err_release: */
498 pci_release_region(esb_pci, 0);
499/* err_disable: */
500 pci_disable_device(esb_pci);
501/* err_devput: */
502 pci_dev_put(esb_pci);
503 return ret;
504}
505
506static void __exit watchdog_cleanup (void)
507{
508 /* Stop the timer before we leave */
509 if (!nowayout)
510 esb_timer_stop ();
511
512 /* Deregister */
513 misc_deregister(&esb_miscdev);
514 unregister_reboot_notifier(&esb_notifier);
515 iounmap(BASEADDR);
516 pci_release_region(esb_pci, 0);
517 pci_disable_device(esb_pci);
518 pci_dev_put(esb_pci);
519}
520
521module_init(watchdog_init);
522module_exit(watchdog_cleanup);
523
524MODULE_AUTHOR("Ross Biro and David Härdeman");
525MODULE_DESCRIPTION("Watchdog driver for Intel 6300ESB chipsets");
526MODULE_LICENSE("GPL");
527MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
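
esb_timer_set_heartbeat() above converts the requested seconds into a per-stage count by shifting left by 9: each of the two stages is loaded with that value, and the pair decrements at roughly 1 kHz, so two stages of 512 ticks cover about one second. A small stand-alone sketch of that arithmetic, purely illustrative and outside the driver:

    #include <stdio.h>

    /* mirrors the driver's seconds -> per-stage register value conversion */
    static unsigned int esb_secs_to_reg(unsigned int secs)
    {
            return secs << 9;       /* 2 stages x 512 ticks each = ~1024 ticks per second */
    }

    int main(void)
    {
            printf("heartbeat 30s -> 0x%x per stage\n", esb_secs_to_reg(30));
            return 0;
    }
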
diff --git a/drivers/char/watchdog/ibmasr.c b/drivers/char/watchdog/ibmasr.c
new file mode 100644
index 000000000000..294c474ae485
--- /dev/null
+++ b/drivers/char/watchdog/ibmasr.c
@@ -0,0 +1,405 @@
1/*
2 * IBM Automatic Server Restart driver.
3 *
4 * Copyright (c) 2005 Andrey Panin <pazke@donpac.ru>
5 *
6 * Based on driver written by Pete Reynolds.
7 * Copyright (c) IBM Corporation, 1998-2004.
8 *
9 * This software may be used and distributed according to the terms
10 * of the GNU Public License, incorporated herein by reference.
11 */
12
13#include <linux/config.h>
14#include <linux/fs.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/module.h>
18#include <linux/pci.h>
19#include <linux/timer.h>
20#include <linux/miscdevice.h>
21#include <linux/watchdog.h>
22#include <linux/dmi.h>
23
24#include <asm/io.h>
25#include <asm/uaccess.h>
26
27
28enum {
29 ASMTYPE_UNKNOWN,
30 ASMTYPE_TOPAZ,
31 ASMTYPE_JASPER,
32 ASMTYPE_PEARL,
33 ASMTYPE_JUNIPER,
34 ASMTYPE_SPRUCE,
35};
36
37#define PFX "ibmasr: "
38
39#define TOPAZ_ASR_REG_OFFSET 4
40#define TOPAZ_ASR_TOGGLE 0x40
41#define TOPAZ_ASR_DISABLE 0x80
42
43/* PEARL ASR S/W REGISTER SUPERIO PORT ADDRESSES */
44#define PEARL_BASE 0xe04
45#define PEARL_WRITE 0xe06
46#define PEARL_READ 0xe07
47
48#define PEARL_ASR_DISABLE_MASK 0x80 /* bit 7: disable = 1, enable = 0 */
49#define PEARL_ASR_TOGGLE_MASK 0x40 /* bit 6: 0, then 1, then 0 */
50
51/* JASPER OFFSET FROM SIO BASE ADDR TO ASR S/W REGISTERS. */
52#define JASPER_ASR_REG_OFFSET 0x38
53
54#define JASPER_ASR_DISABLE_MASK 0x01 /* bit 0: disable = 1, enable = 0 */
55#define JASPER_ASR_TOGGLE_MASK 0x02 /* bit 1: 0, then 1, then 0 */
56
57#define JUNIPER_BASE_ADDRESS 0x54b /* Base address of Juniper ASR */
58#define JUNIPER_ASR_DISABLE_MASK 0x01 /* bit 0: disable = 1 enable = 0 */
59#define JUNIPER_ASR_TOGGLE_MASK 0x02 /* bit 1: 0, then 1, then 0 */
60
61#define SPRUCE_BASE_ADDRESS 0x118e /* Base address of Spruce ASR */
62#define SPRUCE_ASR_DISABLE_MASK 0x01 /* bit 1: disable = 1 enable = 0 */
63#define SPRUCE_ASR_TOGGLE_MASK 0x02 /* bit 0: 0, then 1, then 0 */
64
65
66static int nowayout = WATCHDOG_NOWAYOUT;
67
68static unsigned long asr_is_open;
69static char asr_expect_close;
70
71static unsigned int asr_type, asr_base, asr_length;
72static unsigned int asr_read_addr, asr_write_addr;
73static unsigned char asr_toggle_mask, asr_disable_mask;
74
75static void asr_toggle(void)
76{
77 unsigned char reg = inb(asr_read_addr);
78
79 outb(reg & ~asr_toggle_mask, asr_write_addr);
80 reg = inb(asr_read_addr);
81
82 outb(reg | asr_toggle_mask, asr_write_addr);
83 reg = inb(asr_read_addr);
84
85 outb(reg & ~asr_toggle_mask, asr_write_addr);
86 reg = inb(asr_read_addr);
87}
88
89static void asr_enable(void)
90{
91 unsigned char reg;
92
93 if (asr_type == ASMTYPE_TOPAZ) {
94 /* asr_write_addr == asr_read_addr */
95 reg = inb(asr_read_addr);
96 outb(reg & ~(TOPAZ_ASR_TOGGLE | TOPAZ_ASR_DISABLE),
97 asr_read_addr);
98 } else {
99 /*
100 * First make sure the hardware timer is reset by toggling
101 * ASR hardware timer line.
102 */
103 asr_toggle();
104
105 reg = inb(asr_read_addr);
106 outb(reg & ~asr_disable_mask, asr_write_addr);
107 }
108 reg = inb(asr_read_addr);
109}
110
111static void asr_disable(void)
112{
113 unsigned char reg = inb(asr_read_addr);
114
115 if (asr_type == ASMTYPE_TOPAZ)
116 /* asr_write_addr == asr_read_addr */
117 outb(reg | TOPAZ_ASR_TOGGLE | TOPAZ_ASR_DISABLE,
118 asr_read_addr);
119 else {
120 outb(reg | asr_toggle_mask, asr_write_addr);
121 reg = inb(asr_read_addr);
122
123 outb(reg | asr_disable_mask, asr_write_addr);
124 }
125 reg = inb(asr_read_addr);
126}
127
128static int __init asr_get_base_address(void)
129{
130 unsigned char low, high;
131 const char *type = "";
132
133 asr_length = 1;
134
135 switch (asr_type) {
136 case ASMTYPE_TOPAZ:
137 /* SELECT SuperIO CHIP FOR QUERYING (WRITE 0x07 TO BOTH 0x2E and 0x2F) */
138 outb(0x07, 0x2e);
139 outb(0x07, 0x2f);
140
141 /* SELECT AND READ THE HIGH-NIBBLE OF THE GPIO BASE ADDRESS */
142 outb(0x60, 0x2e);
143 high = inb(0x2f);
144
145 /* SELECT AND READ THE LOW-NIBBLE OF THE GPIO BASE ADDRESS */
146 outb(0x61, 0x2e);
147 low = inb(0x2f);
148
149 asr_base = (high << 16) | low;
150 asr_read_addr = asr_write_addr =
151 asr_base + TOPAZ_ASR_REG_OFFSET;
152 asr_length = 5;
153
154 break;
155
156 case ASMTYPE_JASPER:
157 type = "Jaspers ";
158
159 /* FIXME: need to use pci_config_lock here, but it's not exported */
160
161/* spin_lock_irqsave(&pci_config_lock, flags);*/
162
163 /* Select the SuperIO chip in the PCI I/O port register */
164 outl(0x8000f858, 0xcf8);
165
166 /*
167 * Read the base address for the SuperIO chip.
168 * Only the lower 16 bits are valid, but the address is word
169 * aligned so the last bit must be masked off.
170 */
171 asr_base = inl(0xcfc) & 0xfffe;
172
173/* spin_unlock_irqrestore(&pci_config_lock, flags);*/
174
175 asr_read_addr = asr_write_addr =
176 asr_base + JASPER_ASR_REG_OFFSET;
177 asr_toggle_mask = JASPER_ASR_TOGGLE_MASK;
178 asr_disable_mask = JASPER_ASR_DISABLE_MASK;
179 asr_length = JASPER_ASR_REG_OFFSET + 1;
180
181 break;
182
183 case ASMTYPE_PEARL:
184 type = "Pearls ";
185 asr_base = PEARL_BASE;
186 asr_read_addr = PEARL_READ;
187 asr_write_addr = PEARL_WRITE;
188 asr_toggle_mask = PEARL_ASR_TOGGLE_MASK;
189 asr_disable_mask = PEARL_ASR_DISABLE_MASK;
190 asr_length = 4;
191 break;
192
193 case ASMTYPE_JUNIPER:
194 type = "Junipers ";
195 asr_base = JUNIPER_BASE_ADDRESS;
196 asr_read_addr = asr_write_addr = asr_base;
197 asr_toggle_mask = JUNIPER_ASR_TOGGLE_MASK;
198 asr_disable_mask = JUNIPER_ASR_DISABLE_MASK;
199 break;
200
201 case ASMTYPE_SPRUCE:
202 type = "Spruce's ";
203 asr_base = SPRUCE_BASE_ADDRESS;
204 asr_read_addr = asr_write_addr = asr_base;
205 asr_toggle_mask = SPRUCE_ASR_TOGGLE_MASK;
206 asr_disable_mask = SPRUCE_ASR_DISABLE_MASK;
207 break;
208 }
209
210 if (!request_region(asr_base, asr_length, "ibmasr")) {
211 printk(KERN_ERR PFX "address %#x already in use\n",
212 asr_base);
213 return -EBUSY;
214 }
215
216 printk(KERN_INFO PFX "found %sASR @ addr %#x\n", type, asr_base);
217
218 return 0;
219}
220
221
222static ssize_t asr_write(struct file *file, const char __user *buf,
223 size_t count, loff_t *ppos)
224{
225 if (count) {
226 if (!nowayout) {
227 size_t i;
228
229 /* In case it was set long ago */
230 asr_expect_close = 0;
231
232 for (i = 0; i != count; i++) {
233 char c;
234 if (get_user(c, buf + i))
235 return -EFAULT;
236 if (c == 'V')
237 asr_expect_close = 42;
238 }
239 }
240 asr_toggle();
241 }
242 return count;
243}
244
245static int asr_ioctl(struct inode *inode, struct file *file,
246 unsigned int cmd, unsigned long arg)
247{
248 static const struct watchdog_info ident = {
249 .options = WDIOF_KEEPALIVEPING |
250 WDIOF_MAGICCLOSE,
251 .identity = "IBM ASR"
252 };
253 void __user *argp = (void __user *)arg;
254 int __user *p = argp;
255 int heartbeat;
256
257 switch (cmd) {
258 case WDIOC_GETSUPPORT:
259 return copy_to_user(argp, &ident, sizeof(ident)) ?
260 -EFAULT : 0;
261
262 case WDIOC_GETSTATUS:
263 case WDIOC_GETBOOTSTATUS:
264 return put_user(0, p);
265
266 case WDIOC_KEEPALIVE:
267 asr_toggle();
268 return 0;
269
270 /*
 271	 * The hardware has a fixed timeout value, so there is no WDIOC_SETTIMEOUT,
 272	 * and WDIOC_GETTIMEOUT always returns 256.
273 */
274 case WDIOC_GETTIMEOUT:
275 heartbeat = 256;
276 return put_user(heartbeat, p);
277
278 case WDIOC_SETOPTIONS: {
279 int new_options, retval = -EINVAL;
280
281 if (get_user(new_options, p))
282 return -EFAULT;
283
284 if (new_options & WDIOS_DISABLECARD) {
285 asr_disable();
286 retval = 0;
287 }
288
289 if (new_options & WDIOS_ENABLECARD) {
290 asr_enable();
291 asr_toggle();
292 retval = 0;
293 }
294
295 return retval;
296 }
297 }
298
299 return -ENOIOCTLCMD;
300}
301
302static int asr_open(struct inode *inode, struct file *file)
303{
304 if(test_and_set_bit(0, &asr_is_open))
305 return -EBUSY;
306
307 asr_toggle();
308 asr_enable();
309
310 return nonseekable_open(inode, file);
311}
312
313static int asr_release(struct inode *inode, struct file *file)
314{
315 if (asr_expect_close == 42)
316 asr_disable();
317 else {
318 printk(KERN_CRIT PFX "unexpected close, not stopping watchdog!\n");
319 asr_toggle();
320 }
321 clear_bit(0, &asr_is_open);
322 asr_expect_close = 0;
323 return 0;
324}
325
326static struct file_operations asr_fops = {
327 .owner = THIS_MODULE,
328 .llseek = no_llseek,
329 .write = asr_write,
330 .ioctl = asr_ioctl,
331 .open = asr_open,
332 .release = asr_release,
333};
334
335static struct miscdevice asr_miscdev = {
336 .minor = WATCHDOG_MINOR,
337 .name = "watchdog",
338 .fops = &asr_fops,
339};
340
341
342struct ibmasr_id {
343 const char *desc;
344 int type;
345};
346
347static struct ibmasr_id __initdata ibmasr_id_table[] = {
348 { "IBM Automatic Server Restart - eserver xSeries 220", ASMTYPE_TOPAZ },
349 { "IBM Automatic Server Restart - Machine Type 8673", ASMTYPE_PEARL },
350 { "IBM Automatic Server Restart - Machine Type 8480", ASMTYPE_JASPER },
351 { "IBM Automatic Server Restart - Machine Type 8482", ASMTYPE_JUNIPER },
352 { "IBM Automatic Server Restart - Machine Type 8648", ASMTYPE_SPRUCE },
353 { NULL }
354};
355
356static int __init ibmasr_init(void)
357{
358 struct ibmasr_id *id;
359 int rc;
360
361 for (id = ibmasr_id_table; id->desc; id++) {
362 if (dmi_find_device(DMI_DEV_TYPE_OTHER, id->desc, NULL)) {
363 asr_type = id->type;
364 break;
365 }
366 }
367
368 if (!asr_type)
369 return -ENODEV;
370
371 rc = misc_register(&asr_miscdev);
372 if (rc < 0) {
373 printk(KERN_ERR PFX "failed to register misc device\n");
374 return rc;
375 }
376
377 rc = asr_get_base_address();
378 if (rc) {
379 misc_deregister(&asr_miscdev);
380 return rc;
381 }
382
383 return 0;
384}
385
386static void __exit ibmasr_exit(void)
387{
388 if (!nowayout)
389 asr_disable();
390
391 misc_deregister(&asr_miscdev);
392
393 release_region(asr_base, asr_length);
394}
395
396module_init(ibmasr_init);
397module_exit(ibmasr_exit);
398
399module_param(nowayout, int, 0);
400MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
401
402MODULE_DESCRIPTION("IBM Automatic Server Restart driver");
403MODULE_AUTHOR("Andrey Panin");
404MODULE_LICENSE("GPL");
405MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
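
Both of the new drivers above implement the conventional watchdog magic-close protocol: every write is scanned for the character 'V', and only a release that follows such a write actually disables the hardware; otherwise the driver complains and keeps the timer running. A minimal sketch of the scan, with a hypothetical module-level flag standing in for esb_expect_close / asr_expect_close:

    #include <linux/compiler.h>
    #include <linux/types.h>
    #include <asm/uaccess.h>

    static char demo_expect_close;          /* hypothetical flag, cleared on open/release */

    static ssize_t demo_scan_for_magic(const char __user *buf, size_t count)
    {
            size_t i;

            demo_expect_close = 0;          /* forget any magic seen long ago */
            for (i = 0; i != count; i++) {
                    char c;

                    if (get_user(c, buf + i))
                            return -EFAULT;
                    if (c == 'V')           /* arm the expected-close marker */
                            demo_expect_close = 42;
            }
            return count;
    }

On release, the driver stops the timer only when the flag is 42, matching the semantics documented in Documentation/watchdog/watchdog-api.txt.
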
diff --git a/drivers/char/watchdog/mpcore_wdt.c b/drivers/char/watchdog/mpcore_wdt.c
index c694eee1fb24..75ca84ed4adf 100644
--- a/drivers/char/watchdog/mpcore_wdt.c
+++ b/drivers/char/watchdog/mpcore_wdt.c
@@ -30,6 +30,8 @@
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
32#include <linux/device.h> 32#include <linux/device.h>
33
34#include <asm/hardware/arm_twd.h>
33#include <asm/uaccess.h> 35#include <asm/uaccess.h>
34 36
35struct mpcore_wdt { 37struct mpcore_wdt {
diff --git a/drivers/char/watchdog/mv64x60_wdt.c b/drivers/char/watchdog/mv64x60_wdt.c
new file mode 100644
index 000000000000..6d3ff0836c44
--- /dev/null
+++ b/drivers/char/watchdog/mv64x60_wdt.c
@@ -0,0 +1,252 @@
1/*
2 * mv64x60_wdt.c - MV64X60 (Marvell Discovery) watchdog userspace interface
3 *
4 * Author: James Chapman <jchapman@katalix.com>
5 *
6 * Platform-specific setup code should configure the dog to generate
7 * interrupt or reset as required. This code only enables/disables
8 * and services the watchdog.
9 *
10 * Derived from mpc8xx_wdt.c, with the following copyright.
11 *
12 * 2002 (c) Florian Schirmer <jolt@tuxbox.org> This file is licensed under
13 * the terms of the GNU General Public License version 2. This program
14 * is licensed "as is" without any warranty of any kind, whether express
15 * or implied.
16 */
17
18#include <linux/config.h>
19#include <linux/fs.h>
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/miscdevice.h>
23#include <linux/module.h>
24#include <linux/watchdog.h>
25#include <asm/mv64x60.h>
26#include <asm/uaccess.h>
27#include <asm/io.h>
28
29/* MV64x60 WDC (config) register access definitions */
30#define MV64x60_WDC_CTL1_MASK (3 << 24)
31#define MV64x60_WDC_CTL1(val) ((val & 3) << 24)
32#define MV64x60_WDC_CTL2_MASK (3 << 26)
33#define MV64x60_WDC_CTL2(val) ((val & 3) << 26)
34
35/* Flags bits */
36#define MV64x60_WDOG_FLAG_OPENED 0
37#define MV64x60_WDOG_FLAG_ENABLED 1
38
39static unsigned long wdt_flags;
40static int wdt_status;
41static void __iomem *mv64x60_regs;
42static int mv64x60_wdt_timeout;
43
44static void mv64x60_wdt_reg_write(u32 val)
45{
46 /* Allow write only to CTL1 / CTL2 fields, retaining values in
47 * other fields.
48 */
49 u32 data = readl(mv64x60_regs + MV64x60_WDT_WDC);
50 data &= ~(MV64x60_WDC_CTL1_MASK | MV64x60_WDC_CTL2_MASK);
51 data |= val;
52 writel(data, mv64x60_regs + MV64x60_WDT_WDC);
53}
54
55static void mv64x60_wdt_service(void)
56{
57 /* Write 01 followed by 10 to CTL2 */
58 mv64x60_wdt_reg_write(MV64x60_WDC_CTL2(0x01));
59 mv64x60_wdt_reg_write(MV64x60_WDC_CTL2(0x02));
60}
61
62static void mv64x60_wdt_handler_disable(void)
63{
64 if (test_and_clear_bit(MV64x60_WDOG_FLAG_ENABLED, &wdt_flags)) {
65 /* Write 01 followed by 10 to CTL1 */
66 mv64x60_wdt_reg_write(MV64x60_WDC_CTL1(0x01));
67 mv64x60_wdt_reg_write(MV64x60_WDC_CTL1(0x02));
68 printk(KERN_NOTICE "mv64x60_wdt: watchdog deactivated\n");
69 }
70}
71
72static void mv64x60_wdt_handler_enable(void)
73{
74 if (!test_and_set_bit(MV64x60_WDOG_FLAG_ENABLED, &wdt_flags)) {
75 /* Write 01 followed by 10 to CTL1 */
76 mv64x60_wdt_reg_write(MV64x60_WDC_CTL1(0x01));
77 mv64x60_wdt_reg_write(MV64x60_WDC_CTL1(0x02));
78 printk(KERN_NOTICE "mv64x60_wdt: watchdog activated\n");
79 }
80}
81
82static int mv64x60_wdt_open(struct inode *inode, struct file *file)
83{
84 if (test_and_set_bit(MV64x60_WDOG_FLAG_OPENED, &wdt_flags))
85 return -EBUSY;
86
87 mv64x60_wdt_service();
88 mv64x60_wdt_handler_enable();
89
90 nonseekable_open(inode, file);
91
92 return 0;
93}
94
95static int mv64x60_wdt_release(struct inode *inode, struct file *file)
96{
97 mv64x60_wdt_service();
98
99#if !defined(CONFIG_WATCHDOG_NOWAYOUT)
100 mv64x60_wdt_handler_disable();
101#endif
102
103 clear_bit(MV64x60_WDOG_FLAG_OPENED, &wdt_flags);
104
105 return 0;
106}
107
108static ssize_t mv64x60_wdt_write(struct file *file, const char __user *data,
109 size_t len, loff_t * ppos)
110{
111 if (len)
112 mv64x60_wdt_service();
113
114 return len;
115}
116
117static int mv64x60_wdt_ioctl(struct inode *inode, struct file *file,
118 unsigned int cmd, unsigned long arg)
119{
120 int timeout;
121 void __user *argp = (void __user *)arg;
122 static struct watchdog_info info = {
123 .options = WDIOF_KEEPALIVEPING,
124 .firmware_version = 0,
125 .identity = "MV64x60 watchdog",
126 };
127
128 switch (cmd) {
129 case WDIOC_GETSUPPORT:
130 if (copy_to_user(argp, &info, sizeof(info)))
131 return -EFAULT;
132 break;
133
134 case WDIOC_GETSTATUS:
135 case WDIOC_GETBOOTSTATUS:
136 if (put_user(wdt_status, (int __user *)argp))
137 return -EFAULT;
138 wdt_status &= ~WDIOF_KEEPALIVEPING;
139 break;
140
141 case WDIOC_GETTEMP:
142 return -EOPNOTSUPP;
143
144 case WDIOC_SETOPTIONS:
145 return -EOPNOTSUPP;
146
147 case WDIOC_KEEPALIVE:
148 mv64x60_wdt_service();
149 wdt_status |= WDIOF_KEEPALIVEPING;
150 break;
151
152 case WDIOC_SETTIMEOUT:
153 return -EOPNOTSUPP;
154
155 case WDIOC_GETTIMEOUT:
156 timeout = mv64x60_wdt_timeout * HZ;
157 if (put_user(timeout, (int __user *)argp))
158 return -EFAULT;
159 break;
160
161 default:
162 return -ENOIOCTLCMD;
163 }
164
165 return 0;
166}
167
168static struct file_operations mv64x60_wdt_fops = {
169 .owner = THIS_MODULE,
170 .llseek = no_llseek,
171 .write = mv64x60_wdt_write,
172 .ioctl = mv64x60_wdt_ioctl,
173 .open = mv64x60_wdt_open,
174 .release = mv64x60_wdt_release,
175};
176
177static struct miscdevice mv64x60_wdt_miscdev = {
178 .minor = WATCHDOG_MINOR,
179 .name = "watchdog",
180 .fops = &mv64x60_wdt_fops,
181};
182
183static int __devinit mv64x60_wdt_probe(struct device *dev)
184{
185 struct platform_device *pd = to_platform_device(dev);
186 struct mv64x60_wdt_pdata *pdata = pd->dev.platform_data;
187 int bus_clk = 133;
188
189 mv64x60_wdt_timeout = 10;
190 if (pdata) {
191 mv64x60_wdt_timeout = pdata->timeout;
192 bus_clk = pdata->bus_clk;
193 }
194
195 mv64x60_regs = mv64x60_get_bridge_vbase();
196
197 writel((mv64x60_wdt_timeout * (bus_clk * 1000000)) >> 8,
198 mv64x60_regs + MV64x60_WDT_WDC);
199
200 return misc_register(&mv64x60_wdt_miscdev);
201}
202
203static int __devexit mv64x60_wdt_remove(struct device *dev)
204{
205 misc_deregister(&mv64x60_wdt_miscdev);
206
207 mv64x60_wdt_service();
208 mv64x60_wdt_handler_disable();
209
210 return 0;
211}
212
213static struct device_driver mv64x60_wdt_driver = {
214 .name = MV64x60_WDT_NAME,
215 .bus = &platform_bus_type,
216 .probe = mv64x60_wdt_probe,
217 .remove = __devexit_p(mv64x60_wdt_remove),
218};
219
220static struct platform_device *mv64x60_wdt_dev;
221
222static int __init mv64x60_wdt_init(void)
223{
224 int ret;
225
226 printk(KERN_INFO "MV64x60 watchdog driver\n");
227
228 mv64x60_wdt_dev = platform_device_register_simple(MV64x60_WDT_NAME,
229 -1, NULL, 0);
230 if (IS_ERR(mv64x60_wdt_dev)) {
231 ret = PTR_ERR(mv64x60_wdt_dev);
232 goto out;
233 }
234
235 ret = driver_register(&mv64x60_wdt_driver);
236 out:
237 return ret;
238}
239
240static void __exit mv64x60_wdt_exit(void)
241{
242 driver_unregister(&mv64x60_wdt_driver);
243 platform_device_unregister(mv64x60_wdt_dev);
244}
245
246module_init(mv64x60_wdt_init);
247module_exit(mv64x60_wdt_exit);
248
249MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
250MODULE_DESCRIPTION("MV64x60 watchdog driver");
251MODULE_LICENSE("GPL");
252MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
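The mv64x60 driver above exposes the usual /dev/watchdog character device, with WDIOC_KEEPALIVE as the main servicing ioctl. A minimal userspace pinger might look like the sketch below; the device node path and the five-second ping interval are illustrative assumptions, not part of the patch.

/* Illustrative userspace keepalive loop; not part of the patch. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);	/* assumed node path */

	if (fd < 0)
		return 1;

	for (;;) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* service the dog */
		sleep(5);			/* assumed ping interval */
	}
}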
diff --git a/drivers/char/watchdog/pcwd_pci.c b/drivers/char/watchdog/pcwd_pci.c
index 2b13afb09c5d..5a80adbf8032 100644
--- a/drivers/char/watchdog/pcwd_pci.c
+++ b/drivers/char/watchdog/pcwd_pci.c
@@ -29,27 +29,29 @@
  * Includes, defines, variables, module parameters, ...
  */
 
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/pci.h>
-#include <linux/ioport.h>
-#include <linux/spinlock.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
+#include <linux/config.h>	/* For CONFIG_WATCHDOG_NOWAYOUT/... */
+#include <linux/module.h>	/* For module specific items */
+#include <linux/moduleparam.h>	/* For new moduleparam's */
+#include <linux/types.h>	/* For standard types (like size_t) */
+#include <linux/errno.h>	/* For the -ENODEV/... values */
+#include <linux/kernel.h>	/* For printk/panic/... */
+#include <linux/delay.h>	/* For mdelay function */
+#include <linux/miscdevice.h>	/* For MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR) */
+#include <linux/watchdog.h>	/* For the watchdog specific items */
+#include <linux/notifier.h>	/* For notifier support */
+#include <linux/reboot.h>	/* For reboot_notifier stuff */
+#include <linux/init.h>		/* For __init/__exit/... */
+#include <linux/fs.h>		/* For file operations */
+#include <linux/pci.h>		/* For pci functions */
+#include <linux/ioport.h>	/* For io-port access */
+#include <linux/spinlock.h>	/* For spin_lock/spin_unlock/... */
+
+#include <asm/uaccess.h>	/* For copy_to_user/put_user/... */
+#include <asm/io.h>		/* For inb/outb/... */
 
 /* Module and version information */
 #define WATCHDOG_VERSION "1.01"
-#define WATCHDOG_DATE "15 Mar 2005"
+#define WATCHDOG_DATE "02 Sep 2005"
 #define WATCHDOG_DRIVER_NAME "PCI-PC Watchdog"
 #define WATCHDOG_NAME "pcwd_pci"
 #define PFX WATCHDOG_NAME ": "
@@ -335,12 +337,14 @@ static int pcipcwd_ioctl(struct inode *inode, struct file *file,
 			return -EFAULT;
 
 		if (new_options & WDIOS_DISABLECARD) {
-			pcipcwd_stop();
+			if (pcipcwd_stop())
+				return -EIO;
 			retval = 0;
 		}
 
 		if (new_options & WDIOS_ENABLECARD) {
-			pcipcwd_start();
+			if (pcipcwd_start())
+				return -EIO;
 			retval = 0;
 		}
 
diff --git a/drivers/char/watchdog/s3c2410_wdt.c b/drivers/char/watchdog/s3c2410_wdt.c
index 8b292bf343c4..3625b2601b42 100644
--- a/drivers/char/watchdog/s3c2410_wdt.c
+++ b/drivers/char/watchdog/s3c2410_wdt.c
@@ -464,7 +464,7 @@ static void s3c2410wdt_shutdown(struct device *dev)
 static unsigned long wtcon_save;
 static unsigned long wtdat_save;
 
-static int s3c2410wdt_suspend(struct device *dev, u32 state, u32 level)
+static int s3c2410wdt_suspend(struct device *dev, pm_message_t state, u32 level)
 {
 	if (level == SUSPEND_POWER_DOWN) {
 		/* Save watchdog state, and turn it off. */
diff --git a/drivers/char/watchdog/sbc8360.c b/drivers/char/watchdog/sbc8360.c
new file mode 100644
index 000000000000..c6cbf808d8c2
--- /dev/null
+++ b/drivers/char/watchdog/sbc8360.c
@@ -0,0 +1,414 @@
1/*
2 * SBC8360 Watchdog driver
3 *
4 * (c) Copyright 2005 Webcon, Inc.
5 *
6 * Based on ib700wdt.c, which is based on advantechwdt.c which is based
7 * on acquirewdt.c which is based on wdt.c.
8 *
9 * (c) Copyright 2001 Charles Howes <chowes@vsol.net>
10 *
11 * Based on advantechwdt.c which is based on acquirewdt.c which
12 * is based on wdt.c.
13 *
14 * (c) Copyright 2000-2001 Marek Michalkiewicz <marekm@linux.org.pl>
15 *
16 * Based on acquirewdt.c which is based on wdt.c.
17 * Original copyright messages:
18 *
19 * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
20 * http://www.redhat.com
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 *
27 * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
28 * warranty for any of this software. This material is provided
29 * "AS-IS" and at no charge.
30 *
31 * (c) Copyright 1995 Alan Cox <alan@redhat.com>
32 *
33 * 14-Dec-2001 Matt Domsch <Matt_Domsch@dell.com>
34 * Added nowayout module option to override CONFIG_WATCHDOG_NOWAYOUT
35 * Added timeout module option to override default
36 *
37 */
38
39#include <linux/config.h>
40#include <linux/module.h>
41#include <linux/types.h>
42#include <linux/miscdevice.h>
43#include <linux/watchdog.h>
44#include <linux/ioport.h>
45#include <linux/delay.h>
46#include <linux/notifier.h>
47#include <linux/fs.h>
48#include <linux/reboot.h>
49#include <linux/init.h>
50#include <linux/spinlock.h>
51#include <linux/moduleparam.h>
52
53#include <asm/io.h>
54#include <asm/uaccess.h>
55#include <asm/system.h>
56
57static unsigned long sbc8360_is_open;
58static spinlock_t sbc8360_lock;
59static char expect_close;
60
61#define PFX "sbc8360: "
62
63/*
64 *
65 * Watchdog Timer Configuration
66 *
67 * The function of the watchdog timer is to reset the system automatically
68 * and is defined at I/O port 0120H and 0121H. To enable the watchdog timer
69 * and allow the system to reset, write appropriate values from the table
70 * below to I/O port 0120H and 0121H. To disable the timer, write a zero
71 * value to I/O port 0121H for the system to stop the watchdog function.
72 *
73 * The following describes how the timer should be programmed (according to
74 * the vendor documentation)
75 *
76 * Enabling Watchdog:
77 * MOV AX,000AH (enable, phase I)
78 * MOV DX,0120H
79 * OUT DX,AX
80 * MOV AX,000BH (enable, phase II)
81 * MOV DX,0120H
82 * OUT DX,AX
83 * MOV AX,000nH (set multiplier n, from 1-4)
84 * MOV DX,0120H
85 * OUT DX,AX
86 * MOV AX,000mH (set base timer m, from 0-F)
87 * MOV DX,0121H
88 * OUT DX,AX
89 *
90 * Reset timer:
91 * MOV AX,000mH (same as set base timer, above)
92 * MOV DX,0121H
93 * OUT DX,AX
94 *
95 * Disabling Watchdog:
96 * MOV AX,0000H (a zero value)
97 * MOV DX,0120H
98 * OUT DX,AX
99 *
100 * Watchdog timeout configuration values:
101 * N
102 * M | 1 2 3 4
103 * --|----------------------------------
104 * 0 | 0.5s 5s 50s 100s
105 * 1 | 1s 10s 100s 200s
106 * 2 | 1.5s 15s 150s 300s
107 * 3 | 2s 20s 200s 400s
108 * 4 | 2.5s 25s 250s 500s
109 * 5 | 3s 30s 300s 600s
110 * 6 | 3.5s 35s 350s 700s
111 * 7 | 4s 40s 400s 800s
112 * 8 | 4.5s 45s 450s 900s
113 * 9 | 5s 50s 500s 1000s
114 * A | 5.5s 55s 550s 1100s
115 * B | 6s 60s 600s 1200s
116 * C | 6.5s 65s 650s 1300s
117 * D | 7s 70s 700s 1400s
118 * E | 7.5s 75s 750s 1500s
119 * F | 8s 80s 800s 1600s
120 *
121 * Another way to say the same things is:
122 * For N=1, Timeout = (M+1) * 0.5s
123 * For N=2, Timeout = (M+1) * 5s
124 * For N=3, Timeout = (M+1) * 50s
125 * For N=4, Timeout = (M+1) * 100s
126 *
127 */
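/*
 * Worked example: the module default, timeout index 27, selects M=0xB and
 * N=2 in the table below, so the period is (0xB + 1) * 5s = 60s.
 */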
128
129static int wd_times[64][2] = {
130 {0, 1}, /* 0 = 0.5s */
131 {1, 1}, /* 1 = 1s */
132 {2, 1}, /* 2 = 1.5s */
133 {3, 1}, /* 3 = 2s */
134 {4, 1}, /* 4 = 2.5s */
135 {5, 1}, /* 5 = 3s */
136 {6, 1}, /* 6 = 3.5s */
137 {7, 1}, /* 7 = 4s */
138 {8, 1}, /* 8 = 4.5s */
139 {9, 1}, /* 9 = 5s */
140 {0xA, 1}, /* 10 = 5.5s */
141 {0xB, 1}, /* 11 = 6s */
142 {0xC, 1}, /* 12 = 6.5s */
143 {0xD, 1}, /* 13 = 7s */
144 {0xE, 1}, /* 14 = 7.5s */
145 {0xF, 1}, /* 15 = 8s */
146 {0, 2}, /* 16 = 5s */
147 {1, 2}, /* 17 = 10s */
148 {2, 2}, /* 18 = 15s */
149 {3, 2}, /* 19 = 20s */
150 {4, 2}, /* 20 = 25s */
151 {5, 2}, /* 21 = 30s */
152 {6, 2}, /* 22 = 35s */
153 {7, 2}, /* 23 = 40s */
154 {8, 2}, /* 24 = 45s */
155 {9, 2}, /* 25 = 50s */
156 {0xA, 2}, /* 26 = 55s */
157 {0xB, 2}, /* 27 = 60s */
158 {0xC, 2}, /* 28 = 65s */
159 {0xD, 2}, /* 29 = 70s */
160 {0xE, 2}, /* 30 = 75s */
161 {0xF, 2}, /* 31 = 80s */
162 {0, 3}, /* 32 = 50s */
163 {1, 3}, /* 33 = 100s */
164 {2, 3}, /* 34 = 150s */
165 {3, 3}, /* 35 = 200s */
166 {4, 3}, /* 36 = 250s */
167 {5, 3}, /* 37 = 300s */
168 {6, 3}, /* 38 = 350s */
169 {7, 3}, /* 39 = 400s */
170 {8, 3}, /* 40 = 450s */
171 {9, 3}, /* 41 = 500s */
172 {0xA, 3}, /* 42 = 550s */
173 {0xB, 3}, /* 43 = 600s */
174 {0xC, 3}, /* 44 = 650s */
175 {0xD, 3}, /* 45 = 700s */
176 {0xE, 3}, /* 46 = 750s */
177 {0xF, 3}, /* 47 = 800s */
178 {0, 4}, /* 48 = 100s */
179 {1, 4}, /* 49 = 200s */
180 {2, 4}, /* 50 = 300s */
181 {3, 4}, /* 51 = 400s */
182 {4, 4}, /* 52 = 500s */
183 {5, 4}, /* 53 = 600s */
184 {6, 4}, /* 54 = 700s */
185 {7, 4}, /* 55 = 800s */
186 {8, 4}, /* 56 = 900s */
187 {9, 4}, /* 57 = 1000s */
188 {0xA, 4}, /* 58 = 1100s */
189 {0xB, 4}, /* 59 = 1200s */
190 {0xC, 4}, /* 60 = 1300s */
191 {0xD, 4}, /* 61 = 1400s */
192 {0xE, 4}, /* 62 = 1500s */
193 {0xF, 4} /* 63 = 1600s */
194};
195
196#define SBC8360_ENABLE 0x120
197#define SBC8360_BASETIME 0x121
198
199static int timeout = 27;
200static int wd_margin = 0xB;
201static int wd_multiplier = 2;
202static int nowayout = WATCHDOG_NOWAYOUT;
203
204module_param(timeout, int, 27);
205MODULE_PARM_DESC(timeout, "Index into timeout table (0-63) (default=27 (60s))");
206module_param(nowayout, int, 0);
207MODULE_PARM_DESC(nowayout,
208 "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
209
210/*
211 * Kernel methods.
212 */
213
214/* Activate and pre-configure watchdog */
215static void sbc8360_activate(void)
216{
217 /* Enable the watchdog */
218 outb(0x0A, SBC8360_ENABLE);
219 msleep_interruptible(100);
220 outb(0x0B, SBC8360_ENABLE);
221 msleep_interruptible(100);
222 /* Set timeout multiplier */
223 outb(wd_multiplier, SBC8360_ENABLE);
224 msleep_interruptible(100);
225 /* Nothing happens until first sbc8360_ping() */
226}
227
228/* Kernel pings watchdog */
229static void sbc8360_ping(void)
230{
231 /* Write the base timer register */
232 outb(wd_margin, SBC8360_BASETIME);
233}
234
235/* Userspace pings kernel driver, or requests clean close */
236static ssize_t sbc8360_write(struct file *file, const char __user * buf,
237 size_t count, loff_t * ppos)
238{
239 if (count) {
240 if (!nowayout) {
241 size_t i;
242
243 /* In case it was set long ago */
244 expect_close = 0;
245
246 for (i = 0; i != count; i++) {
247 char c;
248 if (get_user(c, buf + i))
249 return -EFAULT;
250 if (c == 'V')
251 expect_close = 42;
252 }
253 }
254 sbc8360_ping();
255 }
256 return count;
257}
258
259static int sbc8360_open(struct inode *inode, struct file *file)
260{
261 spin_lock(&sbc8360_lock);
262 if (test_and_set_bit(0, &sbc8360_is_open)) {
263 spin_unlock(&sbc8360_lock);
264 return -EBUSY;
265 }
266 if (nowayout)
267 __module_get(THIS_MODULE);
268
269 /* Activate and ping once to start the countdown */
270 spin_unlock(&sbc8360_lock);
271 sbc8360_activate();
272 sbc8360_ping();
273 return nonseekable_open(inode, file);
274}
275
276static int sbc8360_close(struct inode *inode, struct file *file)
277{
278 spin_lock(&sbc8360_lock);
279 if (expect_close == 42)
280 outb(0, SBC8360_ENABLE);
281 else
282 printk(KERN_CRIT PFX
283 "SBC8360 device closed unexpectedly. SBC8360 will not stop!\n");
284
285 clear_bit(0, &sbc8360_is_open);
286 expect_close = 0;
287 spin_unlock(&sbc8360_lock);
288 return 0;
289}
290
291/*
292 * Notifier for system down
293 */
294
295static int sbc8360_notify_sys(struct notifier_block *this, unsigned long code,
296 void *unused)
297{
298 if (code == SYS_DOWN || code == SYS_HALT) {
299 /* Disable the SBC8360 Watchdog */
300 outb(0, SBC8360_ENABLE);
301 }
302 return NOTIFY_DONE;
303}
304
305/*
306 * Kernel Interfaces
307 */
308
309static struct file_operations sbc8360_fops = {
310 .owner = THIS_MODULE,
311 .llseek = no_llseek,
312 .write = sbc8360_write,
313 .open = sbc8360_open,
314 .release = sbc8360_close,
315};
316
317static struct miscdevice sbc8360_miscdev = {
318 .minor = WATCHDOG_MINOR,
319 .name = "watchdog",
320 .fops = &sbc8360_fops,
321};
322
323/*
324 * The SBC8360 needs to learn about soft shutdowns in order to
325 * turn the timebomb registers off.
326 */
327
328static struct notifier_block sbc8360_notifier = {
329 .notifier_call = sbc8360_notify_sys,
330};
331
332static int __init sbc8360_init(void)
333{
334 int res;
335 unsigned long int mseconds = 60000;
336
337 spin_lock_init(&sbc8360_lock);
338 res = misc_register(&sbc8360_miscdev);
339 if (res) {
340 printk(KERN_ERR PFX "failed to register misc device\n");
341 goto out_nomisc;
342 }
343
344 if (!request_region(SBC8360_ENABLE, 1, "SBC8360")) {
345 printk(KERN_ERR PFX "ENABLE method I/O %X is not available.\n",
346 SBC8360_ENABLE);
347 res = -EIO;
348 goto out_noenablereg;
349 }
350 if (!request_region(SBC8360_BASETIME, 1, "SBC8360")) {
351 printk(KERN_ERR PFX
352 "BASETIME method I/O %X is not available.\n",
353 SBC8360_BASETIME);
354 res = -EIO;
355 goto out_nobasetimereg;
356 }
357
358 res = register_reboot_notifier(&sbc8360_notifier);
359 if (res) {
360 printk(KERN_ERR PFX "Failed to register reboot notifier.\n");
361 goto out_noreboot;
362 }
363
364 if (timeout < 0 || timeout > 63) {
365 printk(KERN_ERR PFX "Invalid timeout index (must be 0-63).\n");
366 res = -EINVAL;
367 goto out_noreboot;
368 }
369
370 wd_margin = wd_times[timeout][0];
371 wd_multiplier = wd_times[timeout][1];
372
373 if (wd_multiplier == 1)
374 mseconds = (wd_margin + 1) * 500;
375 else if (wd_multiplier == 2)
376 mseconds = (wd_margin + 1) * 5000;
377 else if (wd_multiplier == 3)
378 mseconds = (wd_margin + 1) * 50000;
379 else if (wd_multiplier == 4)
380 mseconds = (wd_margin + 1) * 100000;
381
382 /* My kingdom for the ability to print "0.5 seconds" in the kernel! */
383 printk(KERN_INFO PFX "Timeout set at %ld ms.\n", mseconds);
384
385 return 0;
386
387 out_noreboot:
388 release_region(SBC8360_ENABLE, 1);
389 release_region(SBC8360_BASETIME, 1);
390 out_noenablereg:
391 out_nobasetimereg:
392 misc_deregister(&sbc8360_miscdev);
393 out_nomisc:
394 return res;
395}
396
397static void __exit sbc8360_exit(void)
398{
399 misc_deregister(&sbc8360_miscdev);
400 unregister_reboot_notifier(&sbc8360_notifier);
401 release_region(SBC8360_ENABLE, 1);
402 release_region(SBC8360_BASETIME, 1);
403}
404
405module_init(sbc8360_init);
406module_exit(sbc8360_exit);
407
408MODULE_AUTHOR("Ian E. Morgan <imorgan@webcon.ca>");
409MODULE_DESCRIPTION("SBC8360 watchdog driver");
410MODULE_LICENSE("GPL");
411MODULE_VERSION("1.0");
412MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
413
414/* end of sbc8360.c */
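For reference, the write/magic-close protocol implemented by sbc8360_write() and sbc8360_close() above can be exercised from userspace roughly as sketched below. The device path, loop count, and ping interval are assumptions for illustration, and the final 'V' write only allows a clean stop when nowayout is 0.

/* Sketch of the ping + magic-close sequence; not part of the patch. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);	/* assumed node path */
	int i;

	if (fd < 0)
		return 1;

	for (i = 0; i < 10; i++) {
		write(fd, "p", 1);	/* any write restarts the countdown */
		sleep(10);
	}

	write(fd, "V", 1);	/* magic character requests a clean close */
	close(fd);		/* with nowayout=0 the timer is disabled here */
	return 0;
}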
diff --git a/drivers/char/watchdog/w83977f_wdt.c b/drivers/char/watchdog/w83977f_wdt.c
new file mode 100644
index 000000000000..a7ff64c8921f
--- /dev/null
+++ b/drivers/char/watchdog/w83977f_wdt.c
@@ -0,0 +1,543 @@
1/*
2 * W83977F Watchdog Timer Driver for Winbond W83977F I/O Chip
3 *
4 * (c) Copyright 2005 Jose Goncalves <jose.goncalves@inov.pt>
5 *
6 * Based on w83877f_wdt.c by Scott Jennings,
7 * and wdt977.c by Woody Suwalski
8 *
9 * -----------------------
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/config.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/fs.h>
24#include <linux/miscdevice.h>
25#include <linux/init.h>
26#include <linux/ioport.h>
27#include <linux/watchdog.h>
28#include <linux/notifier.h>
29#include <linux/reboot.h>
30
31#include <asm/io.h>
32#include <asm/system.h>
33#include <asm/uaccess.h>
34
35#define WATCHDOG_VERSION "1.00"
36#define WATCHDOG_NAME "W83977F WDT"
37#define PFX WATCHDOG_NAME ": "
38#define DRIVER_VERSION WATCHDOG_NAME " driver, v" WATCHDOG_VERSION "\n"
39
40#define IO_INDEX_PORT 0x3F0
41#define IO_DATA_PORT (IO_INDEX_PORT+1)
42
43#define UNLOCK_DATA 0x87
44#define LOCK_DATA 0xAA
45#define DEVICE_REGISTER 0x07
46
47#define DEFAULT_TIMEOUT 45 /* default timeout in seconds */
48
49static int timeout = DEFAULT_TIMEOUT;
50static int timeoutW; /* timeout in watchdog counter units */
51static unsigned long timer_alive;
52static int testmode;
53static char expect_close;
54static spinlock_t spinlock;
55
56module_param(timeout, int, 0);
57MODULE_PARM_DESC(timeout,"Watchdog timeout in seconds (15..7635), default=" __MODULE_STRING(DEFAULT_TIMEOUT) ")");
58module_param(testmode, int, 0);
59MODULE_PARM_DESC(testmode,"Watchdog testmode (1 = no reboot), default=0");
60
61static int nowayout = WATCHDOG_NOWAYOUT;
62module_param(nowayout, int, 0);
63MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
64
65/*
66 * Start the watchdog
67 */
68
69static int wdt_start(void)
70{
71 unsigned long flags;
72
73 spin_lock_irqsave(&spinlock, flags);
74
75 /* Unlock the SuperIO chip */
76 outb_p(UNLOCK_DATA,IO_INDEX_PORT);
77 outb_p(UNLOCK_DATA,IO_INDEX_PORT);
78
79 /*
80 * Select device Aux2 (device=8) to set watchdog regs F2, F3 and F4.
81 * F2 has the timeout in watchdog counter units.
82 * F3 is set to enable watchdog LED blink at timeout.
83 * F4 is used to just clear the TIMEOUT'ed state (bit 0).
84 */
85 outb_p(DEVICE_REGISTER,IO_INDEX_PORT);
86 outb_p(0x08,IO_DATA_PORT);
87 outb_p(0xF2,IO_INDEX_PORT);
88 outb_p(timeoutW,IO_DATA_PORT);
89 outb_p(0xF3,IO_INDEX_PORT);
90 outb_p(0x08,IO_DATA_PORT);
91 outb_p(0xF4,IO_INDEX_PORT);
92 outb_p(0x00,IO_DATA_PORT);
93
94 /* Set device Aux2 active */
95 outb_p(0x30,IO_INDEX_PORT);
96 outb_p(0x01,IO_DATA_PORT);
97
98 /*
99 * Select device Aux1 (dev=7) to set GP16 as the watchdog output
100 * (in reg E6) and GP13 as the watchdog LED output (in reg E3).
101 * Map GP16 at pin 119.
102 * In test mode watch the bit 0 on F4 to indicate "triggered" or
103 * check watchdog LED on SBC.
104 */
105 outb_p(DEVICE_REGISTER,IO_INDEX_PORT);
106 outb_p(0x07,IO_DATA_PORT);
107 if (!testmode)
108 {
109 unsigned pin_map;
110
111 outb_p(0xE6,IO_INDEX_PORT);
112 outb_p(0x0A,IO_DATA_PORT);
113 outb_p(0x2C,IO_INDEX_PORT);
114 pin_map = inb_p(IO_DATA_PORT);
115 pin_map |= 0x10;
116 pin_map &= ~(0x20);
117 outb_p(0x2C,IO_INDEX_PORT);
118 outb_p(pin_map,IO_DATA_PORT);
119 }
120 outb_p(0xE3,IO_INDEX_PORT);
121 outb_p(0x08,IO_DATA_PORT);
122
123 /* Set device Aux1 active */
124 outb_p(0x30,IO_INDEX_PORT);
125 outb_p(0x01,IO_DATA_PORT);
126
127 /* Lock the SuperIO chip */
128 outb_p(LOCK_DATA,IO_INDEX_PORT);
129
130 spin_unlock_irqrestore(&spinlock, flags);
131
132 printk(KERN_INFO PFX "activated.\n");
133
134 return 0;
135}
136
137/*
138 * Stop the watchdog
139 */
140
141static int wdt_stop(void)
142{
143 unsigned long flags;
144
145 spin_lock_irqsave(&spinlock, flags);
146
147 /* Unlock the SuperIO chip */
148 outb_p(UNLOCK_DATA,IO_INDEX_PORT);
149 outb_p(UNLOCK_DATA,IO_INDEX_PORT);
150
151 /*
152 * Select device Aux2 (device=8) to set watchdog regs F2, F3 and F4.
153 * F2 is reset to its default value (watchdog timer disabled).
154 * F3 is reset to its default state.
155 * F4 clears the TIMEOUT'ed state (bit 0) - back to default.
156 */
157 outb_p(DEVICE_REGISTER,IO_INDEX_PORT);
158 outb_p(0x08,IO_DATA_PORT);
159 outb_p(0xF2,IO_INDEX_PORT);
160 outb_p(0xFF,IO_DATA_PORT);
161 outb_p(0xF3,IO_INDEX_PORT);
162 outb_p(0x00,IO_DATA_PORT);
163 outb_p(0xF4,IO_INDEX_PORT);
164 outb_p(0x00,IO_DATA_PORT);
165 outb_p(0xF2,IO_INDEX_PORT);
166 outb_p(0x00,IO_DATA_PORT);
167
168 /*
169 * Select device Aux1 (dev=7) to set GP16 (in reg E6) and
170 * Gp13 (in reg E3) as inputs.
171 */
172 outb_p(DEVICE_REGISTER,IO_INDEX_PORT);
173 outb_p(0x07,IO_DATA_PORT);
174 if (!testmode)
175 {
176 outb_p(0xE6,IO_INDEX_PORT);
177 outb_p(0x01,IO_DATA_PORT);
178 }
179 outb_p(0xE3,IO_INDEX_PORT);
180 outb_p(0x01,IO_DATA_PORT);
181
182 /* Lock the SuperIO chip */
183 outb_p(LOCK_DATA,IO_INDEX_PORT);
184
185 spin_unlock_irqrestore(&spinlock, flags);
186
187 printk(KERN_INFO PFX "shutdown.\n");
188
189 return 0;
190}
191
192/*
193 * Send a keepalive ping to the watchdog
194 * This is done by simply re-writing the timeout to reg. 0xF2
195 */
196
197static int wdt_keepalive(void)
198{
199 unsigned long flags;
200
201 spin_lock_irqsave(&spinlock, flags);
202
203 /* Unlock the SuperIO chip */
204 outb_p(UNLOCK_DATA,IO_INDEX_PORT);
205 outb_p(UNLOCK_DATA,IO_INDEX_PORT);
206
207 /* Select device Aux2 (device=8) to kick watchdog reg F2 */
208 outb_p(DEVICE_REGISTER,IO_INDEX_PORT);
209 outb_p(0x08,IO_DATA_PORT);
210 outb_p(0xF2,IO_INDEX_PORT);
211 outb_p(timeoutW,IO_DATA_PORT);
212
213 /* Lock the SuperIO chip */
214 outb_p(LOCK_DATA,IO_INDEX_PORT);
215
216 spin_unlock_irqrestore(&spinlock, flags);
217
218 return 0;
219}
220
221/*
222 * Set the watchdog timeout value
223 */
224
225static int wdt_set_timeout(int t)
226{
227 int tmrval;
228
229 /*
230 * Convert seconds to watchdog counter time units, rounding up.
231 * On PCM-5335 watchdog units are 30 seconds/step with 15 sec startup
232 * value. This information is supplied in the PCM-5335 manual and was
233 * checked by me on a real board. This is a bit strange because W83977f
234 * datasheet says counter unit is in minutes!
235 */
236 if (t < 15)
237 return -EINVAL;
238
239 tmrval = ((t + 15) + 29) / 30;
240
241 if (tmrval > 255)
242 return -EINVAL;
243
244 /*
245 * timeout is the timeout in seconds,
246 * timeoutW is the timeout in watchdog counter units.
247 */
248 timeoutW = tmrval;
249 timeout = (timeoutW * 30) - 15;
250 return 0;
251}
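/*
 * Example of the rounding above: a request of 60s gives
 * tmrval = ((60 + 15) + 29) / 30 = 3, so the effective timeout written
 * back is 3 * 30 - 15 = 75s, the next representable value.
 */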
252
253/*
254 * Get the watchdog status
255 */
256
257static int wdt_get_status(int *status)
258{
259 int new_status;
260 unsigned long flags;
261
262 spin_lock_irqsave(&spinlock, flags);
263
264 /* Unlock the SuperIO chip */
265 outb_p(UNLOCK_DATA,IO_INDEX_PORT);
266 outb_p(UNLOCK_DATA,IO_INDEX_PORT);
267
268 /* Select device Aux2 (device=8) to read watchdog reg F4 */
269 outb_p(DEVICE_REGISTER,IO_INDEX_PORT);
270 outb_p(0x08,IO_DATA_PORT);
271 outb_p(0xF4,IO_INDEX_PORT);
272 new_status = inb_p(IO_DATA_PORT);
273
274 /* Lock the SuperIO chip */
275 outb_p(LOCK_DATA,IO_INDEX_PORT);
276
277 spin_unlock_irqrestore(&spinlock, flags);
278
279 *status = 0;
280 if (new_status & 1)
281 *status |= WDIOF_CARDRESET;
282
283 return 0;
284}
285
286
287/*
288 * /dev/watchdog handling
289 */
290
291static int wdt_open(struct inode *inode, struct file *file)
292{
293 /* If the watchdog is alive we don't need to start it again */
294 if( test_and_set_bit(0, &timer_alive) )
295 return -EBUSY;
296
297 if (nowayout)
298 __module_get(THIS_MODULE);
299
300 wdt_start();
301 return nonseekable_open(inode, file);
302}
303
304static int wdt_release(struct inode *inode, struct file *file)
305{
306 /*
307 * Shut off the timer.
308 * Lock it in if it's a module and we set nowayout
309 */
310 if (expect_close == 42)
311 {
312 wdt_stop();
313 clear_bit(0, &timer_alive);
314 } else {
315 wdt_keepalive();
316 printk(KERN_CRIT PFX "unexpected close, not stopping watchdog!\n");
317 }
318 expect_close = 0;
319 return 0;
320}
321
322/*
323 * wdt_write:
324 * @file: file handle to the watchdog
325 * @buf: buffer to write (unused, as the data does not matter here)
326 * @count: count of bytes
327 * @ppos: pointer to the position to write. No seeks allowed
328 *
329 * A write to a watchdog device is defined as a keepalive signal. Any
330 * write of data will do, as we don't define content meaning.
331 */
332
333static ssize_t wdt_write(struct file *file, const char __user *buf,
334 size_t count, loff_t *ppos)
335{
336 /* See if we got the magic character 'V' and reload the timer */
337 if(count)
338 {
339 if (!nowayout)
340 {
341 size_t ofs;
342
343 /* note: just in case someone wrote the magic character long ago */
344 expect_close = 0;
345
346 /* scan to see whether or not we got the magic character */
347 for(ofs = 0; ofs != count; ofs++)
348 {
349 char c;
350 if (get_user(c, buf + ofs))
351 return -EFAULT;
352 if (c == 'V') {
353 expect_close = 42;
354 }
355 }
356 }
357
358 /* someone wrote to us, we should restart timer */
359 wdt_keepalive();
360 }
361 return count;
362}
363
364/*
365 * wdt_ioctl:
366 * @inode: inode of the device
367 * @file: file handle to the device
368 * @cmd: watchdog command
369 * @arg: argument pointer
370 *
371 * The watchdog API defines a common set of functions for all watchdogs
372 * according to their available features.
373 */
374
375static struct watchdog_info ident = {
376 .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
377 .firmware_version = 1,
378 .identity = WATCHDOG_NAME,
379};
380
381static int wdt_ioctl(struct inode *inode, struct file *file,
382 unsigned int cmd, unsigned long arg)
383{
384 int status;
385 int new_options, retval = -EINVAL;
386 int new_timeout;
387 union {
388 struct watchdog_info __user *ident;
389 int __user *i;
390 } uarg;
391
392 uarg.i = (int __user *)arg;
393
394 switch(cmd)
395 {
396 default:
397 return -ENOIOCTLCMD;
398
399 case WDIOC_GETSUPPORT:
400 return copy_to_user(uarg.ident, &ident, sizeof(ident)) ? -EFAULT : 0;
401
402 case WDIOC_GETSTATUS:
403 wdt_get_status(&status);
404 return put_user(status, uarg.i);
405
406 case WDIOC_GETBOOTSTATUS:
407 return put_user(0, uarg.i);
408
409 case WDIOC_KEEPALIVE:
410 wdt_keepalive();
411 return 0;
412
413 case WDIOC_SETOPTIONS:
414 if (get_user (new_options, uarg.i))
415 return -EFAULT;
416
417 if (new_options & WDIOS_DISABLECARD) {
418 wdt_stop();
419 retval = 0;
420 }
421
422 if (new_options & WDIOS_ENABLECARD) {
423 wdt_start();
424 retval = 0;
425 }
426
427 return retval;
428
429 case WDIOC_SETTIMEOUT:
430 if (get_user(new_timeout, uarg.i))
431 return -EFAULT;
432
433 if (wdt_set_timeout(new_timeout))
434 return -EINVAL;
435
436 wdt_keepalive();
437 /* Fall */
438
439 case WDIOC_GETTIMEOUT:
440 return put_user(timeout, uarg.i);
441
442 }
443}
444
445static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
446 void *unused)
447{
448 if (code==SYS_DOWN || code==SYS_HALT)
449 wdt_stop();
450 return NOTIFY_DONE;
451}
452
453static struct file_operations wdt_fops=
454{
455 .owner = THIS_MODULE,
456 .llseek = no_llseek,
457 .write = wdt_write,
458 .ioctl = wdt_ioctl,
459 .open = wdt_open,
460 .release = wdt_release,
461};
462
463static struct miscdevice wdt_miscdev=
464{
465 .minor = WATCHDOG_MINOR,
466 .name = "watchdog",
467 .fops = &wdt_fops,
468};
469
470static struct notifier_block wdt_notifier = {
471 .notifier_call = wdt_notify_sys,
472};
473
474static int __init w83977f_wdt_init(void)
475{
476 int rc;
477
478 printk(KERN_INFO PFX DRIVER_VERSION);
479
480 spin_lock_init(&spinlock);
481
482 /*
483 * Check that the timeout value is within its range;
484 * if not, reset to the default
485 */
486 if (wdt_set_timeout(timeout)) {
487 wdt_set_timeout(DEFAULT_TIMEOUT);
488 printk(KERN_INFO PFX "timeout value must be 15<=timeout<=7635, using %d\n",
489 DEFAULT_TIMEOUT);
490 }
491
492 if (!request_region(IO_INDEX_PORT, 2, WATCHDOG_NAME))
493 {
494 printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
495 IO_INDEX_PORT);
496 rc = -EIO;
497 goto err_out;
498 }
499
500 rc = misc_register(&wdt_miscdev);
501 if (rc)
502 {
503 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n",
504 wdt_miscdev.minor, rc);
505 goto err_out_region;
506 }
507
508 rc = register_reboot_notifier(&wdt_notifier);
509 if (rc)
510 {
511 printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n",
512 rc);
513 goto err_out_miscdev;
514 }
515
516 printk(KERN_INFO PFX "initialized. timeout=%d sec (nowayout=%d testmode=%d)\n",
517 timeout, nowayout, testmode);
518
519 return 0;
520
521err_out_miscdev:
522 misc_deregister(&wdt_miscdev);
523err_out_region:
524 release_region(IO_INDEX_PORT,2);
525err_out:
526 return rc;
527}
528
529static void __exit w83977f_wdt_exit(void)
530{
531 wdt_stop();
532 misc_deregister(&wdt_miscdev);
533 unregister_reboot_notifier(&wdt_notifier);
534 release_region(IO_INDEX_PORT,2);
535}
536
537module_init(w83977f_wdt_init);
538module_exit(w83977f_wdt_exit);
539
540MODULE_AUTHOR("Jose Goncalves <jose.goncalves@inov.pt>");
541MODULE_DESCRIPTION("Driver for watchdog timer in W83977F I/O chip");
542MODULE_LICENSE("GPL");
543MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 966632182e2d..9f2f00d82917 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,16 +31,19 @@
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-static void cn_queue_wrapper(void *data)
+void cn_queue_wrapper(void *data)
 {
-	struct cn_callback_entry *cbq = data;
+	struct cn_callback_data *d = data;
 
-	cbq->cb->callback(cbq->cb->priv);
-	cbq->destruct_data(cbq->ddata);
-	cbq->ddata = NULL;
+	d->callback(d->callback_priv);
+
+	d->destruct_data(d->ddata);
+	d->ddata = NULL;
+
+	kfree(d->free);
 }
 
-static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callback *cb)
+static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struct cb_id *id, void (*callback)(void *))
 {
 	struct cn_callback_entry *cbq;
 
@@ -50,8 +53,11 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callbac
 		return NULL;
 	}
 
-	cbq->cb = cb;
-	INIT_WORK(&cbq->work, &cn_queue_wrapper, cbq);
+	snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
+	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
+	cbq->data.callback = callback;
+
+	INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data);
 	return cbq;
 }
 
@@ -68,12 +74,12 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
 	return ((i1->idx == i2->idx) && (i1->val == i2->val));
 }
 
-int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
+int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *))
 {
 	struct cn_callback_entry *cbq, *__cbq;
 	int found = 0;
 
-	cbq = cn_queue_alloc_callback_entry(cb);
+	cbq = cn_queue_alloc_callback_entry(name, id, callback);
 	if (!cbq)
 		return -ENOMEM;
 
@@ -82,7 +88,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
 
 	spin_lock_bh(&dev->queue_lock);
 	list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
-		if (cn_cb_equal(&__cbq->cb->id, &cb->id)) {
+		if (cn_cb_equal(&__cbq->id.id, id)) {
 			found = 1;
 			break;
 		}
@@ -99,7 +105,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb)
 
 	cbq->nls = dev->nls;
 	cbq->seq = 0;
-	cbq->group = cbq->cb->id.idx;
+	cbq->group = cbq->id.id.idx;
 
 	return 0;
 }
@@ -111,7 +117,7 @@ void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
 
 	spin_lock_bh(&dev->queue_lock);
 	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
-		if (cn_cb_equal(&cbq->cb->id, id)) {
+		if (cn_cb_equal(&cbq->id.id, id)) {
 			list_del(&cbq->callback_entry);
 			found = 1;
 			break;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index aaf6d468a8b9..bb0b3a8de14b 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -84,7 +84,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask)
 	spin_lock_bh(&dev->cbdev->queue_lock);
 	list_for_each_entry(__cbq, &dev->cbdev->queue_list,
 			callback_entry) {
-		if (cn_cb_equal(&__cbq->cb->id, &msg->id)) {
+		if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
 			found = 1;
 			group = __cbq->group;
 		}
@@ -127,42 +127,56 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v
 {
 	struct cn_callback_entry *__cbq;
 	struct cn_dev *dev = &cdev;
-	int found = 0;
+	int err = -ENODEV;
 
 	spin_lock_bh(&dev->cbdev->queue_lock);
 	list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
-		if (cn_cb_equal(&__cbq->cb->id, &msg->id)) {
-			/*
-			 * Let's scream if there is some magic and the
-			 * data will arrive asynchronously here.
-			 * [i.e. netlink messages will be queued].
-			 * After the first warning I will fix it
-			 * quickly, but now I think it is
-			 * impossible. --zbr (2004_04_27).
-			 */
+		if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
 			if (likely(!test_bit(0, &__cbq->work.pending) &&
-					__cbq->ddata == NULL)) {
-				__cbq->cb->priv = msg;
+					__cbq->data.ddata == NULL)) {
+				__cbq->data.callback_priv = msg;
 
-				__cbq->ddata = data;
-				__cbq->destruct_data = destruct_data;
+				__cbq->data.ddata = data;
+				__cbq->data.destruct_data = destruct_data;
 
 				if (queue_work(dev->cbdev->cn_queue,
 							&__cbq->work))
-					found = 1;
+					err = 0;
 			} else {
-				printk("%s: cbq->data=%p, "
-					"work->pending=%08lx.\n",
-					__func__, __cbq->ddata,
-					__cbq->work.pending);
-				WARN_ON(1);
+				struct work_struct *w;
+				struct cn_callback_data *d;
+
+				w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC);
+				if (w) {
+					d = (struct cn_callback_data *)(w+1);
+
+					d->callback_priv = msg;
+					d->callback = __cbq->data.callback;
+					d->ddata = data;
+					d->destruct_data = destruct_data;
+					d->free = w;
+
+					INIT_LIST_HEAD(&w->entry);
+					w->pending = 0;
+					w->func = &cn_queue_wrapper;
+					w->data = d;
+					init_timer(&w->timer);
+
+					if (queue_work(dev->cbdev->cn_queue, w))
+						err = 0;
+					else {
+						kfree(w);
+						err = -EINVAL;
+					}
+				} else
+					err = -ENOMEM;
 			}
 			break;
 		}
 	}
 	spin_unlock_bh(&dev->cbdev->queue_lock);
 
-	return found ? 0 : -ENODEV;
+	return err;
 }
 
 /*
@@ -291,22 +305,10 @@ int cn_add_callback(struct cb_id *id, char *name, void (*callback)(void *))
 {
 	int err;
 	struct cn_dev *dev = &cdev;
-	struct cn_callback *cb;
-
-	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
-	if (!cb)
-		return -ENOMEM;
-
-	scnprintf(cb->name, sizeof(cb->name), "%s", name);
 
-	memcpy(&cb->id, id, sizeof(cb->id));
-	cb->callback = callback;
-
-	err = cn_queue_add_callback(dev->cbdev, cb);
-	if (err) {
-		kfree(cb);
+	err = cn_queue_add_callback(dev->cbdev, name, id, callback);
+	if (err)
 		return err;
-	}
 
 	cn_notify(id, 0);
 
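With the reworked API above, a connector client passes the name, id and callback straight to cn_add_callback(), whose new signature appears in the hunk header. A rough sketch follows; the id values and the module boilerplate are placeholders, and cn_del_callback() is assumed available for cleanup.

/* Hypothetical connector client using the new registration call. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/connector.h>

static struct cb_id example_id = { .idx = 0x123, .val = 0x1 };	/* placeholder values */

static void example_callback(void *data)
{
	struct cn_msg *msg = data;

	printk(KERN_INFO "example: received %d byte payload\n", msg->len);
}

static int __init example_init(void)
{
	return cn_add_callback(&example_id, "example", example_callback);
}

static void __exit example_exit(void)
{
	cn_del_callback(&example_id);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");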
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index 3b865f34a095..b66782398258 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -50,7 +50,7 @@
50MODULE_AUTHOR("Abhay Salunke <abhay_salunke@dell.com>"); 50MODULE_AUTHOR("Abhay Salunke <abhay_salunke@dell.com>");
51MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems"); 51MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems");
52MODULE_LICENSE("GPL"); 52MODULE_LICENSE("GPL");
53MODULE_VERSION("1.0"); 53MODULE_VERSION("2.0");
54 54
55#define BIOS_SCAN_LIMIT 0xffffffff 55#define BIOS_SCAN_LIMIT 0xffffffff
56#define MAX_IMAGE_LENGTH 16 56#define MAX_IMAGE_LENGTH 16
@@ -65,10 +65,11 @@ static struct _rbu_data {
65 unsigned long packet_write_count; 65 unsigned long packet_write_count;
66 unsigned long num_packets; 66 unsigned long num_packets;
67 unsigned long packetsize; 67 unsigned long packetsize;
68 int entry_created;
68} rbu_data; 69} rbu_data;
69 70
70static char image_type[MAX_IMAGE_LENGTH] = "mono"; 71static char image_type[MAX_IMAGE_LENGTH + 1] = "mono";
71module_param_string(image_type, image_type, sizeof(image_type), 0); 72module_param_string(image_type, image_type, sizeof (image_type), 0);
72MODULE_PARM_DESC(image_type, "BIOS image type. choose- mono or packet"); 73MODULE_PARM_DESC(image_type, "BIOS image type. choose- mono or packet");
73 74
74struct packet_data { 75struct packet_data {
@@ -114,7 +115,7 @@ static int fill_last_packet(void *data, size_t length)
114 115
115 if ((rbu_data.packet_write_count + length) > rbu_data.packetsize) { 116 if ((rbu_data.packet_write_count + length) > rbu_data.packetsize) {
116 pr_debug("dell_rbu:%s: packet size data " 117 pr_debug("dell_rbu:%s: packet size data "
117 "overrun\n", __FUNCTION__); 118 "overrun\n", __FUNCTION__);
118 return -EINVAL; 119 return -EINVAL;
119 } 120 }
120 121
@@ -146,12 +147,14 @@ static int create_packet(size_t length)
146 pr_debug("create_packet: packetsize not specified\n"); 147 pr_debug("create_packet: packetsize not specified\n");
147 return -EINVAL; 148 return -EINVAL;
148 } 149 }
150 spin_unlock(&rbu_data.lock);
151 newpacket = kmalloc(sizeof (struct packet_data), GFP_KERNEL);
152 spin_lock(&rbu_data.lock);
149 153
150 newpacket = kmalloc(sizeof(struct packet_data), GFP_KERNEL);
151 if (!newpacket) { 154 if (!newpacket) {
152 printk(KERN_WARNING 155 printk(KERN_WARNING
153 "dell_rbu:%s: failed to allocate new " 156 "dell_rbu:%s: failed to allocate new "
154 "packet\n", __FUNCTION__); 157 "packet\n", __FUNCTION__);
155 return -ENOMEM; 158 return -ENOMEM;
156 } 159 }
157 160
@@ -160,15 +163,17 @@ static int create_packet(size_t length)
160 * there is no upper limit on memory 163 * there is no upper limit on memory
161 * address for packetized mechanism 164 * address for packetized mechanism
162 */ 165 */
163 newpacket->data = (unsigned char *)__get_free_pages(GFP_KERNEL, 166 spin_unlock(&rbu_data.lock);
164 ordernum); 167 newpacket->data = (unsigned char *) __get_free_pages(GFP_KERNEL,
168 ordernum);
169 spin_lock(&rbu_data.lock);
165 170
166 pr_debug("create_packet: newpacket %p\n", newpacket->data); 171 pr_debug("create_packet: newpacket %p\n", newpacket->data);
167 172
168 if (!newpacket->data) { 173 if (!newpacket->data) {
169 printk(KERN_WARNING 174 printk(KERN_WARNING
170 "dell_rbu:%s: failed to allocate new " 175 "dell_rbu:%s: failed to allocate new "
171 "packet\n", __FUNCTION__); 176 "packet\n", __FUNCTION__);
172 kfree(newpacket); 177 kfree(newpacket);
173 return -ENOMEM; 178 return -ENOMEM;
174 } 179 }
@@ -204,9 +209,8 @@ static int packetize_data(void *data, size_t length)
204 return rc; 209 return rc;
205} 210}
206 211
207static int 212static int do_packet_read(char *data, struct list_head *ptemp_list,
208do_packet_read(char *data, struct list_head *ptemp_list, 213 int length, int bytes_read, int *list_read_count)
209 int length, int bytes_read, int *list_read_count)
210{ 214{
211 void *ptemp_buf; 215 void *ptemp_buf;
212 struct packet_data *newpacket = NULL; 216 struct packet_data *newpacket = NULL;
@@ -239,7 +243,7 @@ do_packet_read(char *data, struct list_head *ptemp_list,
239 return bytes_copied; 243 return bytes_copied;
240} 244}
241 245
242static int packet_read_list(char *data, size_t * pread_length) 246static int packet_read_list(char *data, size_t *pread_length)
243{ 247{
244 struct list_head *ptemp_list; 248 struct list_head *ptemp_list;
245 int temp_count = 0; 249 int temp_count = 0;
@@ -258,8 +262,7 @@ static int packet_read_list(char *data, size_t * pread_length)
258 ptemp_list = (&packet_data_head.list)->next; 262 ptemp_list = (&packet_data_head.list)->next;
259 while (!list_empty(ptemp_list)) { 263 while (!list_empty(ptemp_list)) {
260 bytes_copied = do_packet_read(pdest, ptemp_list, 264 bytes_copied = do_packet_read(pdest, ptemp_list,
261 remaining_bytes, bytes_read, 265 remaining_bytes, bytes_read, &temp_count);
262 &temp_count);
263 remaining_bytes -= bytes_copied; 266 remaining_bytes -= bytes_copied;
264 bytes_read += bytes_copied; 267 bytes_read += bytes_copied;
265 pdest += bytes_copied; 268 pdest += bytes_copied;
@@ -287,7 +290,7 @@ static void packet_empty_list(void)
287 ptemp_list = (&packet_data_head.list)->next; 290 ptemp_list = (&packet_data_head.list)->next;
288 while (!list_empty(ptemp_list)) { 291 while (!list_empty(ptemp_list)) {
289 newpacket = 292 newpacket =
290 list_entry(ptemp_list, struct packet_data, list); 293 list_entry(ptemp_list, struct packet_data, list);
291 pnext_list = ptemp_list->next; 294 pnext_list = ptemp_list->next;
292 list_del(ptemp_list); 295 list_del(ptemp_list);
293 ptemp_list = pnext_list; 296 ptemp_list = pnext_list;
@@ -296,8 +299,8 @@ static void packet_empty_list(void)
296 * to make sure there are no stale RBU packets left in memory 299 * to make sure there are no stale RBU packets left in memory
297 */ 300 */
298 memset(newpacket->data, 0, rbu_data.packetsize); 301 memset(newpacket->data, 0, rbu_data.packetsize);
299 free_pages((unsigned long)newpacket->data, 302 free_pages((unsigned long) newpacket->data,
300 newpacket->ordernum); 303 newpacket->ordernum);
301 kfree(newpacket); 304 kfree(newpacket);
302 } 305 }
303 rbu_data.packet_write_count = 0; 306 rbu_data.packet_write_count = 0;
@@ -319,14 +322,13 @@ static void img_update_free(void)
319 * BIOS image copied in memory. 322 * BIOS image copied in memory.
320 */ 323 */
321 memset(rbu_data.image_update_buffer, 0, 324 memset(rbu_data.image_update_buffer, 0,
322 rbu_data.image_update_buffer_size); 325 rbu_data.image_update_buffer_size);
323 if (rbu_data.dma_alloc == 1) 326 if (rbu_data.dma_alloc == 1)
324 dma_free_coherent(NULL, rbu_data.bios_image_size, 327 dma_free_coherent(NULL, rbu_data.bios_image_size,
325 rbu_data.image_update_buffer, 328 rbu_data.image_update_buffer, dell_rbu_dmaaddr);
326 dell_rbu_dmaaddr);
327 else 329 else
328 free_pages((unsigned long)rbu_data.image_update_buffer, 330 free_pages((unsigned long) rbu_data.image_update_buffer,
329 rbu_data.image_update_ordernum); 331 rbu_data.image_update_ordernum);
330 332
331 /* 333 /*
332 * Re-initialize the rbu_data variables after a free 334 * Re-initialize the rbu_data variables after a free
@@ -366,7 +368,7 @@ static int img_update_realloc(unsigned long size)
366 */ 368 */
367 if ((size != 0) && (rbu_data.image_update_buffer == NULL)) { 369 if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
368 printk(KERN_ERR "dell_rbu:%s: corruption " 370 printk(KERN_ERR "dell_rbu:%s: corruption "
369 "check failed\n", __FUNCTION__); 371 "check failed\n", __FUNCTION__);
370 return -EINVAL; 372 return -EINVAL;
371 } 373 }
372 /* 374 /*
@@ -385,17 +387,16 @@ static int img_update_realloc(unsigned long size)
385 387
386 ordernum = get_order(size); 388 ordernum = get_order(size);
387 image_update_buffer = 389 image_update_buffer =
388 (unsigned char *)__get_free_pages(GFP_KERNEL, ordernum); 390 (unsigned char *) __get_free_pages(GFP_KERNEL, ordernum);
389 391
390 img_buf_phys_addr = 392 img_buf_phys_addr =
391 (unsigned long)virt_to_phys(image_update_buffer); 393 (unsigned long) virt_to_phys(image_update_buffer);
392 394
393 if (img_buf_phys_addr > BIOS_SCAN_LIMIT) { 395 if (img_buf_phys_addr > BIOS_SCAN_LIMIT) {
394 free_pages((unsigned long)image_update_buffer, ordernum); 396 free_pages((unsigned long) image_update_buffer, ordernum);
395 ordernum = -1; 397 ordernum = -1;
396 image_update_buffer = dma_alloc_coherent(NULL, size, 398 image_update_buffer = dma_alloc_coherent(NULL, size,
397 &dell_rbu_dmaaddr, 399 &dell_rbu_dmaaddr, GFP_KERNEL);
398 GFP_KERNEL);
399 dma_alloc = 1; 400 dma_alloc = 1;
400 } 401 }
401 402
@@ -405,13 +406,13 @@ static int img_update_realloc(unsigned long size)
405 rbu_data.image_update_buffer = image_update_buffer; 406 rbu_data.image_update_buffer = image_update_buffer;
406 rbu_data.image_update_buffer_size = size; 407 rbu_data.image_update_buffer_size = size;
407 rbu_data.bios_image_size = 408 rbu_data.bios_image_size =
408 rbu_data.image_update_buffer_size; 409 rbu_data.image_update_buffer_size;
409 rbu_data.image_update_ordernum = ordernum; 410 rbu_data.image_update_ordernum = ordernum;
410 rbu_data.dma_alloc = dma_alloc; 411 rbu_data.dma_alloc = dma_alloc;
411 rc = 0; 412 rc = 0;
412 } else { 413 } else {
413 pr_debug("Not enough memory for image update:" 414 pr_debug("Not enough memory for image update:"
414 "size = %ld\n", size); 415 "size = %ld\n", size);
415 rc = -ENOMEM; 416 rc = -ENOMEM;
416 } 417 }
417 418
@@ -438,7 +439,7 @@ static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count)
438 if (pos > imagesize) { 439 if (pos > imagesize) {
439 retval = 0; 440 retval = 0;
440 printk(KERN_WARNING "dell_rbu:read_packet_data: " 441 printk(KERN_WARNING "dell_rbu:read_packet_data: "
441 "data underrun\n"); 442 "data underrun\n");
442 goto read_rbu_data_exit; 443 goto read_rbu_data_exit;
443 } 444 }
444 445
@@ -468,11 +469,11 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
468 469
469 /* check to see if we have something to return */ 470 /* check to see if we have something to return */
470 if ((rbu_data.image_update_buffer == NULL) || 471 if ((rbu_data.image_update_buffer == NULL) ||
471 (rbu_data.bios_image_size == 0)) { 472 (rbu_data.bios_image_size == 0)) {
472 pr_debug("read_rbu_data_mono: image_update_buffer %p ," 473 pr_debug("read_rbu_data_mono: image_update_buffer %p ,"
473 "bios_image_size %lu\n", 474 "bios_image_size %lu\n",
474 rbu_data.image_update_buffer, 475 rbu_data.image_update_buffer,
475 rbu_data.bios_image_size); 476 rbu_data.bios_image_size);
476 ret_count = -ENOMEM; 477 ret_count = -ENOMEM;
477 goto read_rbu_data_exit; 478 goto read_rbu_data_exit;
478 } 479 }
@@ -497,8 +498,8 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
497 return ret_count; 498 return ret_count;
498} 499}
499 500
500static ssize_t 501static ssize_t read_rbu_data(struct kobject *kobj, char *buffer,
501read_rbu_data(struct kobject *kobj, char *buffer, loff_t pos, size_t count) 502 loff_t pos, size_t count)
502{ 503{
503 ssize_t ret_count = 0; 504 ssize_t ret_count = 0;
504 505
@@ -515,62 +516,20 @@ read_rbu_data(struct kobject *kobj, char *buffer, loff_t pos, size_t count)
515 return ret_count; 516 return ret_count;
516} 517}
517 518
518static ssize_t
519read_rbu_image_type(struct kobject *kobj, char *buffer, loff_t pos,
520 size_t count)
521{
522 int size = 0;
523 if (!pos)
524 size = sprintf(buffer, "%s\n", image_type);
525 return size;
526}
527
528static ssize_t
529write_rbu_image_type(struct kobject *kobj, char *buffer, loff_t pos,
530 size_t count)
531{
532 int rc = count;
533 spin_lock(&rbu_data.lock);
534
535 if (strlen(buffer) < MAX_IMAGE_LENGTH)
536 sscanf(buffer, "%s", image_type);
537 else
538 printk(KERN_WARNING "dell_rbu: image_type is invalid"
539 "max chars = %d, \n incoming str--%s-- \n",
540 MAX_IMAGE_LENGTH, buffer);
541
542 /* we must free all previous allocations */
543 packet_empty_list();
544 img_update_free();
545
546 spin_unlock(&rbu_data.lock);
547 return rc;
548
549}
550
551static struct bin_attribute rbu_data_attr = {
552 .attr = {.name = "data",.owner = THIS_MODULE,.mode = 0444},
553 .read = read_rbu_data,
554};
555
556static struct bin_attribute rbu_image_type_attr = {
557 .attr = {.name = "image_type",.owner = THIS_MODULE,.mode = 0644},
558 .read = read_rbu_image_type,
559 .write = write_rbu_image_type,
560};
561
562static void callbackfn_rbu(const struct firmware *fw, void *context) 519static void callbackfn_rbu(const struct firmware *fw, void *context)
563{ 520{
564 int rc = 0; 521 int rc = 0;
565 522
566 if (!fw || !fw->size) 523 if (!fw || !fw->size) {
524 rbu_data.entry_created = 0;
567 return; 525 return;
526 }
568 527
569 spin_lock(&rbu_data.lock); 528 spin_lock(&rbu_data.lock);
570 if (!strcmp(image_type, "mono")) { 529 if (!strcmp(image_type, "mono")) {
571 if (!img_update_realloc(fw->size)) 530 if (!img_update_realloc(fw->size))
572 memcpy(rbu_data.image_update_buffer, 531 memcpy(rbu_data.image_update_buffer,
573 fw->data, fw->size); 532 fw->data, fw->size);
574 } else if (!strcmp(image_type, "packet")) { 533 } else if (!strcmp(image_type, "packet")) {
575 if (!rbu_data.packetsize) 534 if (!rbu_data.packetsize)
576 rbu_data.packetsize = fw->size; 535 rbu_data.packetsize = fw->size;
@@ -584,14 +543,103 @@ static void callbackfn_rbu(const struct firmware *fw, void *context)
584 spin_unlock(&rbu_data.lock); 543 spin_unlock(&rbu_data.lock);
585 544
586 rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, 545 rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
587 "dell_rbu", &rbu_device->dev, 546 "dell_rbu", &rbu_device->dev, &context, callbackfn_rbu);
588 &context, callbackfn_rbu);
589 if (rc) 547 if (rc)
590 printk(KERN_ERR 548 printk(KERN_ERR
591 "dell_rbu:%s request_firmware_nowait failed" 549 "dell_rbu:%s request_firmware_nowait failed"
592 " %d\n", __FUNCTION__, rc); 550 " %d\n", __FUNCTION__, rc);
551 else
552 rbu_data.entry_created = 1;
553}
554
555static ssize_t read_rbu_image_type(struct kobject *kobj, char *buffer,
556 loff_t pos, size_t count)
557{
558 int size = 0;
559 if (!pos)
560 size = sprintf(buffer, "%s\n", image_type);
561 return size;
562}
563
564static ssize_t write_rbu_image_type(struct kobject *kobj, char *buffer,
565 loff_t pos, size_t count)
566{
567 int rc = count;
568 int req_firm_rc = 0;
569 int i;
570 spin_lock(&rbu_data.lock);
571 /*
572 * Find the first newline or space
573 */
574 for (i = 0; i < count; ++i)
575 if (buffer[i] == '\n' || buffer[i] == ' ') {
576 buffer[i] = '\0';
577 break;
578 }
579 if (i == count)
580 buffer[count] = '\0';
581
582 if (strstr(buffer, "mono"))
583 strcpy(image_type, "mono");
584 else if (strstr(buffer, "packet"))
585 strcpy(image_type, "packet");
586 else if (strstr(buffer, "init")) {
587 /*
588 * If due to the user error the driver gets in a bad
589 * state where even though it is loaded , the
590 * /sys/class/firmware/dell_rbu entries are missing.
591 * to cover this situation the user can recreate entries
592 * by writing init to image_type.
593 */
594 if (!rbu_data.entry_created) {
595 spin_unlock(&rbu_data.lock);
596 req_firm_rc = request_firmware_nowait(THIS_MODULE,
597 FW_ACTION_NOHOTPLUG, "dell_rbu",
598 &rbu_device->dev, &context,
599 callbackfn_rbu);
600 if (req_firm_rc) {
601 printk(KERN_ERR
602 "dell_rbu:%s request_firmware_nowait"
603 " failed %d\n", __FUNCTION__, rc);
604 rc = -EIO;
605 } else
606 rbu_data.entry_created = 1;
607
608 spin_lock(&rbu_data.lock);
609 }
610 } else {
611 printk(KERN_WARNING "dell_rbu: image_type is invalid\n");
612 spin_unlock(&rbu_data.lock);
613 return -EINVAL;
614 }
615
616 /* we must free all previous allocations */
617 packet_empty_list();
618 img_update_free();
619 spin_unlock(&rbu_data.lock);
620
621 return rc;
593} 622}
594 623
624static struct bin_attribute rbu_data_attr = {
625 .attr = {
626 .name = "data",
627 .owner = THIS_MODULE,
628 .mode = 0444,
629 },
630 .read = read_rbu_data,
631};
632
633static struct bin_attribute rbu_image_type_attr = {
634 .attr = {
635 .name = "image_type",
636 .owner = THIS_MODULE,
637 .mode = 0644,
638 },
639 .read = read_rbu_image_type,
640 .write = write_rbu_image_type,
641};
642
595static int __init dcdrbu_init(void) 643static int __init dcdrbu_init(void)
596{ 644{
597 int rc = 0; 645 int rc = 0;
@@ -599,11 +647,11 @@ static int __init dcdrbu_init(void)
599 647
600 init_packet_head(); 648 init_packet_head();
601 rbu_device = 649 rbu_device =
602 platform_device_register_simple("dell_rbu", -1, NULL, 0); 650 platform_device_register_simple("dell_rbu", -1, NULL, 0);
603 if (!rbu_device) { 651 if (!rbu_device) {
604 printk(KERN_ERR 652 printk(KERN_ERR
605 "dell_rbu:%s:platform_device_register_simple " 653 "dell_rbu:%s:platform_device_register_simple "
606 "failed\n", __FUNCTION__); 654 "failed\n", __FUNCTION__);
607 return -EIO; 655 return -EIO;
608 } 656 }
609 657
@@ -611,11 +659,12 @@ static int __init dcdrbu_init(void)
611 sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr); 659 sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
612 660
613 rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, 661 rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
614 "dell_rbu", &rbu_device->dev, 662 "dell_rbu", &rbu_device->dev, &context, callbackfn_rbu);
615 &context, callbackfn_rbu);
616 if (rc) 663 if (rc)
617 printk(KERN_ERR "dell_rbu:%s:request_firmware_nowait" 664 printk(KERN_ERR "dell_rbu:%s:request_firmware_nowait"
618 " failed %d\n", __FUNCTION__, rc); 665 " failed %d\n", __FUNCTION__, rc);
666 else
667 rbu_data.entry_created = 1;
619 668
620 return rc; 669 return rc;
621 670
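For a sense of how the reworked image_type attribute above is driven from user space, here is a minimal sketch. It relies only on what the patch itself establishes: the attribute lives on the "dell_rbu" platform device and accepts the strings "mono", "packet", or "init". The exact sysfs path below is an assumption derived from that device name, not something the patch spells out.

        /* Hypothetical user-space sketch: select the monolithic update mode by
         * writing "mono" to the image_type attribute added above. The sysfs
         * path is an assumed location for the "dell_rbu" platform device. */
        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>

        int main(void)
        {
                const char *path = "/sys/devices/platform/dell_rbu/image_type";
                int fd = open(path, O_WRONLY);

                if (fd < 0) {
                        perror("open image_type");
                        return 1;
                }
                if (write(fd, "mono\n", 5) != 5)
                        perror("write image_type");
                close(fd);
                return 0;
        }

Writing "init" to the same attribute, as the comment in write_rbu_image_type() describes, re-requests the firmware entries when they have gone missing.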
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 7e72e922b41c..db358cfa7cbf 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -418,12 +418,11 @@ config SENSORS_HDAPS
418 help 418 help
419 This driver provides support for the IBM Hard Drive Active Protection 419 This driver provides support for the IBM Hard Drive Active Protection
420 System (hdaps), which provides an accelerometer and other misc. data. 420 System (hdaps), which provides an accelerometer and other misc. data.
421 Supported laptops include the IBM ThinkPad T41, T42, T43, and R51. 421 ThinkPads starting with the R50, T41, and X40 are supported. The
422 The accelerometer data is readable via sysfs. 422 accelerometer data is readable via sysfs.
423 423
424 This driver also provides an input class device, allowing the 424 This driver also provides an absolute input class device, allowing
425 laptop to act as a pinball machine-esque mouse. This is off by 425 the laptop to act as a pinball machine-esque joystick.
426 default but enabled via sysfs or the module parameter "mousedev".
427 426
428 Say Y here if you have an applicable laptop and want to experience 427 Say Y here if you have an applicable laptop and want to experience
429 the awesome power of hdaps. 428 the awesome power of hdaps.
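The help text's claim that the accelerometer data is readable via sysfs can be illustrated with a small sketch. The path below assumes the "hdaps" platform device and its position attribute registered later in this patch; since the exact output format of that attribute is not shown here, the sketch just prints the raw string.

        /* Hypothetical sketch: dump the raw contents of the hdaps position
         * attribute. The path assumes the "hdaps" platform device. */
        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                char buf[64];
                ssize_t n;
                int fd = open("/sys/devices/platform/hdaps/position", O_RDONLY);

                if (fd < 0) {
                        perror("open position");
                        return 1;
                }
                n = read(fd, buf, sizeof(buf) - 1);
                if (n > 0) {
                        buf[n] = '\0';
                        printf("position: %s", buf);
                }
                close(fd);
                return 0;
        }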
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c
index eaebfc14c933..7f0107613827 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/hwmon/hdaps.c
@@ -4,11 +4,11 @@
4 * Copyright (C) 2005 Robert Love <rml@novell.com> 4 * Copyright (C) 2005 Robert Love <rml@novell.com>
5 * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com> 5 * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
6 * 6 *
7 * The HardDisk Active Protection System (hdaps) is present in the IBM ThinkPad 7 * The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads
8 * T41, T42, T43, R51, and X40, at least. It provides a basic two-axis 8 * starting with the R40, T41, and X40. It provides a basic two-axis
9 * accelerometer and other data, such as the device's temperature. 9 * accelerometer and other data, such as the device's temperature.
10 * 10 *
11 * Based on the document by Mark A. Smith available at 11 * This driver is based on the document by Mark A. Smith available at
12 * http://www.almaden.ibm.com/cs/people/marksmith/tpaps.html and a lot of trial 12 * http://www.almaden.ibm.com/cs/people/marksmith/tpaps.html and a lot of trial
13 * and error. 13 * and error.
14 * 14 *
@@ -36,12 +36,7 @@
36#include <asm/io.h> 36#include <asm/io.h>
37 37
38#define HDAPS_LOW_PORT 0x1600 /* first port used by hdaps */ 38#define HDAPS_LOW_PORT 0x1600 /* first port used by hdaps */
39#define HDAPS_NR_PORTS 0x30 /* 0x1600 - 0x162f */ 39#define HDAPS_NR_PORTS 0x30 /* number of ports: 0x1600 - 0x162f */
40
41#define STATE_FRESH 0x50 /* accelerometer data is fresh */
42
43#define REFRESH_ASYNC 0x00 /* do asynchronous refresh */
44#define REFRESH_SYNC 0x01 /* do synchronous refresh */
45 40
46#define HDAPS_PORT_STATE 0x1611 /* device state */ 41#define HDAPS_PORT_STATE 0x1611 /* device state */
47#define HDAPS_PORT_YPOS 0x1612 /* y-axis position */ 42#define HDAPS_PORT_YPOS 0x1612 /* y-axis position */
@@ -53,7 +48,7 @@
53#define HDAPS_PORT_UNKNOWN 0x161c /* what is this? */ 48#define HDAPS_PORT_UNKNOWN 0x161c /* what is this? */
54#define HDAPS_PORT_KMACT 0x161d /* keyboard or mouse activity */ 49#define HDAPS_PORT_KMACT 0x161d /* keyboard or mouse activity */
55 50
56#define HDAPS_READ_MASK 0xff /* some reads have the low 8 bits set */ 51#define STATE_FRESH 0x50 /* accelerometer data is fresh */
57 52
58#define KEYBD_MASK 0x20 /* set if keyboard activity */ 53#define KEYBD_MASK 0x20 /* set if keyboard activity */
59#define MOUSE_MASK 0x40 /* set if mouse activity */ 54#define MOUSE_MASK 0x40 /* set if mouse activity */
@@ -63,12 +58,11 @@
63#define INIT_TIMEOUT_MSECS 4000 /* wait up to 4s for device init ... */ 58#define INIT_TIMEOUT_MSECS 4000 /* wait up to 4s for device init ... */
64#define INIT_WAIT_MSECS 200 /* ... in 200ms increments */ 59#define INIT_WAIT_MSECS 200 /* ... in 200ms increments */
65 60
66static struct platform_device *pdev; 61#define HDAPS_POLL_PERIOD (HZ/20) /* poll for input every 1/20s */
67static struct input_dev hdaps_idev; 62#define HDAPS_INPUT_FUZZ 4 /* input event threshold */
63
68static struct timer_list hdaps_timer; 64static struct timer_list hdaps_timer;
69static unsigned int hdaps_mousedev_threshold = 4; 65static struct platform_device *pdev;
70static unsigned long hdaps_poll_ms = 50;
71static unsigned int hdaps_mousedev;
72static unsigned int hdaps_invert; 66static unsigned int hdaps_invert;
73static u8 km_activity; 67static u8 km_activity;
74static int rest_x; 68static int rest_x;
@@ -81,14 +75,14 @@ static DECLARE_MUTEX(hdaps_sem);
81 */ 75 */
82static inline u8 __get_latch(u16 port) 76static inline u8 __get_latch(u16 port)
83{ 77{
84 return inb(port) & HDAPS_READ_MASK; 78 return inb(port) & 0xff;
85} 79}
86 80
87/* 81/*
88 * __check_latch - Check a port latch for a given value. Callers must hold 82 * __check_latch - Check a port latch for a given value. Returns zero if the
89 * hdaps_sem. Returns zero if the port contains the given value. 83 * port contains the given value. Callers must hold hdaps_sem.
90 */ 84 */
91static inline unsigned int __check_latch(u16 port, u8 val) 85static inline int __check_latch(u16 port, u8 val)
92{ 86{
93 if (__get_latch(port) == val) 87 if (__get_latch(port) == val)
94 return 0; 88 return 0;
@@ -99,7 +93,7 @@ static inline unsigned int __check_latch(u16 port, u8 val)
99 * __wait_latch - Wait up to 100us for a port latch to get a certain value, 93 * __wait_latch - Wait up to 100us for a port latch to get a certain value,
100 * returning zero if the value is obtained. Callers must hold hdaps_sem. 94 * returning zero if the value is obtained. Callers must hold hdaps_sem.
101 */ 95 */
102static unsigned int __wait_latch(u16 port, u8 val) 96static int __wait_latch(u16 port, u8 val)
103{ 97{
104 unsigned int i; 98 unsigned int i;
105 99
@@ -109,59 +103,42 @@ static unsigned int __wait_latch(u16 port, u8 val)
109 udelay(5); 103 udelay(5);
110 } 104 }
111 105
112 return -EINVAL; 106 return -EIO;
113} 107}
114 108
115/* 109/*
116 * __device_refresh - Request a refresh from the accelerometer. 110 * __device_refresh - request a refresh from the accelerometer. Does not wait
117 * 111 * for refresh to complete. Callers must hold hdaps_sem.
118 * If sync is REFRESH_SYNC, we perform a synchronous refresh and will wait.
119 * Returns zero if successful and nonzero on error.
120 *
121 * If sync is REFRESH_ASYNC, we merely kick off a new refresh if the device is
122 * not up-to-date. Always returns zero.
123 *
124 * Callers must hold hdaps_sem.
125 */ 112 */
126static int __device_refresh(unsigned int sync) 113static void __device_refresh(void)
127{ 114{
128 u8 state; 115 udelay(200);
129 116 if (inb(0x1604) != STATE_FRESH) {
130 udelay(100); 117 outb(0x11, 0x1610);
131 118 outb(0x01, 0x161f);
132 state = inb(0x1604); 119 }
133 if (state == STATE_FRESH) 120}
134 return 0;
135
136 outb(0x11, 0x1610);
137 outb(0x01, 0x161f);
138 if (sync == REFRESH_ASYNC)
139 return 0;
140 121
122/*
123 * __device_refresh_sync - request a synchronous refresh from the
124 * accelerometer. We wait for the refresh to complete. Returns zero if
125 * successful and nonzero on error. Callers must hold hdaps_sem.
126 */
127static int __device_refresh_sync(void)
128{
129 __device_refresh();
141 return __wait_latch(0x1604, STATE_FRESH); 130 return __wait_latch(0x1604, STATE_FRESH);
142} 131}
143 132
144/* 133/*
145 * __device_complete - Indicate to the accelerometer that we are done reading 134 * __device_complete - indicate to the accelerometer that we are done reading
146 * data, and then initiate an async refresh. Callers must hold hdaps_sem. 135 * data, and then initiate an async refresh. Callers must hold hdaps_sem.
147 */ 136 */
148static inline void __device_complete(void) 137static inline void __device_complete(void)
149{ 138{
150 inb(0x161f); 139 inb(0x161f);
151 inb(0x1604); 140 inb(0x1604);
152 __device_refresh(REFRESH_ASYNC); 141 __device_refresh();
153}
154
155static int __hdaps_readb_one(unsigned int port, u8 *val)
156{
157 /* do a sync refresh -- we need to be sure that we read fresh data */
158 if (__device_refresh(REFRESH_SYNC))
159 return -EIO;
160
161 *val = inb(port);
162 __device_complete();
163
164 return 0;
165} 142}
166 143
167/* 144/*
@@ -174,17 +151,26 @@ static int hdaps_readb_one(unsigned int port, u8 *val)
174 int ret; 151 int ret;
175 152
176 down(&hdaps_sem); 153 down(&hdaps_sem);
177 ret = __hdaps_readb_one(port, val);
178 up(&hdaps_sem);
179 154
155 /* do a sync refresh -- we need to be sure that we read fresh data */
156 ret = __device_refresh_sync();
157 if (ret)
158 goto out;
159
160 *val = inb(port);
161 __device_complete();
162
163out:
164 up(&hdaps_sem);
180 return ret; 165 return ret;
181} 166}
182 167
168/* __hdaps_read_pair - internal lockless helper for hdaps_read_pair(). */
183static int __hdaps_read_pair(unsigned int port1, unsigned int port2, 169static int __hdaps_read_pair(unsigned int port1, unsigned int port2,
184 int *x, int *y) 170 int *x, int *y)
185{ 171{
186 /* do a sync refresh -- we need to be sure that we read fresh data */ 172 /* do a sync refresh -- we need to be sure that we read fresh data */
187 if (__device_refresh(REFRESH_SYNC)) 173 if (__device_refresh_sync())
188 return -EIO; 174 return -EIO;
189 175
190 *y = inw(port2); 176 *y = inw(port2);
@@ -217,11 +203,13 @@ static int hdaps_read_pair(unsigned int port1, unsigned int port2,
217 return ret; 203 return ret;
218} 204}
219 205
220/* initialize the accelerometer */ 206/*
207 * hdaps_device_init - initialize the accelerometer. Returns zero on success
208 * and negative error code on failure. Can sleep.
209 */
221static int hdaps_device_init(void) 210static int hdaps_device_init(void)
222{ 211{
223 unsigned int total_msecs = INIT_TIMEOUT_MSECS; 212 int total, ret = -ENXIO;
224 int ret = -ENXIO;
225 213
226 down(&hdaps_sem); 214 down(&hdaps_sem);
227 215
@@ -231,8 +219,10 @@ static int hdaps_device_init(void)
231 goto out; 219 goto out;
232 220
233 /* 221 /*
234 * The 0x03 value appears to only work on some thinkpads, such as the 222 * Most ThinkPads return 0x01.
235 * T42p. Others return 0x01. 223 *
224 * Others--namely the R50p, T41p, and T42p--return 0x03. These laptops
 225 * have "inverted" axes.
236 * 226 *
237 * The 0x02 value occurs when the chip has been previously initialized. 227 * The 0x02 value occurs when the chip has been previously initialized.
238 */ 228 */
@@ -267,24 +257,23 @@ static int hdaps_device_init(void)
267 outb(0x01, 0x161f); 257 outb(0x01, 0x161f);
268 if (__wait_latch(0x161f, 0x00)) 258 if (__wait_latch(0x161f, 0x00))
269 goto out; 259 goto out;
270 if (__device_refresh(REFRESH_SYNC)) 260 if (__device_refresh_sync())
271 goto out; 261 goto out;
272 if (__wait_latch(0x1611, 0x00)) 262 if (__wait_latch(0x1611, 0x00))
273 goto out; 263 goto out;
274 264
275 /* we have done our dance, now let's wait for the applause */ 265 /* we have done our dance, now let's wait for the applause */
276 while (total_msecs > 0) { 266 for (total = INIT_TIMEOUT_MSECS; total > 0; total -= INIT_WAIT_MSECS) {
277 u8 ignored; 267 int x, y;
278 268
279 /* a read of the device helps push it into action */ 269 /* a read of the device helps push it into action */
280 __hdaps_readb_one(HDAPS_PORT_UNKNOWN, &ignored); 270 __hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y);
281 if (!__wait_latch(0x1611, 0x02)) { 271 if (!__wait_latch(0x1611, 0x02)) {
282 ret = 0; 272 ret = 0;
283 break; 273 break;
284 } 274 }
285 275
286 msleep(INIT_WAIT_MSECS); 276 msleep(INIT_WAIT_MSECS);
287 total_msecs -= INIT_WAIT_MSECS;
288 } 277 }
289 278
290out: 279out:
@@ -293,96 +282,6 @@ out:
293} 282}
294 283
295 284
296/* Input class stuff */
297
298/*
299 * hdaps_calibrate - Zero out our "resting" values. Callers must hold hdaps_sem.
300 */
301static void hdaps_calibrate(void)
302{
303 int x, y;
304
305 if (__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y))
306 return;
307
308 rest_x = x;
309 rest_y = y;
310}
311
312static void hdaps_mousedev_poll(unsigned long unused)
313{
314 int x, y;
315
316 /* Cannot sleep. Try nonblockingly. If we fail, try again later. */
317 if (down_trylock(&hdaps_sem)) {
318 mod_timer(&hdaps_timer,jiffies+msecs_to_jiffies(hdaps_poll_ms));
319 return;
320 }
321
322 if (__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y))
323 goto out;
324
325 x -= rest_x;
326 y -= rest_y;
327 if (abs(x) > hdaps_mousedev_threshold)
328 input_report_rel(&hdaps_idev, REL_X, x);
329 if (abs(y) > hdaps_mousedev_threshold)
330 input_report_rel(&hdaps_idev, REL_Y, y);
331 input_sync(&hdaps_idev);
332
333 mod_timer(&hdaps_timer, jiffies + msecs_to_jiffies(hdaps_poll_ms));
334
335out:
336 up(&hdaps_sem);
337}
338
339/*
340 * hdaps_mousedev_enable - enable the input class device. Can sleep.
341 */
342static void hdaps_mousedev_enable(void)
343{
344 down(&hdaps_sem);
345
346 /* calibrate the device before enabling */
347 hdaps_calibrate();
348
349 /* initialize the input class */
350 init_input_dev(&hdaps_idev);
351 hdaps_idev.dev = &pdev->dev;
352 hdaps_idev.evbit[0] = BIT(EV_KEY) | BIT(EV_REL);
353 hdaps_idev.relbit[0] = BIT(REL_X) | BIT(REL_Y);
354 hdaps_idev.keybit[LONG(BTN_LEFT)] = BIT(BTN_LEFT);
355 input_register_device(&hdaps_idev);
356
357 /* start up our timer */
358 init_timer(&hdaps_timer);
359 hdaps_timer.function = hdaps_mousedev_poll;
360 hdaps_timer.expires = jiffies + msecs_to_jiffies(hdaps_poll_ms);
361 add_timer(&hdaps_timer);
362
363 hdaps_mousedev = 1;
364
365 up(&hdaps_sem);
366
367 printk(KERN_INFO "hdaps: input device enabled.\n");
368}
369
370/*
371 * hdaps_mousedev_disable - disable the input class device. Caller must hold
372 * hdaps_sem.
373 */
374static void hdaps_mousedev_disable(void)
375{
376 down(&hdaps_sem);
377 if (hdaps_mousedev) {
378 hdaps_mousedev = 0;
379 del_timer_sync(&hdaps_timer);
380 input_unregister_device(&hdaps_idev);
381 }
382 up(&hdaps_sem);
383}
384
385
386/* Device model stuff */ 285/* Device model stuff */
387 286
388static int hdaps_probe(struct device *dev) 287static int hdaps_probe(struct device *dev)
@@ -412,6 +311,49 @@ static struct device_driver hdaps_driver = {
412 .resume = hdaps_resume 311 .resume = hdaps_resume
413}; 312};
414 313
314/* Input class stuff */
315
316static struct input_dev hdaps_idev = {
317 .name = "hdaps",
318 .evbit = { BIT(EV_ABS) },
319 .absbit = { BIT(ABS_X) | BIT(ABS_Y) },
320 .absmin = { [ABS_X] = -256, [ABS_Y] = -256 },
321 .absmax = { [ABS_X] = 256, [ABS_Y] = 256 },
322 .absfuzz = { [ABS_X] = HDAPS_INPUT_FUZZ, [ABS_Y] = HDAPS_INPUT_FUZZ },
323 .absflat = { [ABS_X] = HDAPS_INPUT_FUZZ, [ABS_Y] = HDAPS_INPUT_FUZZ },
324};
325
326/*
327 * hdaps_calibrate - Set our "resting" values. Callers must hold hdaps_sem.
328 */
329static void hdaps_calibrate(void)
330{
331 __hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &rest_x, &rest_y);
332}
333
334static void hdaps_mousedev_poll(unsigned long unused)
335{
336 int x, y;
337
338 /* Cannot sleep. Try nonblockingly. If we fail, try again later. */
339 if (down_trylock(&hdaps_sem)) {
340 mod_timer(&hdaps_timer,jiffies + HDAPS_POLL_PERIOD);
341 return;
342 }
343
344 if (__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y))
345 goto out;
346
347 input_report_abs(&hdaps_idev, ABS_X, x - rest_x);
348 input_report_abs(&hdaps_idev, ABS_Y, y - rest_y);
349 input_sync(&hdaps_idev);
350
351 mod_timer(&hdaps_timer, jiffies + HDAPS_POLL_PERIOD);
352
353out:
354 up(&hdaps_sem);
355}
356
415 357
416/* Sysfs Files */ 358/* Sysfs Files */
417 359
@@ -517,69 +459,6 @@ static ssize_t hdaps_invert_store(struct device *dev,
517 return count; 459 return count;
518} 460}
519 461
520static ssize_t hdaps_mousedev_show(struct device *dev,
521 struct device_attribute *attr, char *buf)
522{
523 return sprintf(buf, "%d\n", hdaps_mousedev);
524}
525
526static ssize_t hdaps_mousedev_store(struct device *dev,
527 struct device_attribute *attr,
528 const char *buf, size_t count)
529{
530 int enable;
531
532 if (sscanf(buf, "%d", &enable) != 1)
533 return -EINVAL;
534
535 if (enable == 1)
536 hdaps_mousedev_enable();
537 else if (enable == 0)
538 hdaps_mousedev_disable();
539 else
540 return -EINVAL;
541
542 return count;
543}
544
545static ssize_t hdaps_poll_show(struct device *dev,
546 struct device_attribute *attr, char *buf)
547{
548 return sprintf(buf, "%lu\n", hdaps_poll_ms);
549}
550
551static ssize_t hdaps_poll_store(struct device *dev,
552 struct device_attribute *attr,
553 const char *buf, size_t count)
554{
555 unsigned int poll;
556
557 if (sscanf(buf, "%u", &poll) != 1 || poll == 0)
558 return -EINVAL;
559 hdaps_poll_ms = poll;
560
561 return count;
562}
563
564static ssize_t hdaps_threshold_show(struct device *dev,
565 struct device_attribute *attr, char *buf)
566{
567 return sprintf(buf, "%u\n", hdaps_mousedev_threshold);
568}
569
570static ssize_t hdaps_threshold_store(struct device *dev,
571 struct device_attribute *attr,
572 const char *buf, size_t count)
573{
574 unsigned int threshold;
575
576 if (sscanf(buf, "%u", &threshold) != 1 || threshold == 0)
577 return -EINVAL;
578 hdaps_mousedev_threshold = threshold;
579
580 return count;
581}
582
583static DEVICE_ATTR(position, 0444, hdaps_position_show, NULL); 462static DEVICE_ATTR(position, 0444, hdaps_position_show, NULL);
584static DEVICE_ATTR(variance, 0444, hdaps_variance_show, NULL); 463static DEVICE_ATTR(variance, 0444, hdaps_variance_show, NULL);
585static DEVICE_ATTR(temp1, 0444, hdaps_temp1_show, NULL); 464static DEVICE_ATTR(temp1, 0444, hdaps_temp1_show, NULL);
@@ -588,10 +467,6 @@ static DEVICE_ATTR(keyboard_activity, 0444, hdaps_keyboard_activity_show, NULL);
588static DEVICE_ATTR(mouse_activity, 0444, hdaps_mouse_activity_show, NULL); 467static DEVICE_ATTR(mouse_activity, 0444, hdaps_mouse_activity_show, NULL);
589static DEVICE_ATTR(calibrate, 0644, hdaps_calibrate_show,hdaps_calibrate_store); 468static DEVICE_ATTR(calibrate, 0644, hdaps_calibrate_show,hdaps_calibrate_store);
590static DEVICE_ATTR(invert, 0644, hdaps_invert_show, hdaps_invert_store); 469static DEVICE_ATTR(invert, 0644, hdaps_invert_show, hdaps_invert_store);
591static DEVICE_ATTR(mousedev, 0644, hdaps_mousedev_show, hdaps_mousedev_store);
592static DEVICE_ATTR(mousedev_poll_ms, 0644, hdaps_poll_show, hdaps_poll_store);
593static DEVICE_ATTR(mousedev_threshold, 0644, hdaps_threshold_show,
594 hdaps_threshold_store);
595 470
596static struct attribute *hdaps_attributes[] = { 471static struct attribute *hdaps_attributes[] = {
597 &dev_attr_position.attr, 472 &dev_attr_position.attr,
@@ -601,9 +476,6 @@ static struct attribute *hdaps_attributes[] = {
601 &dev_attr_keyboard_activity.attr, 476 &dev_attr_keyboard_activity.attr,
602 &dev_attr_mouse_activity.attr, 477 &dev_attr_mouse_activity.attr,
603 &dev_attr_calibrate.attr, 478 &dev_attr_calibrate.attr,
604 &dev_attr_mousedev.attr,
605 &dev_attr_mousedev_threshold.attr,
606 &dev_attr_mousedev_poll_ms.attr,
607 &dev_attr_invert.attr, 479 &dev_attr_invert.attr,
608 NULL, 480 NULL,
609}; 481};
@@ -615,24 +487,19 @@ static struct attribute_group hdaps_attribute_group = {
615 487
616/* Module stuff */ 488/* Module stuff */
617 489
618/* 490/* hdaps_dmi_match - found a match. return one, short-circuiting the hunt. */
619 * XXX: We should be able to return nonzero and halt the detection process.
620 * But there is a bug in dmi_check_system() where a nonzero return from the
621 * first match will result in a return of failure from dmi_check_system().
622 * I fixed this; the patch is in 2.6-mm. Once in Linus's tree we can make
623 * hdaps_dmi_match_invert() return hdaps_dmi_match(), which in turn returns 1.
624 */
625static int hdaps_dmi_match(struct dmi_system_id *id) 491static int hdaps_dmi_match(struct dmi_system_id *id)
626{ 492{
627 printk(KERN_INFO "hdaps: %s detected.\n", id->ident); 493 printk(KERN_INFO "hdaps: %s detected.\n", id->ident);
628 return 0; 494 return 1;
629} 495}
630 496
497/* hdaps_dmi_match_invert - found an inverted match. */
631static int hdaps_dmi_match_invert(struct dmi_system_id *id) 498static int hdaps_dmi_match_invert(struct dmi_system_id *id)
632{ 499{
633 hdaps_invert = 1; 500 hdaps_invert = 1;
634 printk(KERN_INFO "hdaps: inverting axis readings.\n"); 501 printk(KERN_INFO "hdaps: inverting axis readings.\n");
635 return 0; 502 return hdaps_dmi_match(id);
636} 503}
637 504
638#define HDAPS_DMI_MATCH_NORMAL(model) { \ 505#define HDAPS_DMI_MATCH_NORMAL(model) { \
@@ -662,12 +529,15 @@ static int __init hdaps_init(void)
662 HDAPS_DMI_MATCH_INVERT("ThinkPad R50p"), 529 HDAPS_DMI_MATCH_INVERT("ThinkPad R50p"),
663 HDAPS_DMI_MATCH_NORMAL("ThinkPad R50"), 530 HDAPS_DMI_MATCH_NORMAL("ThinkPad R50"),
664 HDAPS_DMI_MATCH_NORMAL("ThinkPad R51"), 531 HDAPS_DMI_MATCH_NORMAL("ThinkPad R51"),
532 HDAPS_DMI_MATCH_NORMAL("ThinkPad R52"),
665 HDAPS_DMI_MATCH_INVERT("ThinkPad T41p"), 533 HDAPS_DMI_MATCH_INVERT("ThinkPad T41p"),
666 HDAPS_DMI_MATCH_NORMAL("ThinkPad T41"), 534 HDAPS_DMI_MATCH_NORMAL("ThinkPad T41"),
667 HDAPS_DMI_MATCH_INVERT("ThinkPad T42p"), 535 HDAPS_DMI_MATCH_INVERT("ThinkPad T42p"),
668 HDAPS_DMI_MATCH_NORMAL("ThinkPad T42"), 536 HDAPS_DMI_MATCH_NORMAL("ThinkPad T42"),
669 HDAPS_DMI_MATCH_NORMAL("ThinkPad T43"), 537 HDAPS_DMI_MATCH_NORMAL("ThinkPad T43"),
670 HDAPS_DMI_MATCH_NORMAL("ThinkPad X40"), 538 HDAPS_DMI_MATCH_NORMAL("ThinkPad X40"),
539 HDAPS_DMI_MATCH_NORMAL("ThinkPad X41 Tablet"),
540 HDAPS_DMI_MATCH_NORMAL("ThinkPad X41"),
671 { .ident = NULL } 541 { .ident = NULL }
672 }; 542 };
673 543
@@ -696,8 +566,18 @@ static int __init hdaps_init(void)
696 if (ret) 566 if (ret)
697 goto out_device; 567 goto out_device;
698 568
699 if (hdaps_mousedev) 569 /* initial calibrate for the input device */
700 hdaps_mousedev_enable(); 570 hdaps_calibrate();
571
572 /* initialize the input class */
573 hdaps_idev.dev = &pdev->dev;
574 input_register_device(&hdaps_idev);
575
576 /* start up our timer for the input device */
577 init_timer(&hdaps_timer);
578 hdaps_timer.function = hdaps_mousedev_poll;
579 hdaps_timer.expires = jiffies + HDAPS_POLL_PERIOD;
580 add_timer(&hdaps_timer);
701 581
702 printk(KERN_INFO "hdaps: driver successfully loaded.\n"); 582 printk(KERN_INFO "hdaps: driver successfully loaded.\n");
703 return 0; 583 return 0;
@@ -715,8 +595,8 @@ out:
715 595
716static void __exit hdaps_exit(void) 596static void __exit hdaps_exit(void)
717{ 597{
718 hdaps_mousedev_disable(); 598 del_timer_sync(&hdaps_timer);
719 599 input_unregister_device(&hdaps_idev);
720 sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group); 600 sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
721 platform_device_unregister(pdev); 601 platform_device_unregister(pdev);
722 driver_unregister(&hdaps_driver); 602 driver_unregister(&hdaps_driver);
@@ -728,9 +608,6 @@ static void __exit hdaps_exit(void)
728module_init(hdaps_init); 608module_init(hdaps_init);
729module_exit(hdaps_exit); 609module_exit(hdaps_exit);
730 610
731module_param_named(mousedev, hdaps_mousedev, bool, 0);
732MODULE_PARM_DESC(mousedev, "enable the input class device");
733
734module_param_named(invert, hdaps_invert, bool, 0); 611module_param_named(invert, hdaps_invert, bool, 0);
735MODULE_PARM_DESC(invert, "invert data along each axis"); 612MODULE_PARM_DESC(invert, "invert data along each axis");
736 613
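Since the patch converts hdaps from a relative, mouse-style device to an absolute input device, a consumer now sees EV_ABS events for ABS_X and ABS_Y at every poll. A minimal user-space sketch of reading them follows; the event node number is an assumption, and the right one can be found via /proc/bus/input/devices.

        /* Hypothetical sketch: read ABS_X/ABS_Y events from the hdaps input
         * device. The event node number is a guess; look it up first. */
        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>
        #include <linux/input.h>

        int main(void)
        {
                struct input_event ev;
                int fd = open("/dev/input/event2", O_RDONLY);   /* node is a guess */

                if (fd < 0) {
                        perror("open event device");
                        return 1;
                }
                while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
                        if (ev.type == EV_ABS && ev.code == ABS_X)
                                printf("x = %d\n", ev.value);
                        else if (ev.type == EV_ABS && ev.code == ABS_Y)
                                printf("y = %d\n", ev.value);
                }
                close(fd);
                return 0;
        }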
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 8334496a7e0a..3badfec75b1c 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -245,6 +245,18 @@ config I2C_KEYWEST
245 This support is also available as a module. If so, the module 245 This support is also available as a module. If so, the module
246 will be called i2c-keywest. 246 will be called i2c-keywest.
247 247
248config I2C_PMAC_SMU
249 tristate "Powermac SMU I2C interface"
250 depends on I2C && PMAC_SMU
251 help
252 This supports the use of the I2C interface in the SMU
253 chip on recent Apple machines like the iMac G5. It is used,
254 among others, by the thermal control driver for those machines.
255 Say Y if you have such a machine.
256
257 This support is also available as a module. If so, the module
258 will be called i2c-pmac-smu.
259
248config I2C_MPC 260config I2C_MPC
249 tristate "MPC107/824x/85xx/52xx" 261 tristate "MPC107/824x/85xx/52xx"
250 depends on I2C && PPC32 262 depends on I2C && PPC32
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 980b3e983670..f1df00f66c6c 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_I2C_ITE) += i2c-ite.o
20obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o 20obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
21obj-$(CONFIG_I2C_IXP4XX) += i2c-ixp4xx.o 21obj-$(CONFIG_I2C_IXP4XX) += i2c-ixp4xx.o
22obj-$(CONFIG_I2C_KEYWEST) += i2c-keywest.o 22obj-$(CONFIG_I2C_KEYWEST) += i2c-keywest.o
23obj-$(CONFIG_I2C_PMAC_SMU) += i2c-pmac-smu.o
23obj-$(CONFIG_I2C_MPC) += i2c-mpc.o 24obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
24obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o 25obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
25obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o 26obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o
diff --git a/drivers/i2c/busses/i2c-keywest.c b/drivers/i2c/busses/i2c-keywest.c
index 37b49c2daf5f..eff5896ce865 100644
--- a/drivers/i2c/busses/i2c-keywest.c
+++ b/drivers/i2c/busses/i2c-keywest.c
@@ -611,7 +611,6 @@ create_iface(struct device_node *np, struct device *dev)
611 611
612 for (i=0; i<nchan; i++) { 612 for (i=0; i<nchan; i++) {
613 struct keywest_chan* chan = &iface->channels[i]; 613 struct keywest_chan* chan = &iface->channels[i];
614 u8 addr;
615 614
616 sprintf(chan->adapter.name, "%s %d", np->parent->name, i); 615 sprintf(chan->adapter.name, "%s %d", np->parent->name, i);
617 chan->iface = iface; 616 chan->iface = iface;
diff --git a/drivers/i2c/busses/i2c-pmac-smu.c b/drivers/i2c/busses/i2c-pmac-smu.c
new file mode 100644
index 000000000000..8a9f5648a23d
--- /dev/null
+++ b/drivers/i2c/busses/i2c-pmac-smu.c
@@ -0,0 +1,316 @@
1/*
2 i2c Support for Apple SMU Controller
3
4 Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp.
5 <benh@kernel.crashing.org>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20
21*/
22
23#include <linux/config.h>
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/types.h>
27#include <linux/i2c.h>
28#include <linux/init.h>
29#include <linux/completion.h>
30#include <linux/device.h>
31#include <asm/prom.h>
32#include <asm/of_device.h>
33#include <asm/smu.h>
34
35static int probe;
36
37MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
38MODULE_DESCRIPTION("I2C driver for Apple's SMU");
39MODULE_LICENSE("GPL");
40module_param(probe, bool, 0);
41
42
43/* Physical interface */
44struct smu_iface
45{
46 struct i2c_adapter adapter;
47 struct completion complete;
48 u32 busid;
49};
50
51static void smu_i2c_done(struct smu_i2c_cmd *cmd, void *misc)
52{
53 struct smu_iface *iface = misc;
54 complete(&iface->complete);
55}
56
57/*
58 * SMBUS-type transfer entrypoint
59 */
60static s32 smu_smbus_xfer( struct i2c_adapter* adap,
61 u16 addr,
62 unsigned short flags,
63 char read_write,
64 u8 command,
65 int size,
66 union i2c_smbus_data* data)
67{
68 struct smu_iface *iface = i2c_get_adapdata(adap);
69 struct smu_i2c_cmd cmd;
70 int rc = 0;
71 int read = (read_write == I2C_SMBUS_READ);
72
73 cmd.info.bus = iface->busid;
74 cmd.info.devaddr = (addr << 1) | (read ? 0x01 : 0x00);
75
 76 /* Prepare data & select mode */
77 switch (size) {
78 case I2C_SMBUS_QUICK:
79 cmd.info.type = SMU_I2C_TRANSFER_SIMPLE;
80 cmd.info.datalen = 0;
81 break;
82 case I2C_SMBUS_BYTE:
83 cmd.info.type = SMU_I2C_TRANSFER_SIMPLE;
84 cmd.info.datalen = 1;
85 if (!read)
86 cmd.info.data[0] = data->byte;
87 break;
88 case I2C_SMBUS_BYTE_DATA:
89 cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
90 cmd.info.datalen = 1;
91 cmd.info.sublen = 1;
92 cmd.info.subaddr[0] = command;
93 cmd.info.subaddr[1] = 0;
94 cmd.info.subaddr[2] = 0;
95 if (!read)
96 cmd.info.data[0] = data->byte;
97 break;
98 case I2C_SMBUS_WORD_DATA:
99 cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
100 cmd.info.datalen = 2;
101 cmd.info.sublen = 1;
102 cmd.info.subaddr[0] = command;
103 cmd.info.subaddr[1] = 0;
104 cmd.info.subaddr[2] = 0;
105 if (!read) {
106 cmd.info.data[0] = data->byte & 0xff;
107 cmd.info.data[1] = (data->byte >> 8) & 0xff;
108 }
109 break;
110 /* Note that these are broken vs. the expected smbus API where
 111 * on reads, the length is actually returned from the function,
112 * but I think the current API makes no sense and I don't want
113 * any driver that I haven't verified for correctness to go
114 * anywhere near a pmac i2c bus anyway ...
115 */
116 case I2C_SMBUS_BLOCK_DATA:
117 cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
118 cmd.info.datalen = data->block[0] + 1;
119 if (cmd.info.datalen > 6)
120 return -EINVAL;
121 if (!read)
122 memcpy(cmd.info.data, data->block, cmd.info.datalen);
123 cmd.info.sublen = 1;
124 cmd.info.subaddr[0] = command;
125 cmd.info.subaddr[1] = 0;
126 cmd.info.subaddr[2] = 0;
127 break;
128 case I2C_SMBUS_I2C_BLOCK_DATA:
129 cmd.info.type = SMU_I2C_TRANSFER_STDSUB;
130 cmd.info.datalen = data->block[0];
131 if (cmd.info.datalen > 7)
132 return -EINVAL;
133 if (!read)
134 memcpy(cmd.info.data, &data->block[1],
135 cmd.info.datalen);
136 cmd.info.sublen = 1;
137 cmd.info.subaddr[0] = command;
138 cmd.info.subaddr[1] = 0;
139 cmd.info.subaddr[2] = 0;
140 break;
141
142 default:
143 return -EINVAL;
144 }
145
146 /* Turn a standardsub read into a combined mode access */
147 if (read_write == I2C_SMBUS_READ &&
148 cmd.info.type == SMU_I2C_TRANSFER_STDSUB)
149 cmd.info.type = SMU_I2C_TRANSFER_COMBINED;
150
151 /* Finish filling command and submit it */
152 cmd.done = smu_i2c_done;
153 cmd.misc = iface;
154 rc = smu_queue_i2c(&cmd);
155 if (rc < 0)
156 return rc;
157 wait_for_completion(&iface->complete);
158 rc = cmd.status;
159
160 if (!read || rc < 0)
161 return rc;
162
163 switch (size) {
164 case I2C_SMBUS_BYTE:
165 case I2C_SMBUS_BYTE_DATA:
166 data->byte = cmd.info.data[0];
167 break;
168 case I2C_SMBUS_WORD_DATA:
169 data->word = ((u16)cmd.info.data[1]) << 8;
170 data->word |= cmd.info.data[0];
171 break;
172 /* Note that these are broken vs. the expected smbus API where
 173 * on reads, the length is actually returned from the function,
174 * but I think the current API makes no sense and I don't want
175 * any driver that I haven't verified for correctness to go
176 * anywhere near a pmac i2c bus anyway ...
177 */
178 case I2C_SMBUS_BLOCK_DATA:
179 case I2C_SMBUS_I2C_BLOCK_DATA:
180 memcpy(&data->block[0], cmd.info.data, cmd.info.datalen);
181 break;
182 }
183
184 return rc;
185}
186
187static u32
188smu_smbus_func(struct i2c_adapter * adapter)
189{
190 return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
191 I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
192 I2C_FUNC_SMBUS_BLOCK_DATA;
193}
194
195/* For now, we only handle combined mode (smbus) */
196static struct i2c_algorithm smu_algorithm = {
197 .smbus_xfer = smu_smbus_xfer,
198 .functionality = smu_smbus_func,
199};
200
201static int create_iface(struct device_node *np, struct device *dev)
202{
203 struct smu_iface* iface;
204 u32 *reg, busid;
205 int rc;
206
207 reg = (u32 *)get_property(np, "reg", NULL);
208 if (reg == NULL) {
209 printk(KERN_ERR "i2c-pmac-smu: can't find bus number !\n");
210 return -ENXIO;
211 }
212 busid = *reg;
213
214 iface = kmalloc(sizeof(struct smu_iface), GFP_KERNEL);
215 if (iface == NULL) {
 216 printk(KERN_ERR "i2c-pmac-smu: can't allocate interface !\n");
217 return -ENOMEM;
218 }
219 memset(iface, 0, sizeof(struct smu_iface));
220 init_completion(&iface->complete);
221 iface->busid = busid;
222
223 dev_set_drvdata(dev, iface);
224
225 sprintf(iface->adapter.name, "smu-i2c-%02x", busid);
226 iface->adapter.algo = &smu_algorithm;
227 iface->adapter.algo_data = NULL;
228 iface->adapter.client_register = NULL;
229 iface->adapter.client_unregister = NULL;
230 i2c_set_adapdata(&iface->adapter, iface);
231 iface->adapter.dev.parent = dev;
232
233 rc = i2c_add_adapter(&iface->adapter);
234 if (rc) {
 235 printk(KERN_ERR "i2c-pmac-smu.c: Adapter %s registration "
236 "failed\n", iface->adapter.name);
237 i2c_set_adapdata(&iface->adapter, NULL);
238 }
239
240 if (probe) {
241 unsigned char addr;
242 printk("Probe: ");
243 for (addr = 0x00; addr <= 0x7f; addr++) {
244 if (i2c_smbus_xfer(&iface->adapter,addr,
245 0,0,0,I2C_SMBUS_QUICK,NULL) >= 0)
246 printk("%02x ", addr);
247 }
248 printk("\n");
249 }
250
251 printk(KERN_INFO "SMU i2c bus %x registered\n", busid);
252
253 return 0;
254}
255
256static int dispose_iface(struct device *dev)
257{
258 struct smu_iface *iface = dev_get_drvdata(dev);
259 int rc;
260
261 rc = i2c_del_adapter(&iface->adapter);
262 i2c_set_adapdata(&iface->adapter, NULL);
263 /* We aren't that prepared to deal with this... */
264 if (rc)
265 printk("i2c-pmac-smu.c: Failed to remove bus %s !\n",
266 iface->adapter.name);
267 dev_set_drvdata(dev, NULL);
268 kfree(iface);
269
270 return 0;
271}
272
273
274static int create_iface_of_platform(struct of_device* dev,
275 const struct of_device_id *match)
276{
277 return create_iface(dev->node, &dev->dev);
278}
279
280
281static int dispose_iface_of_platform(struct of_device* dev)
282{
283 return dispose_iface(&dev->dev);
284}
285
286
287static struct of_device_id i2c_smu_match[] =
288{
289 {
290 .compatible = "smu-i2c",
291 },
292 {},
293};
294static struct of_platform_driver i2c_smu_of_platform_driver =
295{
296 .name = "i2c-smu",
297 .match_table = i2c_smu_match,
298 .probe = create_iface_of_platform,
299 .remove = dispose_iface_of_platform
300};
301
302
303static int __init i2c_pmac_smu_init(void)
304{
305 of_register_driver(&i2c_smu_of_platform_driver);
306 return 0;
307}
308
309
310static void __exit i2c_pmac_smu_cleanup(void)
311{
312 of_unregister_driver(&i2c_smu_of_platform_driver);
313}
314
315module_init(i2c_pmac_smu_init);
316module_exit(i2c_pmac_smu_cleanup);
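Client drivers do not call smu_smbus_xfer() directly; they go through the generic SMBus helpers, which dispatch to the algorithm registered above. A minimal kernel-side sketch of a byte read follows; "client" and REG_TEMP are assumptions, and only the i2c_smbus_read_byte_data() call itself is the standard kernel API this adapter now services.

        /* Hypothetical sketch: read one byte-sized register through the SMU
         * adapter via the standard SMBus helper. REG_TEMP is an assumed
         * register offset on an assumed, already-bound i2c client. */
        #include <linux/i2c.h>

        #define REG_TEMP        0x01    /* hypothetical register offset */

        static int read_temp(struct i2c_client *client)
        {
                s32 val = i2c_smbus_read_byte_data(client, REG_TEMP);

                if (val < 0)
                        return val;     /* error bubbled up from smu_smbus_xfer() */
                return val & 0xff;
        }

Note the caveat in the comments above: block transfers through this adapter do not return the read length the way the SMBus API normally expects.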
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index fdf53ce04248..44b595d90a4a 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -914,19 +914,23 @@ static int i2c_pxa_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num
914 return ret; 914 return ret;
915} 915}
916 916
917static u32 i2c_pxa_functionality(struct i2c_adapter *adap)
918{
919 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
920}
921
917static struct i2c_algorithm i2c_pxa_algorithm = { 922static struct i2c_algorithm i2c_pxa_algorithm = {
918 .name = "PXA-I2C-Algorithm",
919 .id = I2C_ALGO_PXA,
920 .master_xfer = i2c_pxa_xfer, 923 .master_xfer = i2c_pxa_xfer,
924 .functionality = i2c_pxa_functionality,
921}; 925};
922 926
923static struct pxa_i2c i2c_pxa = { 927static struct pxa_i2c i2c_pxa = {
924 .lock = SPIN_LOCK_UNLOCKED, 928 .lock = SPIN_LOCK_UNLOCKED,
925 .wait = __WAIT_QUEUE_HEAD_INITIALIZER(i2c_pxa.wait), 929 .wait = __WAIT_QUEUE_HEAD_INITIALIZER(i2c_pxa.wait),
926 .adap = { 930 .adap = {
927 .name = "pxa2xx-i2c", 931 .owner = THIS_MODULE,
928 .id = I2C_ALGO_PXA,
929 .algo = &i2c_pxa_algorithm, 932 .algo = &i2c_pxa_algorithm,
933 .name = "pxa2xx-i2c",
930 .retries = 5, 934 .retries = 5,
931 }, 935 },
932}; 936};
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index b443b04a4c5a..0b0aa4f51628 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -601,44 +601,15 @@ EXPORT_SYMBOL(ide_wait_stat);
601 */ 601 */
602u8 eighty_ninty_three (ide_drive_t *drive) 602u8 eighty_ninty_three (ide_drive_t *drive)
603{ 603{
604#if 0 604 if(HWIF(drive)->udma_four == 0)
605 if (!HWIF(drive)->udma_four) 605 return 0;
606 if (!(drive->id->hw_config & 0x6000))
606 return 0; 607 return 0;
607
608 if (drive->id->major_rev_num) {
609 int hssbd = 0;
610 int i;
611 /*
612 * Determine highest Supported SPEC
613 */
614 for (i=1; i<=15; i++)
615 if (drive->id->major_rev_num & (1<<i))
616 hssbd++;
617
618 switch (hssbd) {
619 case 7:
620 case 6:
621 case 5:
622 /* ATA-4 and older do not support above Ultra 33 */
623 default:
624 return 0;
625 }
626 }
627
628 return ((u8) (
629#ifndef CONFIG_IDEDMA_IVB
630 (drive->id->hw_config & 0x4000) &&
631#endif /* CONFIG_IDEDMA_IVB */
632 (drive->id->hw_config & 0x6000)) ? 1 : 0);
633
634#else
635
636 return ((u8) ((HWIF(drive)->udma_four) &&
637#ifndef CONFIG_IDEDMA_IVB 608#ifndef CONFIG_IDEDMA_IVB
638 (drive->id->hw_config & 0x4000) && 609 if(!(drive->id->hw_config & 0x4000))
610 return 0;
639#endif /* CONFIG_IDEDMA_IVB */ 611#endif /* CONFIG_IDEDMA_IVB */
640 (drive->id->hw_config & 0x6000)) ? 1 : 0); 612 return 1;
641#endif
642} 613}
643 614
644EXPORT_SYMBOL(eighty_ninty_three); 615EXPORT_SYMBOL(eighty_ninty_three);
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index d04f62ab5de1..ace8edad6e96 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -500,6 +500,7 @@ static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long
500 } 500 }
501 501
502 rq.special = args; 502 rq.special = args;
503 args->rq = &rq;
503 return ide_do_drive_cmd(drive, &rq, ide_wait); 504 return ide_do_drive_cmd(drive, &rq, ide_wait);
504} 505}
505 506
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 0ccf85fcee34..a35a58bef1a4 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -477,7 +477,7 @@ static struct pcmcia_device_id ide_ids[] = {
477 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), 477 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
478 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674), 478 PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
479 PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b), 479 PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b),
480 PCMCIA_DEVICE_PROD_ID12(" ", "NinjaATA-", 0x3b6e20c8, 0xebe0bd79), 480 PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
481 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591), 481 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
482 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728), 482 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
483 PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1), 483 PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index 3de9ab897e42..3d9c7afc8695 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -608,7 +608,7 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
608 608
609#ifdef __i386__ 609#ifdef __i386__
610 if (dev->resource[PCI_ROM_RESOURCE].start) { 610 if (dev->resource[PCI_ROM_RESOURCE].start) {
611 pci_write_config_byte(dev, PCI_ROM_ADDRESS, dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE); 611 pci_write_config_dword(dev, PCI_ROM_ADDRESS, dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
612 printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n", name, dev->resource[PCI_ROM_RESOURCE].start); 612 printk(KERN_INFO "%s: ROM enabled at 0x%08lx\n", name, dev->resource[PCI_ROM_RESOURCE].start);
613 } 613 }
614#endif 614#endif
diff --git a/drivers/ide/pci/hpt34x.c b/drivers/ide/pci/hpt34x.c
index bbde46279984..be334da7a754 100644
--- a/drivers/ide/pci/hpt34x.c
+++ b/drivers/ide/pci/hpt34x.c
@@ -173,7 +173,7 @@ static unsigned int __devinit init_chipset_hpt34x(struct pci_dev *dev, const cha
173 173
174 if (cmd & PCI_COMMAND_MEMORY) { 174 if (cmd & PCI_COMMAND_MEMORY) {
175 if (pci_resource_start(dev, PCI_ROM_RESOURCE)) { 175 if (pci_resource_start(dev, PCI_ROM_RESOURCE)) {
176 pci_write_config_byte(dev, PCI_ROM_ADDRESS, 176 pci_write_config_dword(dev, PCI_ROM_ADDRESS,
177 dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE); 177 dev->resource[PCI_ROM_RESOURCE].start | PCI_ROM_ADDRESS_ENABLE);
178 printk(KERN_INFO "HPT345: ROM enabled at 0x%08lx\n", 178 printk(KERN_INFO "HPT345: ROM enabled at 0x%08lx\n",
179 dev->resource[PCI_ROM_RESOURCE].start); 179 dev->resource[PCI_ROM_RESOURCE].start);
diff --git a/drivers/ieee1394/amdtp.c b/drivers/ieee1394/amdtp.c
index 84ae027b021a..e8e28569a668 100644
--- a/drivers/ieee1394/amdtp.c
+++ b/drivers/ieee1394/amdtp.c
@@ -1297,4 +1297,3 @@ static void __exit amdtp_exit_module (void)
1297 1297
1298module_init(amdtp_init_module); 1298module_init(amdtp_init_module);
1299module_exit(amdtp_exit_module); 1299module_exit(amdtp_exit_module);
1300MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16);
diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h
index e6734263a1d3..28c5f4b726e2 100644
--- a/drivers/ieee1394/csr1212.h
+++ b/drivers/ieee1394/csr1212.h
@@ -37,7 +37,6 @@
37#include <linux/types.h> 37#include <linux/types.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/sched.h>
41#include <linux/vmalloc.h> 40#include <linux/vmalloc.h>
42#include <asm/pgalloc.h> 41#include <asm/pgalloc.h>
43 42
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index 4538b0235ca3..e34730c7a874 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -2660,4 +2660,3 @@ static int __init dv1394_init_module(void)
2660 2660
2661module_init(dv1394_init_module); 2661module_init(dv1394_init_module);
2662module_exit(dv1394_exit_module); 2662module_exit(dv1394_exit_module);
2663MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16);
diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c
index cd53c174ced1..4802bbbb6dc9 100644
--- a/drivers/ieee1394/eth1394.c
+++ b/drivers/ieee1394/eth1394.c
@@ -89,7 +89,7 @@
89#define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__) 89#define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__)
90 90
91static char version[] __devinitdata = 91static char version[] __devinitdata =
92 "$Rev: 1264 $ Ben Collins <bcollins@debian.org>"; 92 "$Rev: 1312 $ Ben Collins <bcollins@debian.org>";
93 93
94struct fragment_info { 94struct fragment_info {
95 struct list_head list; 95 struct list_head list;
@@ -221,9 +221,7 @@ static int ether1394_open (struct net_device *dev)
221 if (priv->bc_state == ETHER1394_BC_ERROR) { 221 if (priv->bc_state == ETHER1394_BC_ERROR) {
222 /* we'll try again */ 222 /* we'll try again */
223 priv->iso = hpsb_iso_recv_init(priv->host, 223 priv->iso = hpsb_iso_recv_init(priv->host,
224 ETHER1394_GASP_BUFFERS * 2 * 224 ETHER1394_ISO_BUF_SIZE,
225 (1 << (priv->host->csr.max_rec +
226 1)),
227 ETHER1394_GASP_BUFFERS, 225 ETHER1394_GASP_BUFFERS,
228 priv->broadcast_channel, 226 priv->broadcast_channel,
229 HPSB_ISO_DMA_PACKET_PER_BUFFER, 227 HPSB_ISO_DMA_PACKET_PER_BUFFER,
@@ -635,8 +633,8 @@ static void ether1394_add_host (struct hpsb_host *host)
635 * be checked when the eth device is opened. */ 633 * be checked when the eth device is opened. */
636 priv->broadcast_channel = host->csr.broadcast_channel & 0x3f; 634 priv->broadcast_channel = host->csr.broadcast_channel & 0x3f;
637 635
638 priv->iso = hpsb_iso_recv_init(host, (ETHER1394_GASP_BUFFERS * 2 * 636 priv->iso = hpsb_iso_recv_init(host,
639 (1 << (host->csr.max_rec + 1))), 637 ETHER1394_ISO_BUF_SIZE,
640 ETHER1394_GASP_BUFFERS, 638 ETHER1394_GASP_BUFFERS,
641 priv->broadcast_channel, 639 priv->broadcast_channel,
642 HPSB_ISO_DMA_PACKET_PER_BUFFER, 640 HPSB_ISO_DMA_PACKET_PER_BUFFER,
@@ -1770,7 +1768,7 @@ fail:
1770static void ether1394_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1768static void ether1394_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1771{ 1769{
1772 strcpy (info->driver, driver_name); 1770 strcpy (info->driver, driver_name);
1773 strcpy (info->version, "$Rev: 1264 $"); 1771 strcpy (info->version, "$Rev: 1312 $");
1774 /* FIXME XXX provide sane businfo */ 1772 /* FIXME XXX provide sane businfo */
1775 strcpy (info->bus_info, "ieee1394"); 1773 strcpy (info->bus_info, "ieee1394");
1776} 1774}
diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h
index ed8f1c4b7fd8..a77213cfc483 100644
--- a/drivers/ieee1394/eth1394.h
+++ b/drivers/ieee1394/eth1394.h
@@ -44,6 +44,12 @@
44 44
45#define ETHER1394_GASP_BUFFERS 16 45#define ETHER1394_GASP_BUFFERS 16
46 46
47/* rawiso buffer size - due to a limitation in rawiso, we must limit each
48 * GASP buffer to be less than PAGE_SIZE. */
49#define ETHER1394_ISO_BUF_SIZE ETHER1394_GASP_BUFFERS * \
50 min((unsigned int)PAGE_SIZE, \
51 2 * (1U << (priv->host->csr.max_rec + 1)))
52
47/* Node set == 64 */ 53/* Node set == 64 */
48#define NODE_SET (ALL_NODES + 1) 54#define NODE_SET (ALL_NODES + 1)
49 55
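As a worked example of the new macro (assuming a controller with max_rec = 9 and 4 KiB pages, both of which are assumptions about a particular host): 2 * (1 << (9 + 1)) = 2048 bytes per GASP buffer, which is below PAGE_SIZE, so ETHER1394_ISO_BUF_SIZE works out to 16 * 2048 = 32768 bytes. A controller with a larger max_rec, say 13, would instead be clamped to 16 * PAGE_SIZE, satisfying the rawiso per-buffer limit the comment describes.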
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
index c502c6e9c440..aeeaeb670d03 100644
--- a/drivers/ieee1394/hosts.c
+++ b/drivers/ieee1394/hosts.c
@@ -18,6 +18,7 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/timer.h> 20#include <linux/timer.h>
21#include <linux/jiffies.h>
21 22
22#include "csr1212.h" 23#include "csr1212.h"
23#include "ieee1394.h" 24#include "ieee1394.h"
@@ -217,7 +218,7 @@ int hpsb_update_config_rom_image(struct hpsb_host *host)
217 218
218 /* IEEE 1394a-2000 prohibits using the same generation number 219 /* IEEE 1394a-2000 prohibits using the same generation number
219 * twice in a 60 second period. */ 220 * twice in a 60 second period. */
220 if (jiffies - host->csr.gen_timestamp[next_gen] < 60 * HZ) 221 if (time_before(jiffies, host->csr.gen_timestamp[next_gen] + 60 * HZ))
221 /* Wait 60 seconds from the last time this generation number was 222 /* Wait 60 seconds from the last time this generation number was
222 * used. */ 223 * used. */
223 reset_delay = (60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies; 224 reset_delay = (60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies;
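The switch from a raw jiffies subtraction to time_before() is the standard guard against jiffies wraparound. A minimal sketch of the idiom, using the same 60-second window as the code above, with "stamp" standing in for host->csr.gen_timestamp[next_gen]:

        /* Sketch of the wraparound-safe comparison used above. */
        #include <linux/jiffies.h>

        static int generation_too_recent(unsigned long stamp)
        {
                /* True if fewer than 60 seconds have passed since "stamp",
                 * even if jiffies has wrapped in the meantime. */
                return time_before(jiffies, stamp + 60 * HZ);
        }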
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h
index 739e76840d51..38f42112dff0 100644
--- a/drivers/ieee1394/hosts.h
+++ b/drivers/ieee1394/hosts.h
@@ -135,17 +135,17 @@ enum isoctl_cmd {
135 135
136enum reset_types { 136enum reset_types {
137 /* 166 microsecond reset -- only type of reset available on 137 /* 166 microsecond reset -- only type of reset available on
138 non-1394a capable IEEE 1394 controllers */ 138 non-1394a capable controllers */
139 LONG_RESET, 139 LONG_RESET,
140 140
141 /* Short (arbitrated) reset -- only available on 1394a capable 141 /* Short (arbitrated) reset -- only available on 1394a capable
142 IEEE 1394 capable controllers */ 142 controllers */
143 SHORT_RESET, 143 SHORT_RESET,
144 144
145 /* Variants, that set force_root before issueing the bus reset */ 145 /* Variants that set force_root before issueing the bus reset */
146 LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT, 146 LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT,
147 147
148 /* Variants, that clear force_root before issueing the bus reset */ 148 /* Variants that clear force_root before issueing the bus reset */
149 LONG_RESET_NO_FORCE_ROOT, SHORT_RESET_NO_FORCE_ROOT 149 LONG_RESET_NO_FORCE_ROOT, SHORT_RESET_NO_FORCE_ROOT
150}; 150};
151 151
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index d633770fac8e..32a1e016c85e 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -70,7 +70,7 @@ const char *hpsb_speedto_str[] = { "S100", "S200", "S400", "S800", "S1600", "S32
70struct class *hpsb_protocol_class; 70struct class *hpsb_protocol_class;
71 71
72#ifdef CONFIG_IEEE1394_VERBOSEDEBUG 72#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
73static void dump_packet(const char *text, quadlet_t *data, int size) 73static void dump_packet(const char *text, quadlet_t *data, int size, int speed)
74{ 74{
75 int i; 75 int i;
76 76
@@ -78,12 +78,15 @@ static void dump_packet(const char *text, quadlet_t *data, int size)
78 size = (size > 4 ? 4 : size); 78 size = (size > 4 ? 4 : size);
79 79
80 printk(KERN_DEBUG "ieee1394: %s", text); 80 printk(KERN_DEBUG "ieee1394: %s", text);
81 if (speed > -1 && speed < 6)
82 printk(" at %s", hpsb_speedto_str[speed]);
83 printk(":");
81 for (i = 0; i < size; i++) 84 for (i = 0; i < size; i++)
82 printk(" %08x", data[i]); 85 printk(" %08x", data[i]);
83 printk("\n"); 86 printk("\n");
84} 87}
85#else 88#else
86#define dump_packet(x,y,z) 89#define dump_packet(a,b,c,d)
87#endif 90#endif
88 91
89static void abort_requests(struct hpsb_host *host); 92static void abort_requests(struct hpsb_host *host);
@@ -544,8 +547,7 @@ int hpsb_send_packet(struct hpsb_packet *packet)
544 if (packet->data_size) 547 if (packet->data_size)
545 memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size); 548 memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size);
546 549
547 dump_packet("send packet local:", packet->header, 550 dump_packet("send packet local", packet->header, packet->header_size, -1);
548 packet->header_size);
549 551
550 hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE); 552 hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE);
551 hpsb_packet_received(host, data, size, 0); 553 hpsb_packet_received(host, data, size, 0);
@@ -561,21 +563,7 @@ int hpsb_send_packet(struct hpsb_packet *packet)
561 + NODEID_TO_NODE(packet->node_id)]; 563 + NODEID_TO_NODE(packet->node_id)];
562 } 564 }
563 565
564#ifdef CONFIG_IEEE1394_VERBOSEDEBUG 566 dump_packet("send packet", packet->header, packet->header_size, packet->speed_code);
565 switch (packet->speed_code) {
566 case 2:
567 dump_packet("send packet 400:", packet->header,
568 packet->header_size);
569 break;
570 case 1:
571 dump_packet("send packet 200:", packet->header,
572 packet->header_size);
573 break;
574 default:
575 dump_packet("send packet 100:", packet->header,
576 packet->header_size);
577 }
578#endif
579 567
580 return host->driver->transmit_packet(host, packet); 568 return host->driver->transmit_packet(host, packet);
581} 569}
@@ -636,7 +624,7 @@ static void handle_packet_response(struct hpsb_host *host, int tcode,
636 624
637 if (packet == NULL) { 625 if (packet == NULL) {
638 HPSB_DEBUG("unsolicited response packet received - no tlabel match"); 626 HPSB_DEBUG("unsolicited response packet received - no tlabel match");
639 dump_packet("contents:", data, 16); 627 dump_packet("contents", data, 16, -1);
640 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); 628 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
641 return; 629 return;
642 } 630 }
@@ -677,7 +665,7 @@ static void handle_packet_response(struct hpsb_host *host, int tcode,
677 if (!tcode_match) { 665 if (!tcode_match) {
678 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); 666 spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags);
679 HPSB_INFO("unsolicited response packet received - tcode mismatch"); 667 HPSB_INFO("unsolicited response packet received - tcode mismatch");
680 dump_packet("contents:", data, 16); 668 dump_packet("contents", data, 16, -1);
681 return; 669 return;
682 } 670 }
683 671
@@ -914,7 +902,7 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
914 return; 902 return;
915 } 903 }
916 904
917 dump_packet("received packet:", data, size); 905 dump_packet("received packet", data, size, -1);
918 906
919 tcode = (data[0] >> 4) & 0xf; 907 tcode = (data[0] >> 4) & 0xf;
920 908
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index b23322523ef5..347ece6b583c 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -64,10 +64,10 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length,
64 struct nodemgr_csr_info *ci = (struct nodemgr_csr_info*)__ci; 64 struct nodemgr_csr_info *ci = (struct nodemgr_csr_info*)__ci;
65 int i, ret = 0; 65 int i, ret = 0;
66 66
67 for (i = 0; i < 3; i++) { 67 for (i = 1; ; i++) {
68 ret = hpsb_read(ci->host, ci->nodeid, ci->generation, addr, 68 ret = hpsb_read(ci->host, ci->nodeid, ci->generation, addr,
69 buffer, length); 69 buffer, length);
70 if (!ret) 70 if (!ret || i == 3)
71 break; 71 break;
72 72
73 if (msleep_interruptible(334)) 73 if (msleep_interruptible(334))
@@ -1438,9 +1438,13 @@ static int nodemgr_do_irm_duties(struct hpsb_host *host, int cycles)
1438 if (host->busmgr_id == 0xffff && host->node_count > 1) 1438 if (host->busmgr_id == 0xffff && host->node_count > 1)
1439 { 1439 {
1440 u16 root_node = host->node_count - 1; 1440 u16 root_node = host->node_count - 1;
1441 struct node_entry *ne = find_entry_by_nodeid(host, root_node | LOCAL_BUS);
1442 1441
1443 if (ne && ne->busopt.cmc) 1442 /* get cycle master capability flag from root node */
1443 if (host->is_cycmst ||
1444 (!hpsb_read(host, LOCAL_BUS | root_node, get_hpsb_generation(host),
1445 (CSR_REGISTER_BASE + CSR_CONFIG_ROM + 2 * sizeof(quadlet_t)),
1446 &bc, sizeof(quadlet_t)) &&
1447 be32_to_cpu(bc) & 1 << CSR_CMC_SHIFT))
1444 hpsb_send_phy_config(host, root_node, -1); 1448 hpsb_send_phy_config(host, root_node, -1);
1445 else { 1449 else {
1446 HPSB_DEBUG("The root node is not cycle master capable; " 1450 HPSB_DEBUG("The root node is not cycle master capable; "
@@ -1557,24 +1561,19 @@ static int nodemgr_host_thread(void *__hi)
1557 } 1561 }
1558 } 1562 }
1559 1563
1560 if (!nodemgr_check_irm_capability(host, reset_cycles)) { 1564 if (!nodemgr_check_irm_capability(host, reset_cycles) ||
1565 !nodemgr_do_irm_duties(host, reset_cycles)) {
1561 reset_cycles++; 1566 reset_cycles++;
1562 up(&nodemgr_serialize); 1567 up(&nodemgr_serialize);
1563 continue; 1568 continue;
1564 } 1569 }
1570 reset_cycles = 0;
1565 1571
1566 /* Scan our nodes to get the bus options and create node 1572 /* Scan our nodes to get the bus options and create node
1567 * entries. This does not do the sysfs stuff, since that 1573 * entries. This does not do the sysfs stuff, since that
1568 * would trigger hotplug callbacks and such, which is a 1574 * would trigger hotplug callbacks and such, which is a
1569 * bad idea at this point. */ 1575 * bad idea at this point. */
1570 nodemgr_node_scan(hi, generation); 1576 nodemgr_node_scan(hi, generation);
1571 if (!nodemgr_do_irm_duties(host, reset_cycles)) {
1572 reset_cycles++;
1573 up(&nodemgr_serialize);
1574 continue;
1575 }
1576
1577 reset_cycles = 0;
1578 1577
1579 /* This actually does the full probe, with sysfs 1578 /* This actually does the full probe, with sysfs
1580 * registration. */ 1579 * registration. */
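The nodemgr.c change above restructures the config-ROM read retry so the loop still makes up to three attempts but no longer sleeps after the final failed one, returning the last error as-is. A minimal user-space C sketch of that control flow follows; bounded_retry_read() and its try_read callback are hypothetical stand-ins, not code from the patch.

#include <stddef.h>
#include <unistd.h>

/* try_read is a hypothetical stand-in for hpsb_read(): returns 0 on success. */
static int bounded_retry_read(int (*try_read)(void *buf, size_t len),
                              void *buf, size_t len)
{
        int attempt, ret;

        for (attempt = 1; ; attempt++) {
                ret = try_read(buf, len);
                if (ret == 0 || attempt == 3)
                        break;                  /* success, or out of attempts */
                usleep(334 * 1000);             /* delay only between attempts */
        }
        return ret;
}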
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 27018c8efc24..6a6acbd80af4 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -162,7 +162,7 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
162printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args) 162printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
163 163
164static char version[] __devinitdata = 164static char version[] __devinitdata =
165 "$Rev: 1299 $ Ben Collins <bcollins@debian.org>"; 165 "$Rev: 1313 $ Ben Collins <bcollins@debian.org>";
166 166
167/* Module Parameters */ 167/* Module Parameters */
168static int phys_dma = 1; 168static int phys_dma = 1;
@@ -1084,7 +1084,7 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
1084 initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1); 1084 initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
1085 1085
1086 if (printk_ratelimit()) 1086 if (printk_ratelimit())
1087 PRINT(KERN_ERR, "IR legacy activated"); 1087 DBGMSG("IR legacy activated");
1088 } 1088 }
1089 1089
1090 spin_lock_irqsave(&ohci->IR_channel_lock, flags); 1090 spin_lock_irqsave(&ohci->IR_channel_lock, flags);
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index b4fa14793fe5..5fe4f2ba0979 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -2958,4 +2958,3 @@ static void __exit cleanup_raw1394(void)
2958module_init(init_raw1394); 2958module_init(init_raw1394);
2959module_exit(cleanup_raw1394); 2959module_exit(cleanup_raw1394);
2960MODULE_LICENSE("GPL"); 2960MODULE_LICENSE("GPL");
2961MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16);
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index de88218ef7cc..12cec7c4a342 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -97,16 +97,18 @@ static char version[] __devinitdata =
97 */ 97 */
98static int max_speed = IEEE1394_SPEED_MAX; 98static int max_speed = IEEE1394_SPEED_MAX;
99module_param(max_speed, int, 0644); 99module_param(max_speed, int, 0644);
100MODULE_PARM_DESC(max_speed, "Force max speed (3 = 800mb, 2 = 400mb default, 1 = 200mb, 0 = 100mb)"); 100MODULE_PARM_DESC(max_speed, "Force max speed (3 = 800mb, 2 = 400mb, 1 = 200mb, 0 = 100mb)");
101 101
102/* 102/*
103 * Set serialize_io to 1 if you'd like only one scsi command sent 103 * Set serialize_io to 1 if you'd like only one scsi command sent
104 * down to us at a time (debugging). This might be necessary for very 104 * down to us at a time (debugging). This might be necessary for very
105 * badly behaved sbp2 devices. 105 * badly behaved sbp2 devices.
106 *
107 * TODO: Make this configurable per device.
106 */ 108 */
107static int serialize_io; 109static int serialize_io = 1;
108module_param(serialize_io, int, 0444); 110module_param(serialize_io, int, 0444);
109MODULE_PARM_DESC(serialize_io, "Serialize all I/O coming down from the scsi drivers (default = 0)"); 111MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default = 1, faster = 0)");
110 112
111/* 113/*
112 * Bump up max_sectors if you'd like to support very large sized 114 * Bump up max_sectors if you'd like to support very large sized
@@ -596,6 +598,14 @@ static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_i
596 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); 598 spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
597} 599}
598 600
601/*
602 * Is scsi_id valid? Is the 1394 node still present?
603 */
604static inline int sbp2util_node_is_available(struct scsi_id_instance_data *scsi_id)
605{
606 return scsi_id && scsi_id->ne && !scsi_id->ne->in_limbo;
607}
608
599 609
600 610
601/********************************************* 611/*********************************************
@@ -631,11 +641,23 @@ static int sbp2_remove(struct device *dev)
631{ 641{
632 struct unit_directory *ud; 642 struct unit_directory *ud;
633 struct scsi_id_instance_data *scsi_id; 643 struct scsi_id_instance_data *scsi_id;
644 struct scsi_device *sdev;
634 645
635 SBP2_DEBUG("sbp2_remove"); 646 SBP2_DEBUG("sbp2_remove");
636 647
637 ud = container_of(dev, struct unit_directory, device); 648 ud = container_of(dev, struct unit_directory, device);
638 scsi_id = ud->device.driver_data; 649 scsi_id = ud->device.driver_data;
650 if (!scsi_id)
651 return 0;
652
653 /* Trigger shutdown functions in scsi's highlevel. */
654 if (scsi_id->scsi_host)
655 scsi_unblock_requests(scsi_id->scsi_host);
656 sdev = scsi_id->sdev;
657 if (sdev) {
658 scsi_id->sdev = NULL;
659 scsi_remove_device(sdev);
660 }
639 661
640 sbp2_logout_device(scsi_id); 662 sbp2_logout_device(scsi_id);
641 sbp2_remove_device(scsi_id); 663 sbp2_remove_device(scsi_id);
@@ -2473,37 +2495,26 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
2473 struct scsi_id_instance_data *scsi_id = 2495 struct scsi_id_instance_data *scsi_id =
2474 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0]; 2496 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2475 struct sbp2scsi_host_info *hi; 2497 struct sbp2scsi_host_info *hi;
2498 int result = DID_NO_CONNECT << 16;
2476 2499
2477 SBP2_DEBUG("sbp2scsi_queuecommand"); 2500 SBP2_DEBUG("sbp2scsi_queuecommand");
2478 2501
2479 /* 2502 if (!sbp2util_node_is_available(scsi_id))
2480 * If scsi_id is null, it means there is no device in this slot, 2503 goto done;
2481 * so we should return selection timeout.
2482 */
2483 if (!scsi_id) {
2484 SCpnt->result = DID_NO_CONNECT << 16;
2485 done (SCpnt);
2486 return 0;
2487 }
2488 2504
2489 hi = scsi_id->hi; 2505 hi = scsi_id->hi;
2490 2506
2491 if (!hi) { 2507 if (!hi) {
2492 SBP2_ERR("sbp2scsi_host_info is NULL - this is bad!"); 2508 SBP2_ERR("sbp2scsi_host_info is NULL - this is bad!");
2493 SCpnt->result = DID_NO_CONNECT << 16; 2509 goto done;
2494 done (SCpnt);
2495 return(0);
2496 } 2510 }
2497 2511
2498 /* 2512 /*
2499 * Until we handle multiple luns, just return selection time-out 2513 * Until we handle multiple luns, just return selection time-out
2500 * to any IO directed at non-zero LUNs 2514 * to any IO directed at non-zero LUNs
2501 */ 2515 */
2502 if (SCpnt->device->lun) { 2516 if (SCpnt->device->lun)
2503 SCpnt->result = DID_NO_CONNECT << 16; 2517 goto done;
2504 done (SCpnt);
2505 return(0);
2506 }
2507 2518
2508 /* 2519 /*
2509 * Check for request sense command, and handle it here 2520 * Check for request sense command, and handle it here
@@ -2514,7 +2525,7 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
2514 memcpy(SCpnt->request_buffer, SCpnt->sense_buffer, SCpnt->request_bufflen); 2525 memcpy(SCpnt->request_buffer, SCpnt->sense_buffer, SCpnt->request_bufflen);
2515 memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); 2526 memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
2516 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_GOOD, SCpnt, done); 2527 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_GOOD, SCpnt, done);
2517 return(0); 2528 return 0;
2518 } 2529 }
2519 2530
2520 /* 2531 /*
@@ -2522,9 +2533,8 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
2522 */ 2533 */
2523 if (!hpsb_node_entry_valid(scsi_id->ne)) { 2534 if (!hpsb_node_entry_valid(scsi_id->ne)) {
2524 SBP2_ERR("Bus reset in progress - rejecting command"); 2535 SBP2_ERR("Bus reset in progress - rejecting command");
2525 SCpnt->result = DID_BUS_BUSY << 16; 2536 result = DID_BUS_BUSY << 16;
2526 done (SCpnt); 2537 goto done;
2527 return(0);
2528 } 2538 }
2529 2539
2530 /* 2540 /*
@@ -2535,8 +2545,12 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
2535 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT, 2545 sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT,
2536 SCpnt, done); 2546 SCpnt, done);
2537 } 2547 }
2548 return 0;
2538 2549
2539 return(0); 2550done:
2551 SCpnt->result = result;
2552 done(SCpnt);
2553 return 0;
2540} 2554}
2541 2555
2542/* 2556/*
@@ -2683,14 +2697,27 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
2683} 2697}
2684 2698
2685 2699
2686static int sbp2scsi_slave_configure (struct scsi_device *sdev) 2700static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
2687{ 2701{
2688 blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); 2702 ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = sdev;
2703 return 0;
2704}
2705
2689 2706
2707static int sbp2scsi_slave_configure(struct scsi_device *sdev)
2708{
2709 blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
2690 return 0; 2710 return 0;
2691} 2711}
2692 2712
2693 2713
2714static void sbp2scsi_slave_destroy(struct scsi_device *sdev)
2715{
2716 ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = NULL;
2717 return;
2718}
2719
2720
2694/* 2721/*
2695 * Called by scsi stack when something has really gone wrong. Usually 2722 * Called by scsi stack when something has really gone wrong. Usually
2696 * called when a command has timed-out for some reason. 2723 * called when a command has timed-out for some reason.
@@ -2705,7 +2732,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2705 SBP2_ERR("aborting sbp2 command"); 2732 SBP2_ERR("aborting sbp2 command");
2706 scsi_print_command(SCpnt); 2733 scsi_print_command(SCpnt);
2707 2734
2708 if (scsi_id) { 2735 if (sbp2util_node_is_available(scsi_id)) {
2709 2736
2710 /* 2737 /*
2711 * Right now, just return any matching command structures 2738 * Right now, just return any matching command structures
@@ -2742,31 +2769,24 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt)
2742/* 2769/*
2743 * Called by scsi stack when something has really gone wrong. 2770 * Called by scsi stack when something has really gone wrong.
2744 */ 2771 */
2745static int __sbp2scsi_reset(struct scsi_cmnd *SCpnt) 2772static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
2746{ 2773{
2747 struct scsi_id_instance_data *scsi_id = 2774 struct scsi_id_instance_data *scsi_id =
2748 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0]; 2775 (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0];
2776 unsigned long flags;
2749 2777
2750 SBP2_ERR("reset requested"); 2778 SBP2_ERR("reset requested");
2751 2779
2752 if (scsi_id) { 2780 spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
2781
2782 if (sbp2util_node_is_available(scsi_id)) {
2753 SBP2_ERR("Generating sbp2 fetch agent reset"); 2783 SBP2_ERR("Generating sbp2 fetch agent reset");
2754 sbp2_agent_reset(scsi_id, 0); 2784 sbp2_agent_reset(scsi_id, 0);
2755 } 2785 }
2756 2786
2757 return(SUCCESS);
2758}
2759
2760static int sbp2scsi_reset(struct scsi_cmnd *SCpnt)
2761{
2762 unsigned long flags;
2763 int rc;
2764
2765 spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
2766 rc = __sbp2scsi_reset(SCpnt);
2767 spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags); 2787 spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags);
2768 2788
2769 return rc; 2789 return SUCCESS;
2770} 2790}
2771 2791
2772static const char *sbp2scsi_info (struct Scsi_Host *host) 2792static const char *sbp2scsi_info (struct Scsi_Host *host)
@@ -2817,7 +2837,9 @@ static struct scsi_host_template scsi_driver_template = {
2817 .eh_device_reset_handler = sbp2scsi_reset, 2837 .eh_device_reset_handler = sbp2scsi_reset,
2818 .eh_bus_reset_handler = sbp2scsi_reset, 2838 .eh_bus_reset_handler = sbp2scsi_reset,
2819 .eh_host_reset_handler = sbp2scsi_reset, 2839 .eh_host_reset_handler = sbp2scsi_reset,
2840 .slave_alloc = sbp2scsi_slave_alloc,
2820 .slave_configure = sbp2scsi_slave_configure, 2841 .slave_configure = sbp2scsi_slave_configure,
2842 .slave_destroy = sbp2scsi_slave_destroy,
2821 .this_id = -1, 2843 .this_id = -1,
2822 .sg_tablesize = SG_ALL, 2844 .sg_tablesize = SG_ALL,
2823 .use_clustering = ENABLE_CLUSTERING, 2845 .use_clustering = ENABLE_CLUSTERING,
@@ -2837,7 +2859,8 @@ static int sbp2_module_init(void)
2837 2859
2838 /* Module load debug option to force one command at a time (serializing I/O) */ 2860 /* Module load debug option to force one command at a time (serializing I/O) */
2839 if (serialize_io) { 2861 if (serialize_io) {
2840 SBP2_ERR("Driver forced to serialize I/O (serialize_io = 1)"); 2862 SBP2_INFO("Driver forced to serialize I/O (serialize_io=1)");
2863 SBP2_INFO("Try serialize_io=0 for better performance");
2841 scsi_driver_template.can_queue = 1; 2864 scsi_driver_template.can_queue = 1;
2842 scsi_driver_template.cmd_per_lun = 1; 2865 scsi_driver_template.cmd_per_lun = 1;
2843 } 2866 }
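Several of the sbp2.c hunks above replace repeated "set result, call done(), return" blocks with a single done: label and a default result, gated by the new availability check. Below is a small, self-contained C sketch of that single-exit pattern under assumed, simplified types; struct request, queue_request() and the complete callback are illustrative placeholders only.

struct request { int result; };

static int queue_request(struct request *req, int device_present,
                         void (*complete)(struct request *))
{
        int result = -1;                /* default error, e.g. "no connect" */

        if (!device_present)
                goto done;              /* fail without duplicating cleanup */

        /* ... the normal submission path would go here ... */
        return 0;

done:
        req->result = result;
        complete(req);
        return 0;
}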
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 9d6facf2f78f..11be9c9c82a8 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -1571,4 +1571,3 @@ static int __init video1394_init_module (void)
1571 1571
1572module_init(video1394_init_module); 1572module_init(video1394_init_module);
1573module_exit(video1394_exit_module); 1573module_exit(video1394_exit_module);
1574MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16);
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 2bd8b1cc57c4..e23836d0e21b 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -412,8 +412,8 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
412 412
413 hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class); 413 hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
414 data_size = sizeof(struct ib_rmpp_mad) - hdr_size; 414 data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
415 pad = data_size - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); 415 pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
416 if (pad > data_size || pad < 0) 416 if (pad > IB_MGMT_RMPP_DATA || pad < 0)
417 pad = 0; 417 pad = 0;
418 418
419 return hdr_size + rmpp_recv->seg_num * data_size - pad; 419 return hdr_size + rmpp_recv->seg_num * data_size - pad;
@@ -583,6 +583,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
583{ 583{
584 struct ib_rmpp_mad *rmpp_mad; 584 struct ib_rmpp_mad *rmpp_mad;
585 int timeout; 585 int timeout;
586 u32 paylen;
586 587
587 rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr; 588 rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
588 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); 589 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
@@ -590,11 +591,9 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
590 591
591 if (mad_send_wr->seg_num == 1) { 592 if (mad_send_wr->seg_num == 1) {
592 rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST; 593 rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
593 rmpp_mad->rmpp_hdr.paylen_newwin = 594 paylen = mad_send_wr->total_seg * IB_MGMT_RMPP_DATA -
594 cpu_to_be32(mad_send_wr->total_seg * 595 mad_send_wr->pad;
595 (sizeof(struct ib_rmpp_mad) - 596 rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
596 offsetof(struct ib_rmpp_mad, data)) -
597 mad_send_wr->pad);
598 mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad); 597 mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
599 } else { 598 } else {
600 mad_send_wr->send_wr.num_sge = 2; 599 mad_send_wr->send_wr.num_sge = 2;
@@ -608,10 +607,8 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
608 607
609 if (mad_send_wr->seg_num == mad_send_wr->total_seg) { 608 if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
610 rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST; 609 rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
611 rmpp_mad->rmpp_hdr.paylen_newwin = 610 paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
612 cpu_to_be32(sizeof(struct ib_rmpp_mad) - 611 rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
613 offsetof(struct ib_rmpp_mad, data) -
614 mad_send_wr->pad);
615 } 612 }
616 613
617 /* 2 seconds for an ACK until we can find the packet lifetime */ 614 /* 2 seconds for an ACK until we can find the packet lifetime */
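The mad_rmpp.c hunks above compute the advertised payload length from IB_MGMT_RMPP_DATA: the first segment advertises the whole remaining payload (total_seg * IB_MGMT_RMPP_DATA - pad) while the last segment advertises only its own data (IB_MGMT_RMPP_DATA - pad). The arithmetic can be checked with the small stand-alone C program below; SEG_DATA and the sample numbers are assumptions for illustration, not values taken from the patch.

#include <stdio.h>

#define SEG_DATA 220   /* assumed per-segment data bytes, for illustration only */

int main(void)
{
        int total_seg = 3;          /* message split into three segments   */
        int pad       = 40;         /* unused bytes in the final segment   */

        int first_paylen = total_seg * SEG_DATA - pad;  /* whole payload   */
        int last_paylen  = SEG_DATA - pad;              /* final remainder */

        printf("first=%d last=%d\n", first_paylen, last_paylen);
        return 0;
}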
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 7c2f03057ddb..a64d6b4dcc16 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -334,10 +334,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
334 ret = -EINVAL; 334 ret = -EINVAL;
335 goto err_ah; 335 goto err_ah;
336 } 336 }
337 /* Validate that management class can support RMPP */ 337
338 /* Validate that the management class can support RMPP */
338 if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) { 339 if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
339 hdr_len = offsetof(struct ib_sa_mad, data); 340 hdr_len = offsetof(struct ib_sa_mad, data);
340 data_len = length; 341 data_len = length - hdr_len;
341 } else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 342 } else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
342 (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) { 343 (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
343 hdr_len = offsetof(struct ib_vendor_mad, data); 344 hdr_len = offsetof(struct ib_vendor_mad, data);
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b1897bed14ad..cc124344dd2c 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -69,6 +69,7 @@ struct ib_uverbs_event_file {
69 69
70struct ib_uverbs_file { 70struct ib_uverbs_file {
71 struct kref ref; 71 struct kref ref;
72 struct semaphore mutex;
72 struct ib_uverbs_device *device; 73 struct ib_uverbs_device *device;
73 struct ib_ucontext *ucontext; 74 struct ib_ucontext *ucontext;
74 struct ib_event_handler event_handler; 75 struct ib_event_handler event_handler;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index e91ebde46481..562445165d2b 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -76,8 +76,9 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
76 struct ib_uverbs_get_context_resp resp; 76 struct ib_uverbs_get_context_resp resp;
77 struct ib_udata udata; 77 struct ib_udata udata;
78 struct ib_device *ibdev = file->device->ib_dev; 78 struct ib_device *ibdev = file->device->ib_dev;
79 struct ib_ucontext *ucontext;
79 int i; 80 int i;
80 int ret = in_len; 81 int ret;
81 82
82 if (out_len < sizeof resp) 83 if (out_len < sizeof resp)
83 return -ENOSPC; 84 return -ENOSPC;
@@ -85,45 +86,56 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
85 if (copy_from_user(&cmd, buf, sizeof cmd)) 86 if (copy_from_user(&cmd, buf, sizeof cmd))
86 return -EFAULT; 87 return -EFAULT;
87 88
89 down(&file->mutex);
90
91 if (file->ucontext) {
92 ret = -EINVAL;
93 goto err;
94 }
95
88 INIT_UDATA(&udata, buf + sizeof cmd, 96 INIT_UDATA(&udata, buf + sizeof cmd,
89 (unsigned long) cmd.response + sizeof resp, 97 (unsigned long) cmd.response + sizeof resp,
90 in_len - sizeof cmd, out_len - sizeof resp); 98 in_len - sizeof cmd, out_len - sizeof resp);
91 99
92 file->ucontext = ibdev->alloc_ucontext(ibdev, &udata); 100 ucontext = ibdev->alloc_ucontext(ibdev, &udata);
93 if (IS_ERR(file->ucontext)) { 101 if (IS_ERR(ucontext))
94 ret = PTR_ERR(file->ucontext); 102 return PTR_ERR(file->ucontext);
95 file->ucontext = NULL;
96 return ret;
97 }
98 103
99 file->ucontext->device = ibdev; 104 ucontext->device = ibdev;
100 INIT_LIST_HEAD(&file->ucontext->pd_list); 105 INIT_LIST_HEAD(&ucontext->pd_list);
101 INIT_LIST_HEAD(&file->ucontext->mr_list); 106 INIT_LIST_HEAD(&ucontext->mr_list);
102 INIT_LIST_HEAD(&file->ucontext->mw_list); 107 INIT_LIST_HEAD(&ucontext->mw_list);
103 INIT_LIST_HEAD(&file->ucontext->cq_list); 108 INIT_LIST_HEAD(&ucontext->cq_list);
104 INIT_LIST_HEAD(&file->ucontext->qp_list); 109 INIT_LIST_HEAD(&ucontext->qp_list);
105 INIT_LIST_HEAD(&file->ucontext->srq_list); 110 INIT_LIST_HEAD(&ucontext->srq_list);
106 INIT_LIST_HEAD(&file->ucontext->ah_list); 111 INIT_LIST_HEAD(&ucontext->ah_list);
107 spin_lock_init(&file->ucontext->lock);
108 112
109 resp.async_fd = file->async_file.fd; 113 resp.async_fd = file->async_file.fd;
110 for (i = 0; i < file->device->num_comp; ++i) 114 for (i = 0; i < file->device->num_comp; ++i)
111 if (copy_to_user((void __user *) (unsigned long) cmd.cq_fd_tab + 115 if (copy_to_user((void __user *) (unsigned long) cmd.cq_fd_tab +
112 i * sizeof (__u32), 116 i * sizeof (__u32),
113 &file->comp_file[i].fd, sizeof (__u32))) 117 &file->comp_file[i].fd, sizeof (__u32))) {
114 goto err; 118 ret = -EFAULT;
119 goto err_free;
120 }
115 121
116 if (copy_to_user((void __user *) (unsigned long) cmd.response, 122 if (copy_to_user((void __user *) (unsigned long) cmd.response,
117 &resp, sizeof resp)) 123 &resp, sizeof resp)) {
118 goto err; 124 ret = -EFAULT;
125 goto err_free;
126 }
127
128 file->ucontext = ucontext;
129 up(&file->mutex);
119 130
120 return in_len; 131 return in_len;
121 132
122err: 133err_free:
123 ibdev->dealloc_ucontext(file->ucontext); 134 ibdev->dealloc_ucontext(ucontext);
124 file->ucontext = NULL;
125 135
126 return -EFAULT; 136err:
137 up(&file->mutex);
138 return ret;
127} 139}
128 140
129ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, 141ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
@@ -352,9 +364,9 @@ retry:
352 if (ret) 364 if (ret)
353 goto err_pd; 365 goto err_pd;
354 366
355 spin_lock_irq(&file->ucontext->lock); 367 down(&file->mutex);
356 list_add_tail(&uobj->list, &file->ucontext->pd_list); 368 list_add_tail(&uobj->list, &file->ucontext->pd_list);
357 spin_unlock_irq(&file->ucontext->lock); 369 up(&file->mutex);
358 370
359 memset(&resp, 0, sizeof resp); 371 memset(&resp, 0, sizeof resp);
360 resp.pd_handle = uobj->id; 372 resp.pd_handle = uobj->id;
@@ -368,9 +380,9 @@ retry:
368 return in_len; 380 return in_len;
369 381
370err_list: 382err_list:
371 spin_lock_irq(&file->ucontext->lock); 383 down(&file->mutex);
372 list_del(&uobj->list); 384 list_del(&uobj->list);
373 spin_unlock_irq(&file->ucontext->lock); 385 up(&file->mutex);
374 386
375 down(&ib_uverbs_idr_mutex); 387 down(&ib_uverbs_idr_mutex);
376 idr_remove(&ib_uverbs_pd_idr, uobj->id); 388 idr_remove(&ib_uverbs_pd_idr, uobj->id);
@@ -410,9 +422,9 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
410 422
411 idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle); 423 idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);
412 424
413 spin_lock_irq(&file->ucontext->lock); 425 down(&file->mutex);
414 list_del(&uobj->list); 426 list_del(&uobj->list);
415 spin_unlock_irq(&file->ucontext->lock); 427 up(&file->mutex);
416 428
417 kfree(uobj); 429 kfree(uobj);
418 430
@@ -512,9 +524,9 @@ retry:
512 524
513 resp.mr_handle = obj->uobject.id; 525 resp.mr_handle = obj->uobject.id;
514 526
515 spin_lock_irq(&file->ucontext->lock); 527 down(&file->mutex);
516 list_add_tail(&obj->uobject.list, &file->ucontext->mr_list); 528 list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
517 spin_unlock_irq(&file->ucontext->lock); 529 up(&file->mutex);
518 530
519 if (copy_to_user((void __user *) (unsigned long) cmd.response, 531 if (copy_to_user((void __user *) (unsigned long) cmd.response,
520 &resp, sizeof resp)) { 532 &resp, sizeof resp)) {
@@ -527,9 +539,9 @@ retry:
527 return in_len; 539 return in_len;
528 540
529err_list: 541err_list:
530 spin_lock_irq(&file->ucontext->lock); 542 down(&file->mutex);
531 list_del(&obj->uobject.list); 543 list_del(&obj->uobject.list);
532 spin_unlock_irq(&file->ucontext->lock); 544 up(&file->mutex);
533 545
534err_unreg: 546err_unreg:
535 ib_dereg_mr(mr); 547 ib_dereg_mr(mr);
@@ -570,9 +582,9 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
570 582
571 idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle); 583 idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);
572 584
573 spin_lock_irq(&file->ucontext->lock); 585 down(&file->mutex);
574 list_del(&memobj->uobject.list); 586 list_del(&memobj->uobject.list);
575 spin_unlock_irq(&file->ucontext->lock); 587 up(&file->mutex);
576 588
577 ib_umem_release(file->device->ib_dev, &memobj->umem); 589 ib_umem_release(file->device->ib_dev, &memobj->umem);
578 kfree(memobj); 590 kfree(memobj);
@@ -647,9 +659,9 @@ retry:
647 if (ret) 659 if (ret)
648 goto err_cq; 660 goto err_cq;
649 661
650 spin_lock_irq(&file->ucontext->lock); 662 down(&file->mutex);
651 list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list); 663 list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
652 spin_unlock_irq(&file->ucontext->lock); 664 up(&file->mutex);
653 665
654 memset(&resp, 0, sizeof resp); 666 memset(&resp, 0, sizeof resp);
655 resp.cq_handle = uobj->uobject.id; 667 resp.cq_handle = uobj->uobject.id;
@@ -664,9 +676,9 @@ retry:
664 return in_len; 676 return in_len;
665 677
666err_list: 678err_list:
667 spin_lock_irq(&file->ucontext->lock); 679 down(&file->mutex);
668 list_del(&uobj->uobject.list); 680 list_del(&uobj->uobject.list);
669 spin_unlock_irq(&file->ucontext->lock); 681 up(&file->mutex);
670 682
671 down(&ib_uverbs_idr_mutex); 683 down(&ib_uverbs_idr_mutex);
672 idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id); 684 idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
@@ -712,9 +724,9 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
712 724
713 idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle); 725 idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);
714 726
715 spin_lock_irq(&file->ucontext->lock); 727 down(&file->mutex);
716 list_del(&uobj->uobject.list); 728 list_del(&uobj->uobject.list);
717 spin_unlock_irq(&file->ucontext->lock); 729 up(&file->mutex);
718 730
719 spin_lock_irq(&file->comp_file[0].lock); 731 spin_lock_irq(&file->comp_file[0].lock);
720 list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) { 732 list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
@@ -847,9 +859,9 @@ retry:
847 859
848 resp.qp_handle = uobj->uobject.id; 860 resp.qp_handle = uobj->uobject.id;
849 861
850 spin_lock_irq(&file->ucontext->lock); 862 down(&file->mutex);
851 list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list); 863 list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
852 spin_unlock_irq(&file->ucontext->lock); 864 up(&file->mutex);
853 865
854 if (copy_to_user((void __user *) (unsigned long) cmd.response, 866 if (copy_to_user((void __user *) (unsigned long) cmd.response,
855 &resp, sizeof resp)) { 867 &resp, sizeof resp)) {
@@ -862,9 +874,9 @@ retry:
862 return in_len; 874 return in_len;
863 875
864err_list: 876err_list:
865 spin_lock_irq(&file->ucontext->lock); 877 down(&file->mutex);
866 list_del(&uobj->uobject.list); 878 list_del(&uobj->uobject.list);
867 spin_unlock_irq(&file->ucontext->lock); 879 up(&file->mutex);
868 880
869err_destroy: 881err_destroy:
870 ib_destroy_qp(qp); 882 ib_destroy_qp(qp);
@@ -989,9 +1001,9 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
989 1001
990 idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle); 1002 idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);
991 1003
992 spin_lock_irq(&file->ucontext->lock); 1004 down(&file->mutex);
993 list_del(&uobj->uobject.list); 1005 list_del(&uobj->uobject.list);
994 spin_unlock_irq(&file->ucontext->lock); 1006 up(&file->mutex);
995 1007
996 spin_lock_irq(&file->async_file.lock); 1008 spin_lock_irq(&file->async_file.lock);
997 list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) { 1009 list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
@@ -1136,9 +1148,9 @@ retry:
1136 1148
1137 resp.srq_handle = uobj->uobject.id; 1149 resp.srq_handle = uobj->uobject.id;
1138 1150
1139 spin_lock_irq(&file->ucontext->lock); 1151 down(&file->mutex);
1140 list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list); 1152 list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
1141 spin_unlock_irq(&file->ucontext->lock); 1153 up(&file->mutex);
1142 1154
1143 if (copy_to_user((void __user *) (unsigned long) cmd.response, 1155 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1144 &resp, sizeof resp)) { 1156 &resp, sizeof resp)) {
@@ -1151,9 +1163,9 @@ retry:
1151 return in_len; 1163 return in_len;
1152 1164
1153err_list: 1165err_list:
1154 spin_lock_irq(&file->ucontext->lock); 1166 down(&file->mutex);
1155 list_del(&uobj->uobject.list); 1167 list_del(&uobj->uobject.list);
1156 spin_unlock_irq(&file->ucontext->lock); 1168 up(&file->mutex);
1157 1169
1158err_destroy: 1170err_destroy:
1159 ib_destroy_srq(srq); 1171 ib_destroy_srq(srq);
@@ -1227,9 +1239,9 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
1227 1239
1228 idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle); 1240 idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);
1229 1241
1230 spin_lock_irq(&file->ucontext->lock); 1242 down(&file->mutex);
1231 list_del(&uobj->uobject.list); 1243 list_del(&uobj->uobject.list);
1232 spin_unlock_irq(&file->ucontext->lock); 1244 up(&file->mutex);
1233 1245
1234 spin_lock_irq(&file->async_file.lock); 1246 spin_lock_irq(&file->async_file.lock);
1235 list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) { 1247 list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index ce5bdb7af306..12511808de21 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -448,7 +448,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
448 if (hdr.in_words * 4 != count) 448 if (hdr.in_words * 4 != count)
449 return -EINVAL; 449 return -EINVAL;
450 450
451 if (hdr.command < 0 || hdr.command >= ARRAY_SIZE(uverbs_cmd_table)) 451 if (hdr.command < 0 ||
452 hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
453 !uverbs_cmd_table[hdr.command])
452 return -EINVAL; 454 return -EINVAL;
453 455
454 if (!file->ucontext && 456 if (!file->ucontext &&
@@ -484,27 +486,29 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
484 file = kmalloc(sizeof *file + 486 file = kmalloc(sizeof *file +
485 (dev->num_comp - 1) * sizeof (struct ib_uverbs_event_file), 487 (dev->num_comp - 1) * sizeof (struct ib_uverbs_event_file),
486 GFP_KERNEL); 488 GFP_KERNEL);
487 if (!file) 489 if (!file) {
488 return -ENOMEM; 490 ret = -ENOMEM;
491 goto err;
492 }
489 493
490 file->device = dev; 494 file->device = dev;
491 kref_init(&file->ref); 495 kref_init(&file->ref);
496 init_MUTEX(&file->mutex);
492 497
493 file->ucontext = NULL; 498 file->ucontext = NULL;
494 499
500 kref_get(&file->ref);
495 ret = ib_uverbs_event_init(&file->async_file, file); 501 ret = ib_uverbs_event_init(&file->async_file, file);
496 if (ret) 502 if (ret)
497 goto err; 503 goto err_kref;
498 504
499 file->async_file.is_async = 1; 505 file->async_file.is_async = 1;
500 506
501 kref_get(&file->ref);
502
503 for (i = 0; i < dev->num_comp; ++i) { 507 for (i = 0; i < dev->num_comp; ++i) {
508 kref_get(&file->ref);
504 ret = ib_uverbs_event_init(&file->comp_file[i], file); 509 ret = ib_uverbs_event_init(&file->comp_file[i], file);
505 if (ret) 510 if (ret)
506 goto err_async; 511 goto err_async;
507 kref_get(&file->ref);
508 file->comp_file[i].is_async = 0; 512 file->comp_file[i].is_async = 0;
509 } 513 }
510 514
@@ -524,9 +528,16 @@ err_async:
524 528
525 ib_uverbs_event_release(&file->async_file); 529 ib_uverbs_event_release(&file->async_file);
526 530
527err: 531err_kref:
532 /*
533 * One extra kref_put() because we took a reference before the
534 * event file creation that failed and got us here.
535 */
536 kref_put(&file->ref, ib_uverbs_release_file);
528 kref_put(&file->ref, ib_uverbs_release_file); 537 kref_put(&file->ref, ib_uverbs_release_file);
529 538
539err:
540 module_put(dev->ib_dev->owner);
530 return ret; 541 return ret;
531} 542}
532 543
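The uverbs changes above serialize per-file state behind a semaphore: ib_uverbs_get_context() now refuses to run twice and publishes the new ucontext only after it is fully set up, and the object-list manipulations switch from a per-ucontext spinlock to the same per-file lock. A rough user-space analogue using a pthread mutex is sketched below; struct file_state, get_context() and the malloc() stand-in for alloc_ucontext() are hypothetical.

#include <pthread.h>
#include <errno.h>
#include <stdlib.h>

struct file_state {
        pthread_mutex_t mutex;
        void *ucontext;
};

static int get_context(struct file_state *f)
{
        int ret = 0;
        void *ctx;

        pthread_mutex_lock(&f->mutex);
        if (f->ucontext) {              /* already created: reject the call */
                ret = -EINVAL;
                goto out;
        }

        ctx = malloc(64);               /* stand-in for alloc_ucontext()    */
        if (!ctx) {
                ret = -ENOMEM;
                goto out;
        }
        f->ucontext = ctx;              /* publish only after full setup    */
out:
        pthread_mutex_unlock(&f->mutex);
        return ret;
}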
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index cc758a2d2bc6..f6a8ac026557 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -605,7 +605,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
605 err = -EINVAL; 605 err = -EINVAL;
606 goto out; 606 goto out;
607 } 607 }
608 for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i, ++nent) { 608 for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i) {
609 if (virt != -1) { 609 if (virt != -1) {
610 pages[nent * 2] = cpu_to_be64(virt); 610 pages[nent * 2] = cpu_to_be64(virt);
611 virt += 1 << lg; 611 virt += 1 << lg;
@@ -616,7 +616,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
616 ts += 1 << (lg - 10); 616 ts += 1 << (lg - 10);
617 ++tc; 617 ++tc;
618 618
619 if (nent == MTHCA_MAILBOX_SIZE / 16) { 619 if (++nent == MTHCA_MAILBOX_SIZE / 16) {
620 err = mthca_cmd(dev, mailbox->dma, nent, 0, op, 620 err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
621 CMD_TIME_CLASS_B, status); 621 CMD_TIME_CLASS_B, status);
622 if (err || *status) 622 if (err || *status)
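The mthca_cmd.c fix above moves the entry-count increment into the flush test (if (++nent == ...)), so the mailbox is flushed exactly when the entry just written fills it rather than one iteration later. The same batching pattern in plain C, with a hypothetical flush callback:

#define BATCH_MAX 4

static int write_entries(const int *vals, int count,
                         int (*flush)(const int *batch, int n))
{
        int batch[BATCH_MAX];
        int nent = 0, i, err;

        for (i = 0; i < count; ++i) {
                batch[nent] = vals[i];          /* fill the next slot       */
                if (++nent == BATCH_MAX) {      /* count it, then test      */
                        err = flush(batch, nent);
                        if (err)
                                return err;
                        nent = 0;               /* start a fresh batch      */
                }
        }
        return nent ? flush(batch, nent) : 0;   /* flush any partial batch  */
}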
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 18f0981eb0c1..c81fa8e975ef 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -476,12 +476,8 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
476 int i; 476 int i;
477 u8 status; 477 u8 status;
478 478
479 /* Make sure EQ size is aligned to a power of 2 size. */ 479 eq->dev = dev;
480 for (i = 1; i < nent; i <<= 1) 480 eq->nent = roundup_pow_of_two(max(nent, 2));
481 ; /* nothing */
482 nent = i;
483
484 eq->dev = dev;
485 481
486 eq->page_list = kmalloc(npages * sizeof *eq->page_list, 482 eq->page_list = kmalloc(npages * sizeof *eq->page_list,
487 GFP_KERNEL); 483 GFP_KERNEL);
@@ -512,7 +508,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
512 memset(eq->page_list[i].buf, 0, PAGE_SIZE); 508 memset(eq->page_list[i].buf, 0, PAGE_SIZE);
513 } 509 }
514 510
515 for (i = 0; i < nent; ++i) 511 for (i = 0; i < eq->nent; ++i)
516 set_eqe_hw(get_eqe(eq, i)); 512 set_eqe_hw(get_eqe(eq, i));
517 513
518 eq->eqn = mthca_alloc(&dev->eq_table.alloc); 514 eq->eqn = mthca_alloc(&dev->eq_table.alloc);
@@ -528,8 +524,6 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
528 if (err) 524 if (err)
529 goto err_out_free_eq; 525 goto err_out_free_eq;
530 526
531 eq->nent = nent;
532
533 memset(eq_context, 0, sizeof *eq_context); 527 memset(eq_context, 0, sizeof *eq_context);
534 eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK | 528 eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK |
535 MTHCA_EQ_OWNER_HW | 529 MTHCA_EQ_OWNER_HW |
@@ -538,7 +532,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
538 if (mthca_is_memfree(dev)) 532 if (mthca_is_memfree(dev))
539 eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL); 533 eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);
540 534
541 eq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24); 535 eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
542 if (mthca_is_memfree(dev)) { 536 if (mthca_is_memfree(dev)) {
543 eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num); 537 eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
544 } else { 538 } else {
@@ -569,7 +563,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
569 dev->eq_table.arm_mask |= eq->eqn_mask; 563 dev->eq_table.arm_mask |= eq->eqn_mask;
570 564
571 mthca_dbg(dev, "Allocated EQ %d with %d entries\n", 565 mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
572 eq->eqn, nent); 566 eq->eqn, eq->nent);
573 567
574 return err; 568 return err;
575 569
@@ -842,7 +836,7 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev)
842 dev->eq_table.clr_mask = 836 dev->eq_table.clr_mask =
843 swab32(1 << (dev->eq_table.inta_pin & 31)); 837 swab32(1 << (dev->eq_table.inta_pin & 31));
844 dev->eq_table.clr_int = dev->clr_base + 838 dev->eq_table.clr_int = dev->clr_base +
845 (dev->eq_table.inta_pin < 31 ? 4 : 0); 839 (dev->eq_table.inta_pin < 32 ? 4 : 0);
846 } 840 }
847 841
848 dev->eq_table.arm_mask = 0; 842 dev->eq_table.arm_mask = 0;
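The mthca_eq.c hunk above replaces an open-coded alignment loop with roundup_pow_of_two(max(nent, 2)), storing the result in eq->nent and using it consistently afterwards. The helper below sketches the intended rounding behaviour; it is an illustration, not the kernel implementation.

static unsigned int round_up_pow2_min2(unsigned int n)
{
        unsigned int r = 2;             /* never allocate fewer than 2 entries */

        while (r < n)
                r <<= 1;
        return r;
}

/* round_up_pow2_min2(1) == 2, (2) == 2, (3) == 4, (1000) == 1024 */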
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 1827400f189b..7bd7a4bec7b4 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -290,7 +290,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
290 int i; 290 int i;
291 u8 status; 291 u8 status;
292 292
293 num_icm = obj_size * nobj / MTHCA_TABLE_CHUNK_SIZE; 293 num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE - 1) / MTHCA_TABLE_CHUNK_SIZE;
294 294
295 table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL); 295 table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
296 if (!table) 296 if (!table)
@@ -529,12 +529,25 @@ int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db)
529 goto found; 529 goto found;
530 } 530 }
531 531
532 for (i = start; i != end; i += dir)
533 if (!dev->db_tab->page[i].db_rec) {
534 page = dev->db_tab->page + i;
535 goto alloc;
536 }
537
532 if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) { 538 if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
533 ret = -ENOMEM; 539 ret = -ENOMEM;
534 goto out; 540 goto out;
535 } 541 }
536 542
543 if (group == 0)
544 ++dev->db_tab->max_group1;
545 else
546 --dev->db_tab->min_group2;
547
537 page = dev->db_tab->page + end; 548 page = dev->db_tab->page + end;
549
550alloc:
538 page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 4096, 551 page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 4096,
539 &page->mapping, GFP_KERNEL); 552 &page->mapping, GFP_KERNEL);
540 if (!page->db_rec) { 553 if (!page->db_rec) {
@@ -554,10 +567,6 @@ int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db)
554 } 567 }
555 568
556 bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE); 569 bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);
557 if (group == 0)
558 ++dev->db_tab->max_group1;
559 else
560 --dev->db_tab->min_group2;
561 570
562found: 571found:
563 j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE); 572 j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
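The one-line mthca_memfree.c change above turns a truncating division into a rounded-up one, so a table whose total size is not an exact multiple of MTHCA_TABLE_CHUNK_SIZE still gets its final, partially filled chunk. A tiny stand-alone check of the arithmetic, with made-up sizes:

#include <stdio.h>

static unsigned long div_round_up(unsigned long bytes, unsigned long chunk)
{
        return (bytes + chunk - 1) / chunk;
}

int main(void)
{
        /* e.g. 10 objects of 96 bytes in 256-byte chunks: 960 bytes -> 4 chunks */
        printf("%lu\n", div_round_up(10 * 96, 256));
        return 0;
}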
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 1c1c2e230871..3f5319a46577 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -84,7 +84,7 @@ static int mthca_query_device(struct ib_device *ibdev,
84 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 84 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
85 0xffffff; 85 0xffffff;
86 props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30)); 86 props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
87 props->hw_ver = be16_to_cpup((__be16 *) (out_mad->data + 32)); 87 props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
88 memcpy(&props->sys_image_guid, out_mad->data + 4, 8); 88 memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
89 memcpy(&props->node_guid, out_mad->data + 12, 8); 89 memcpy(&props->node_guid, out_mad->data + 12, 8);
90 90
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index bcef06bf15e7..5fa00669f9b8 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -227,7 +227,6 @@ static void mthca_wq_init(struct mthca_wq *wq)
227 wq->last_comp = wq->max - 1; 227 wq->last_comp = wq->max - 1;
228 wq->head = 0; 228 wq->head = 0;
229 wq->tail = 0; 229 wq->tail = 0;
230 wq->last = NULL;
231} 230}
232 231
233void mthca_qp_event(struct mthca_dev *dev, u32 qpn, 232void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
@@ -687,7 +686,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
687 } 686 }
688 687
689 if (attr_mask & IB_QP_TIMEOUT) { 688 if (attr_mask & IB_QP_TIMEOUT) {
690 qp_context->pri_path.ackto = attr->timeout; 689 qp_context->pri_path.ackto = attr->timeout << 3;
691 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT); 690 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
692 } 691 }
693 692
@@ -1103,6 +1102,9 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
1103 } 1102 }
1104 } 1103 }
1105 1104
1105 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
1106 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
1107
1106 return 0; 1108 return 0;
1107} 1109}
1108 1110
@@ -1583,15 +1585,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1583 goto out; 1585 goto out;
1584 } 1586 }
1585 1587
1586 if (prev_wqe) { 1588 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1587 ((struct mthca_next_seg *) prev_wqe)->nda_op = 1589 cpu_to_be32(((ind << qp->sq.wqe_shift) +
1588 cpu_to_be32(((ind << qp->sq.wqe_shift) + 1590 qp->send_wqe_offset) |
1589 qp->send_wqe_offset) | 1591 mthca_opcode[wr->opcode]);
1590 mthca_opcode[wr->opcode]); 1592 wmb();
1591 wmb(); 1593 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1592 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1594 cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
1593 cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
1594 }
1595 1595
1596 if (!size0) { 1596 if (!size0) {
1597 size0 = size; 1597 size0 = size;
@@ -1688,13 +1688,11 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1688 1688
1689 qp->wrid[ind] = wr->wr_id; 1689 qp->wrid[ind] = wr->wr_id;
1690 1690
1691 if (likely(prev_wqe)) { 1691 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1692 ((struct mthca_next_seg *) prev_wqe)->nda_op = 1692 cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
1693 cpu_to_be32((ind << qp->rq.wqe_shift) | 1); 1693 wmb();
1694 wmb(); 1694 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1695 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1695 cpu_to_be32(MTHCA_NEXT_DBD | size);
1696 cpu_to_be32(MTHCA_NEXT_DBD | size);
1697 }
1698 1696
1699 if (!size0) 1697 if (!size0)
1700 size0 = size; 1698 size0 = size;
@@ -1905,15 +1903,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1905 goto out; 1903 goto out;
1906 } 1904 }
1907 1905
1908 if (likely(prev_wqe)) { 1906 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1909 ((struct mthca_next_seg *) prev_wqe)->nda_op = 1907 cpu_to_be32(((ind << qp->sq.wqe_shift) +
1910 cpu_to_be32(((ind << qp->sq.wqe_shift) + 1908 qp->send_wqe_offset) |
1911 qp->send_wqe_offset) | 1909 mthca_opcode[wr->opcode]);
1912 mthca_opcode[wr->opcode]); 1910 wmb();
1913 wmb(); 1911 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1914 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1912 cpu_to_be32(MTHCA_NEXT_DBD | size);
1915 cpu_to_be32(MTHCA_NEXT_DBD | size);
1916 }
1917 1913
1918 if (!size0) { 1914 if (!size0) {
1919 size0 = size; 1915 size0 = size;
@@ -2127,5 +2123,6 @@ void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
2127 for (i = 0; i < 2; ++i) 2123 for (i = 0; i < 2; ++i)
2128 mthca_CONF_SPECIAL_QP(dev, i, 0, &status); 2124 mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
2129 2125
2126 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
2130 mthca_alloc_cleanup(&dev->qp_table.alloc); 2127 mthca_alloc_cleanup(&dev->qp_table.alloc);
2131} 2128}
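A recurring theme in the mthca_qp.c hunks above is initialising sq.last and rq.last to the final WQE when the queue is created, which lets the posting paths drop their if (prev_wqe) checks. The sketch below shows the general idea on a toy ring; the types and functions are invented for illustration and do not correspond to the mthca structures.

#define RING_SIZE 8

struct entry { int next_index; int payload; };

struct ring {
        struct entry slots[RING_SIZE];
        struct entry *last;                     /* most recently posted entry */
};

static void ring_init(struct ring *r)
{
        r->last = &r->slots[RING_SIZE - 1];     /* never NULL, even when empty */
}

static void ring_post(struct ring *r, int index, int payload)
{
        r->slots[index].payload = payload;
        r->last->next_index = index;            /* no NULL check needed        */
        r->last = &r->slots[index];
}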
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 75cd2d84ef12..18998d48c53e 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -172,6 +172,8 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
172 scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); 172 scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
173 } 173 }
174 174
175 srq->last = get_wqe(srq, srq->max - 1);
176
175 return 0; 177 return 0;
176} 178}
177 179
@@ -189,7 +191,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
189 191
190 srq->max = attr->max_wr; 192 srq->max = attr->max_wr;
191 srq->max_gs = attr->max_sge; 193 srq->max_gs = attr->max_sge;
192 srq->last = NULL;
193 srq->counter = 0; 194 srq->counter = 0;
194 195
195 if (mthca_is_memfree(dev)) 196 if (mthca_is_memfree(dev))
@@ -409,7 +410,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
409 mthca_err(dev, "SRQ %06x full\n", srq->srqn); 410 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
410 err = -ENOMEM; 411 err = -ENOMEM;
411 *bad_wr = wr; 412 *bad_wr = wr;
412 return nreq; 413 break;
413 } 414 }
414 415
415 wqe = get_wqe(srq, ind); 416 wqe = get_wqe(srq, ind);
@@ -427,7 +428,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
427 err = -EINVAL; 428 err = -EINVAL;
428 *bad_wr = wr; 429 *bad_wr = wr;
429 srq->last = prev_wqe; 430 srq->last = prev_wqe;
430 return nreq; 431 break;
431 } 432 }
432 433
433 for (i = 0; i < wr->num_sge; ++i) { 434 for (i = 0; i < wr->num_sge; ++i) {
@@ -446,20 +447,16 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
446 ((struct mthca_data_seg *) wqe)->addr = 0; 447 ((struct mthca_data_seg *) wqe)->addr = 0;
447 } 448 }
448 449
449 if (likely(prev_wqe)) { 450 ((struct mthca_next_seg *) prev_wqe)->nda_op =
450 ((struct mthca_next_seg *) prev_wqe)->nda_op = 451 cpu_to_be32((ind << srq->wqe_shift) | 1);
451 cpu_to_be32((ind << srq->wqe_shift) | 1); 452 wmb();
452 wmb(); 453 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
453 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 454 cpu_to_be32(MTHCA_NEXT_DBD);
454 cpu_to_be32(MTHCA_NEXT_DBD);
455 }
456 455
457 srq->wrid[ind] = wr->wr_id; 456 srq->wrid[ind] = wr->wr_id;
458 srq->first_free = next_ind; 457 srq->first_free = next_ind;
459 } 458 }
460 459
461 return nreq;
462
463 if (likely(nreq)) { 460 if (likely(nreq)) {
464 __be32 doorbell[2]; 461 __be32 doorbell[2];
465 462
@@ -503,7 +500,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
503 mthca_err(dev, "SRQ %06x full\n", srq->srqn); 500 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
504 err = -ENOMEM; 501 err = -ENOMEM;
505 *bad_wr = wr; 502 *bad_wr = wr;
506 return nreq; 503 break;
507 } 504 }
508 505
509 wqe = get_wqe(srq, ind); 506 wqe = get_wqe(srq, ind);
@@ -519,7 +516,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
519 if (unlikely(wr->num_sge > srq->max_gs)) { 516 if (unlikely(wr->num_sge > srq->max_gs)) {
520 err = -EINVAL; 517 err = -EINVAL;
521 *bad_wr = wr; 518 *bad_wr = wr;
522 return nreq; 519 break;
523 } 520 }
524 521
525 for (i = 0; i < wr->num_sge; ++i) { 522 for (i = 0; i < wr->num_sge; ++i) {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index bea960b8191f..4ea1c1ca85bc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -257,7 +257,7 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
257 257
258void ipoib_mcast_restart_task(void *dev_ptr); 258void ipoib_mcast_restart_task(void *dev_ptr);
259int ipoib_mcast_start_thread(struct net_device *dev); 259int ipoib_mcast_start_thread(struct net_device *dev);
260int ipoib_mcast_stop_thread(struct net_device *dev); 260int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
261 261
262void ipoib_mcast_dev_down(struct net_device *dev); 262void ipoib_mcast_dev_down(struct net_device *dev);
263void ipoib_mcast_dev_flush(struct net_device *dev); 263void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index ef0e3894863c..f7440096b5ed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -432,7 +432,7 @@ int ipoib_ib_dev_down(struct net_device *dev)
432 flush_workqueue(ipoib_workqueue); 432 flush_workqueue(ipoib_workqueue);
433 } 433 }
434 434
435 ipoib_mcast_stop_thread(dev); 435 ipoib_mcast_stop_thread(dev, 1);
436 436
437 /* 437 /*
438 * Flush the multicast groups first so we stop any multicast joins. The 438 * Flush the multicast groups first so we stop any multicast joins. The
@@ -599,7 +599,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
599 599
600 ipoib_dbg(priv, "cleaning up ib_dev\n"); 600 ipoib_dbg(priv, "cleaning up ib_dev\n");
601 601
602 ipoib_mcast_stop_thread(dev); 602 ipoib_mcast_stop_thread(dev, 1);
603 603
604 /* Delete the broadcast address and the local address */ 604 /* Delete the broadcast address and the local address */
605 ipoib_mcast_dev_down(dev); 605 ipoib_mcast_dev_down(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 49d120d2b92c..704f48e0b6a7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1005,6 +1005,7 @@ debug_failed:
1005 1005
1006register_failed: 1006register_failed:
1007 ib_unregister_event_handler(&priv->event_handler); 1007 ib_unregister_event_handler(&priv->event_handler);
1008 flush_scheduled_work();
1008 1009
1009event_failed: 1010event_failed:
1010 ipoib_dev_cleanup(priv->dev); 1011 ipoib_dev_cleanup(priv->dev);
@@ -1057,6 +1058,7 @@ static void ipoib_remove_one(struct ib_device *device)
1057 1058
1058 list_for_each_entry_safe(priv, tmp, dev_list, list) { 1059 list_for_each_entry_safe(priv, tmp, dev_list, list) {
1059 ib_unregister_event_handler(&priv->event_handler); 1060 ib_unregister_event_handler(&priv->event_handler);
1061 flush_scheduled_work();
1060 1062
1061 unregister_netdev(priv->dev); 1063 unregister_netdev(priv->dev);
1062 ipoib_dev_cleanup(priv->dev); 1064 ipoib_dev_cleanup(priv->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index aca7aea18a69..36ce29836bf2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -145,7 +145,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
145 145
146 mcast->dev = dev; 146 mcast->dev = dev;
147 mcast->created = jiffies; 147 mcast->created = jiffies;
148 mcast->backoff = HZ; 148 mcast->backoff = 1;
149 mcast->logcount = 0; 149 mcast->logcount = 0;
150 150
151 INIT_LIST_HEAD(&mcast->list); 151 INIT_LIST_HEAD(&mcast->list);
@@ -396,7 +396,7 @@ static void ipoib_mcast_join_complete(int status,
396 IPOIB_GID_ARG(mcast->mcmember.mgid), status); 396 IPOIB_GID_ARG(mcast->mcmember.mgid), status);
397 397
398 if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) { 398 if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
399 mcast->backoff = HZ; 399 mcast->backoff = 1;
400 down(&mcast_mutex); 400 down(&mcast_mutex);
401 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 401 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
402 queue_work(ipoib_workqueue, &priv->mcast_task); 402 queue_work(ipoib_workqueue, &priv->mcast_task);
@@ -496,7 +496,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
496 if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) 496 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
497 queue_delayed_work(ipoib_workqueue, 497 queue_delayed_work(ipoib_workqueue,
498 &priv->mcast_task, 498 &priv->mcast_task,
499 mcast->backoff); 499 mcast->backoff * HZ);
500 up(&mcast_mutex); 500 up(&mcast_mutex);
501 } else 501 } else
502 mcast->query_id = ret; 502 mcast->query_id = ret;
@@ -598,7 +598,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
598 return 0; 598 return 0;
599} 599}
600 600
601int ipoib_mcast_stop_thread(struct net_device *dev) 601int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
602{ 602{
603 struct ipoib_dev_priv *priv = netdev_priv(dev); 603 struct ipoib_dev_priv *priv = netdev_priv(dev);
604 struct ipoib_mcast *mcast; 604 struct ipoib_mcast *mcast;
@@ -610,7 +610,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
610 cancel_delayed_work(&priv->mcast_task); 610 cancel_delayed_work(&priv->mcast_task);
611 up(&mcast_mutex); 611 up(&mcast_mutex);
612 612
613 flush_workqueue(ipoib_workqueue); 613 if (flush)
614 flush_workqueue(ipoib_workqueue);
614 615
615 if (priv->broadcast && priv->broadcast->query) { 616 if (priv->broadcast && priv->broadcast->query) {
616 ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query); 617 ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query);
@@ -832,7 +833,7 @@ void ipoib_mcast_restart_task(void *dev_ptr)
832 833
833 ipoib_dbg_mcast(priv, "restarting multicast task\n"); 834 ipoib_dbg_mcast(priv, "restarting multicast task\n");
834 835
835 ipoib_mcast_stop_thread(dev); 836 ipoib_mcast_stop_thread(dev, 0);
836 837
837 spin_lock_irqsave(&priv->lock, flags); 838 spin_lock_irqsave(&priv->lock, flags);
838 839
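The ipoib_multicast.c hunks above change mcast->backoff from being stored in jiffies to being stored in seconds (initialised to 1), converting to jiffies only when the delayed work is queued, so the stored value no longer depends on HZ. A minimal sketch of that convention; HZ here is an assumed constant and the queue callback is hypothetical.

#define HZ 250                           /* assumed ticks per second, for illustration */

struct retry_state {
        unsigned int backoff_secs;       /* always stored in seconds */
};

static void reset_backoff(struct retry_state *st)
{
        st->backoff_secs = 1;            /* one second, independent of HZ */
}

static void schedule_retry(struct retry_state *st,
                           void (*queue_delayed)(unsigned long delay_jiffies))
{
        queue_delayed(st->backoff_secs * HZ);   /* convert once, at the edge */
}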
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 88636a204525..14ae5583e198 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -308,6 +308,7 @@ static struct input_device_id *input_match_device(struct input_device_id *id, st
308 MATCH_BIT(ledbit, LED_MAX); 308 MATCH_BIT(ledbit, LED_MAX);
309 MATCH_BIT(sndbit, SND_MAX); 309 MATCH_BIT(sndbit, SND_MAX);
310 MATCH_BIT(ffbit, FF_MAX); 310 MATCH_BIT(ffbit, FF_MAX);
311 MATCH_BIT(swbit, SW_MAX);
311 312
312 return id; 313 return id;
313 } 314 }
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index e55dee390775..444f7756fee6 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -132,6 +132,17 @@ config KEYBOARD_CORGI
132 To compile this driver as a module, choose M here: the 132 To compile this driver as a module, choose M here: the
133 module will be called corgikbd. 133 module will be called corgikbd.
134 134
135config KEYBOARD_SPITZ
136 tristate "Spitz keyboard"
137 depends on PXA_SHARPSL
138 default y
139 help
140 Say Y here to enable the keyboard on the Sharp Zaurus SL-C1000,
 141	  SL-C3000 and SL-C3100 series of PDAs.
142
143 To compile this driver as a module, choose M here: the
144 module will be called spitzkbd.
145
135config KEYBOARD_MAPLE 146config KEYBOARD_MAPLE
136 tristate "Maple bus keyboard" 147 tristate "Maple bus keyboard"
137 depends on SH_DREAMCAST && MAPLE 148 depends on SH_DREAMCAST && MAPLE
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index b02eeceea3c3..9ce0b87f2fac 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_KEYBOARD_LOCOMO) += locomokbd.o
14obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o 14obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
15obj-$(CONFIG_KEYBOARD_98KBD) += 98kbd.o 15obj-$(CONFIG_KEYBOARD_98KBD) += 98kbd.o
16obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o 16obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o
17obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
17obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o 18obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
18obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o 19obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
19 20
diff --git a/drivers/input/keyboard/spitzkbd.c b/drivers/input/keyboard/spitzkbd.c
new file mode 100644
index 000000000000..1714045a182b
--- /dev/null
+++ b/drivers/input/keyboard/spitzkbd.c
@@ -0,0 +1,478 @@
1/*
2 * Keyboard driver for Sharp Spitz, Borzoi and Akita (SL-Cxx00 series)
3 *
4 * Copyright (c) 2005 Richard Purdie
5 *
6 * Based on corgikbd.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/delay.h>
15#include <linux/device.h>
16#include <linux/init.h>
17#include <linux/input.h>
18#include <linux/interrupt.h>
19#include <linux/jiffies.h>
20#include <linux/module.h>
21#include <linux/slab.h>
22#include <asm/irq.h>
23
24#include <asm/arch/spitz.h>
25#include <asm/arch/hardware.h>
26#include <asm/arch/pxa-regs.h>
27
28#define KB_ROWS 7
29#define KB_COLS 11
30#define KB_ROWMASK(r) (1 << (r))
31#define SCANCODE(r,c) (((r)<<4) + (c) + 1)
32#define NR_SCANCODES ((KB_ROWS<<4) + 1)
33
34#define HINGE_SCAN_INTERVAL (150) /* ms */
35
36#define SPITZ_KEY_CALENDER KEY_F1
37#define SPITZ_KEY_ADDRESS KEY_F2
38#define SPITZ_KEY_FN KEY_F3
39#define SPITZ_KEY_CANCEL KEY_F4
40#define SPITZ_KEY_EXOK KEY_F5
41#define SPITZ_KEY_EXCANCEL KEY_F6
42#define SPITZ_KEY_EXJOGDOWN KEY_F7
43#define SPITZ_KEY_EXJOGUP KEY_F8
44#define SPITZ_KEY_JAP1 KEY_LEFTALT
45#define SPITZ_KEY_JAP2 KEY_RIGHTCTRL
46#define SPITZ_KEY_SYNC KEY_F9
47#define SPITZ_KEY_MAIL KEY_F10
48#define SPITZ_KEY_OK KEY_F11
49#define SPITZ_KEY_MENU KEY_F12
50
51static unsigned char spitzkbd_keycode[NR_SCANCODES] = {
52 0, /* 0 */
53 KEY_LEFTCTRL, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, SPITZ_KEY_EXOK, SPITZ_KEY_EXCANCEL, 0, 0, 0, 0, 0, /* 1-16 */
54 0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, SPITZ_KEY_EXJOGDOWN, SPITZ_KEY_EXJOGUP, 0, 0, 0, 0, 0, /* 17-32 */
55 KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
56 SPITZ_KEY_CALENDER, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, /* 49-64 */
57 SPITZ_KEY_ADDRESS, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, /* 65-80 */
58 SPITZ_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, SPITZ_KEY_FN, 0, 0, 0, 0, 0, /* 81-96 */
59 KEY_SYSRQ, SPITZ_KEY_JAP1, SPITZ_KEY_JAP2, SPITZ_KEY_CANCEL, SPITZ_KEY_OK, SPITZ_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0 /* 97-112 */
60};
61
62static int spitz_strobes[] = {
63 SPITZ_GPIO_KEY_STROBE0,
64 SPITZ_GPIO_KEY_STROBE1,
65 SPITZ_GPIO_KEY_STROBE2,
66 SPITZ_GPIO_KEY_STROBE3,
67 SPITZ_GPIO_KEY_STROBE4,
68 SPITZ_GPIO_KEY_STROBE5,
69 SPITZ_GPIO_KEY_STROBE6,
70 SPITZ_GPIO_KEY_STROBE7,
71 SPITZ_GPIO_KEY_STROBE8,
72 SPITZ_GPIO_KEY_STROBE9,
73 SPITZ_GPIO_KEY_STROBE10,
74};
75
76static int spitz_senses[] = {
77 SPITZ_GPIO_KEY_SENSE0,
78 SPITZ_GPIO_KEY_SENSE1,
79 SPITZ_GPIO_KEY_SENSE2,
80 SPITZ_GPIO_KEY_SENSE3,
81 SPITZ_GPIO_KEY_SENSE4,
82 SPITZ_GPIO_KEY_SENSE5,
83 SPITZ_GPIO_KEY_SENSE6,
84};
85
86struct spitzkbd {
87 unsigned char keycode[ARRAY_SIZE(spitzkbd_keycode)];
88 struct input_dev input;
89 char phys[32];
90
91 spinlock_t lock;
92 struct timer_list timer;
93 struct timer_list htimer;
94
95 unsigned int suspended;
96 unsigned long suspend_jiffies;
97};
98
99#define KB_DISCHARGE_DELAY 10
100#define KB_ACTIVATE_DELAY 10
101
102/* Helper functions for reading the keyboard matrix
103 * Note: We should really be using pxa_gpio_mode to alter GPDR but it
104 * requires a function call per GPIO bit which is excessive
105 * when we need to access 11 bits at once, multiple times.
106 * These functions must be called within local_irq_save()/local_irq_restore()
107 * or similar.
108 */
109static inline void spitzkbd_discharge_all(void)
110{
111 /* STROBE All HiZ */
112 GPCR0 = SPITZ_GPIO_G0_STROBE_BIT;
113 GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
114 GPCR1 = SPITZ_GPIO_G1_STROBE_BIT;
115 GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
116 GPCR2 = SPITZ_GPIO_G2_STROBE_BIT;
117 GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
118 GPCR3 = SPITZ_GPIO_G3_STROBE_BIT;
119 GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
120}
121
122static inline void spitzkbd_activate_all(void)
123{
124 /* STROBE ALL -> High */
125 GPSR0 = SPITZ_GPIO_G0_STROBE_BIT;
126 GPDR0 |= SPITZ_GPIO_G0_STROBE_BIT;
127 GPSR1 = SPITZ_GPIO_G1_STROBE_BIT;
128 GPDR1 |= SPITZ_GPIO_G1_STROBE_BIT;
129 GPSR2 = SPITZ_GPIO_G2_STROBE_BIT;
130 GPDR2 |= SPITZ_GPIO_G2_STROBE_BIT;
131 GPSR3 = SPITZ_GPIO_G3_STROBE_BIT;
132 GPDR3 |= SPITZ_GPIO_G3_STROBE_BIT;
133
134 udelay(KB_DISCHARGE_DELAY);
135
136 /* Clear any interrupts we may have triggered when altering the GPIO lines */
137 GEDR0 = SPITZ_GPIO_G0_SENSE_BIT;
138 GEDR1 = SPITZ_GPIO_G1_SENSE_BIT;
139 GEDR2 = SPITZ_GPIO_G2_SENSE_BIT;
140 GEDR3 = SPITZ_GPIO_G3_SENSE_BIT;
141}
142
143static inline void spitzkbd_activate_col(int col)
144{
145 int gpio = spitz_strobes[col];
146 GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
147 GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
148 GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
149 GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
150 GPSR(gpio) = GPIO_bit(gpio);
151 GPDR(gpio) |= GPIO_bit(gpio);
152}
153
154static inline void spitzkbd_reset_col(int col)
155{
156 int gpio = spitz_strobes[col];
157 GPDR0 &= ~SPITZ_GPIO_G0_STROBE_BIT;
158 GPDR1 &= ~SPITZ_GPIO_G1_STROBE_BIT;
159 GPDR2 &= ~SPITZ_GPIO_G2_STROBE_BIT;
160 GPDR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
161 GPCR(gpio) = GPIO_bit(gpio);
162 GPDR(gpio) |= GPIO_bit(gpio);
163}
164
165static inline int spitzkbd_get_row_status(int col)
166{
167 return ((GPLR0 >> 12) & 0x01) | ((GPLR0 >> 16) & 0x02)
168 | ((GPLR2 >> 25) & 0x04) | ((GPLR1 << 1) & 0x08)
169 | ((GPLR1 >> 0) & 0x10) | ((GPLR1 >> 1) & 0x60);
170}
171
172/*
173 * The spitz keyboard only generates interrupts when a key is pressed.
174 * When a key is pressed, we enable a timer which then scans the
175 * keyboard to detect when the key is released.
176 */
177
178/* Scan the hardware keyboard and push any changes up through the input layer */
179static void spitzkbd_scankeyboard(struct spitzkbd *spitzkbd_data, struct pt_regs *regs)
180{
181 unsigned int row, col, rowd;
182 unsigned long flags;
183 unsigned int num_pressed, pwrkey = ((GPLR(SPITZ_GPIO_ON_KEY) & GPIO_bit(SPITZ_GPIO_ON_KEY)) != 0);
184
185 if (spitzkbd_data->suspended)
186 return;
187
188 spin_lock_irqsave(&spitzkbd_data->lock, flags);
189
190 if (regs)
191 input_regs(&spitzkbd_data->input, regs);
192
193 num_pressed = 0;
194 for (col = 0; col < KB_COLS; col++) {
195 /*
196		 * Discharge the output driver capacitance
197 * in the keyboard matrix. (Yes it is significant..)
198 */
199
200 spitzkbd_discharge_all();
201 udelay(KB_DISCHARGE_DELAY);
202
203 spitzkbd_activate_col(col);
204 udelay(KB_ACTIVATE_DELAY);
205
206 rowd = spitzkbd_get_row_status(col);
207 for (row = 0; row < KB_ROWS; row++) {
208 unsigned int scancode, pressed;
209
210 scancode = SCANCODE(row, col);
211 pressed = rowd & KB_ROWMASK(row);
212
213 input_report_key(&spitzkbd_data->input, spitzkbd_data->keycode[scancode], pressed);
214
215 if (pressed)
216 num_pressed++;
217 }
218 spitzkbd_reset_col(col);
219 }
220
221 spitzkbd_activate_all();
222
223 input_report_key(&spitzkbd_data->input, SPITZ_KEY_SYNC, (GPLR(SPITZ_GPIO_SYNC) & GPIO_bit(SPITZ_GPIO_SYNC)) != 0 );
224 input_report_key(&spitzkbd_data->input, KEY_SUSPEND, pwrkey);
225
226 if (pwrkey && time_after(jiffies, spitzkbd_data->suspend_jiffies + msecs_to_jiffies(1000))) {
227 input_event(&spitzkbd_data->input, EV_PWR, KEY_SUSPEND, 1);
228 spitzkbd_data->suspend_jiffies = jiffies;
229 }
230
231 input_sync(&spitzkbd_data->input);
232
233 /* if any keys are pressed, enable the timer */
234 if (num_pressed)
235 mod_timer(&spitzkbd_data->timer, jiffies + msecs_to_jiffies(100));
236
237 spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
238}
239
240/*
241 * spitz keyboard interrupt handler.
242 */
243static irqreturn_t spitzkbd_interrupt(int irq, void *dev_id, struct pt_regs *regs)
244{
245 struct spitzkbd *spitzkbd_data = dev_id;
246
247 if (!timer_pending(&spitzkbd_data->timer)) {
248		/* wait out key chatter (debounce delay) */
249 udelay(20);
250 spitzkbd_scankeyboard(spitzkbd_data, regs);
251 }
252
253 return IRQ_HANDLED;
254}
255
256/*
257 * spitz timer checking for released keys
258 */
259static void spitzkbd_timer_callback(unsigned long data)
260{
261 struct spitzkbd *spitzkbd_data = (struct spitzkbd *) data;
262 spitzkbd_scankeyboard(spitzkbd_data, NULL);
263}
264
265/*
266 * The hinge switches generate an interrupt.
267 * We debounce the switches and pass them to the input system.
268 */
269
270static irqreturn_t spitzkbd_hinge_isr(int irq, void *dev_id, struct pt_regs *regs)
271{
272 struct spitzkbd *spitzkbd_data = dev_id;
273
274 if (!timer_pending(&spitzkbd_data->htimer))
275 mod_timer(&spitzkbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
276
277 return IRQ_HANDLED;
278}
279
280#define HINGE_STABLE_COUNT 2
281static int sharpsl_hinge_state;
282static int hinge_count;
283
284static void spitzkbd_hinge_timer(unsigned long data)
285{
286 struct spitzkbd *spitzkbd_data = (struct spitzkbd *) data;
287 unsigned long state;
288 unsigned long flags;
289
290 state = GPLR(SPITZ_GPIO_SWA) & (GPIO_bit(SPITZ_GPIO_SWA)|GPIO_bit(SPITZ_GPIO_SWB));
291 if (state != sharpsl_hinge_state) {
292 hinge_count = 0;
293 sharpsl_hinge_state = state;
294 } else if (hinge_count < HINGE_STABLE_COUNT) {
295 hinge_count++;
296 }
297
298 if (hinge_count >= HINGE_STABLE_COUNT) {
299 spin_lock_irqsave(&spitzkbd_data->lock, flags);
300
301 input_report_switch(&spitzkbd_data->input, SW_0, ((GPLR(SPITZ_GPIO_SWA) & GPIO_bit(SPITZ_GPIO_SWA)) != 0));
302 input_report_switch(&spitzkbd_data->input, SW_1, ((GPLR(SPITZ_GPIO_SWB) & GPIO_bit(SPITZ_GPIO_SWB)) != 0));
303 input_sync(&spitzkbd_data->input);
304
305 spin_unlock_irqrestore(&spitzkbd_data->lock, flags);
306 } else {
307 mod_timer(&spitzkbd_data->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
308 }
309}
310
311#ifdef CONFIG_PM
312static int spitzkbd_suspend(struct device *dev, pm_message_t state, uint32_t level)
313{
314 if (level == SUSPEND_POWER_DOWN) {
315 int i;
316 struct spitzkbd *spitzkbd = dev_get_drvdata(dev);
317 spitzkbd->suspended = 1;
318
319		/* Set strobe lines as inputs - *except* strobe line 0; leave that one
320		   enabled so we can detect a power button press for resume */
321 for (i = 1; i < SPITZ_KEY_STROBE_NUM; i++)
322 pxa_gpio_mode(spitz_strobes[i] | GPIO_IN);
323 }
324 return 0;
325}
326
327static int spitzkbd_resume(struct device *dev, uint32_t level)
328{
329 if (level == RESUME_POWER_ON) {
330 int i;
331 struct spitzkbd *spitzkbd = dev_get_drvdata(dev);
332
333 for (i = 0; i < SPITZ_KEY_STROBE_NUM; i++)
334 pxa_gpio_mode(spitz_strobes[i] | GPIO_OUT | GPIO_DFLT_HIGH);
335
336 /* Upon resume, ignore the suspend key for a short while */
337 spitzkbd->suspend_jiffies = jiffies;
338 spitzkbd->suspended = 0;
339 }
340 return 0;
341}
342#else
343#define spitzkbd_suspend NULL
344#define spitzkbd_resume NULL
345#endif
346
347static int __init spitzkbd_probe(struct device *dev)
348{
349 int i;
350 struct spitzkbd *spitzkbd;
351
352 spitzkbd = kzalloc(sizeof(struct spitzkbd), GFP_KERNEL);
353 if (!spitzkbd)
354 return -ENOMEM;
355
356 dev_set_drvdata(dev,spitzkbd);
357 strcpy(spitzkbd->phys, "spitzkbd/input0");
358
359 spin_lock_init(&spitzkbd->lock);
360
361 /* Init Keyboard rescan timer */
362 init_timer(&spitzkbd->timer);
363 spitzkbd->timer.function = spitzkbd_timer_callback;
364 spitzkbd->timer.data = (unsigned long) spitzkbd;
365
366 /* Init Hinge Timer */
367 init_timer(&spitzkbd->htimer);
368 spitzkbd->htimer.function = spitzkbd_hinge_timer;
369 spitzkbd->htimer.data = (unsigned long) spitzkbd;
370
371 spitzkbd->suspend_jiffies=jiffies;
372
373 init_input_dev(&spitzkbd->input);
374 spitzkbd->input.private = spitzkbd;
375 spitzkbd->input.name = "Spitz Keyboard";
376 spitzkbd->input.dev = dev;
377 spitzkbd->input.phys = spitzkbd->phys;
378 spitzkbd->input.id.bustype = BUS_HOST;
379 spitzkbd->input.id.vendor = 0x0001;
380 spitzkbd->input.id.product = 0x0001;
381 spitzkbd->input.id.version = 0x0100;
382 spitzkbd->input.evbit[0] = BIT(EV_KEY) | BIT(EV_REP) | BIT(EV_PWR) | BIT(EV_SW);
383 spitzkbd->input.keycode = spitzkbd->keycode;
384 spitzkbd->input.keycodesize = sizeof(unsigned char);
385 spitzkbd->input.keycodemax = ARRAY_SIZE(spitzkbd_keycode);
386
387 memcpy(spitzkbd->keycode, spitzkbd_keycode, sizeof(spitzkbd->keycode));
388 for (i = 0; i < ARRAY_SIZE(spitzkbd_keycode); i++)
389 set_bit(spitzkbd->keycode[i], spitzkbd->input.keybit);
390 clear_bit(0, spitzkbd->input.keybit);
391 set_bit(SW_0, spitzkbd->input.swbit);
392 set_bit(SW_1, spitzkbd->input.swbit);
393
394 input_register_device(&spitzkbd->input);
395 mod_timer(&spitzkbd->htimer, jiffies + msecs_to_jiffies(HINGE_SCAN_INTERVAL));
396
397 /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
398 for (i = 0; i < SPITZ_KEY_SENSE_NUM; i++) {
399 pxa_gpio_mode(spitz_senses[i] | GPIO_IN);
400 if (request_irq(IRQ_GPIO(spitz_senses[i]), spitzkbd_interrupt,
401 SA_INTERRUPT, "Spitzkbd Sense", spitzkbd))
402 printk(KERN_WARNING "spitzkbd: Can't get Sense IRQ: %d!\n", i);
403 else
404 set_irq_type(IRQ_GPIO(spitz_senses[i]),IRQT_RISING);
405 }
406
407 /* Set Strobe lines as outputs - set high */
408 for (i = 0; i < SPITZ_KEY_STROBE_NUM; i++)
409 pxa_gpio_mode(spitz_strobes[i] | GPIO_OUT | GPIO_DFLT_HIGH);
410
411 pxa_gpio_mode(SPITZ_GPIO_SYNC | GPIO_IN);
412 pxa_gpio_mode(SPITZ_GPIO_ON_KEY | GPIO_IN);
413 pxa_gpio_mode(SPITZ_GPIO_SWA | GPIO_IN);
414 pxa_gpio_mode(SPITZ_GPIO_SWB | GPIO_IN);
415
416 request_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd_interrupt, SA_INTERRUPT, "Spitzkbd Sync", spitzkbd);
417 request_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd_interrupt, SA_INTERRUPT, "Spitzkbd PwrOn", spitzkbd);
418 request_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd_hinge_isr, SA_INTERRUPT, "Spitzkbd SWA", spitzkbd);
419 request_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd_hinge_isr, SA_INTERRUPT, "Spitzkbd SWB", spitzkbd);
420
421 set_irq_type(SPITZ_IRQ_GPIO_SYNC, IRQT_BOTHEDGE);
422 set_irq_type(SPITZ_IRQ_GPIO_ON_KEY, IRQT_BOTHEDGE);
423 set_irq_type(SPITZ_IRQ_GPIO_SWA, IRQT_BOTHEDGE);
424 set_irq_type(SPITZ_IRQ_GPIO_SWB, IRQT_BOTHEDGE);
425
426 printk(KERN_INFO "input: Spitz Keyboard Registered\n");
427
428 return 0;
429}
430
431static int spitzkbd_remove(struct device *dev)
432{
433 int i;
434 struct spitzkbd *spitzkbd = dev_get_drvdata(dev);
435
436 for (i = 0; i < SPITZ_KEY_SENSE_NUM; i++)
437 free_irq(IRQ_GPIO(spitz_senses[i]), spitzkbd);
438
439 free_irq(SPITZ_IRQ_GPIO_SYNC, spitzkbd);
440 free_irq(SPITZ_IRQ_GPIO_ON_KEY, spitzkbd);
441 free_irq(SPITZ_IRQ_GPIO_SWA, spitzkbd);
442 free_irq(SPITZ_IRQ_GPIO_SWB, spitzkbd);
443
444 del_timer_sync(&spitzkbd->htimer);
445 del_timer_sync(&spitzkbd->timer);
446
447 input_unregister_device(&spitzkbd->input);
448
449 kfree(spitzkbd);
450
451 return 0;
452}
453
454static struct device_driver spitzkbd_driver = {
455 .name = "spitz-keyboard",
456 .bus = &platform_bus_type,
457 .probe = spitzkbd_probe,
458 .remove = spitzkbd_remove,
459 .suspend = spitzkbd_suspend,
460 .resume = spitzkbd_resume,
461};
462
463static int __devinit spitzkbd_init(void)
464{
465 return driver_register(&spitzkbd_driver);
466}
467
468static void __exit spitzkbd_exit(void)
469{
470 driver_unregister(&spitzkbd_driver);
471}
472
473module_init(spitzkbd_init);
474module_exit(spitzkbd_exit);
475
476MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
477MODULE_DESCRIPTION("Spitz Keyboard Driver");
478MODULE_LICENSE("GPLv2");
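As a quick check of the SCANCODE()/keycode table layout in the new driver above: the index is (row << 4) + col + 1, so each matrix row spans 16 table slots even though only 11 columns are wired, which is why every comment in spitzkbd_keycode[] covers a run of 16 entries. A minimal worked example (row/column values chosen purely for illustration):

    #define SCANCODE(r, c)  (((r) << 4) + (c) + 1)

    /* row 3, column 2 -> (3 << 4) + 2 + 1 = 51, which indexes KEY_S in
     * spitzkbd_keycode[] (the "49-64" row of the table above). */
    unsigned int example_index = SCANCODE(3, 2);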
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 0489af5a80c9..21d55ed4b88a 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -24,17 +24,17 @@ config TOUCHSCREEN_BITSY
24 module will be called h3600_ts_input. 24 module will be called h3600_ts_input.
25 25
26config TOUCHSCREEN_CORGI 26config TOUCHSCREEN_CORGI
27 tristate "Corgi touchscreen (for Sharp SL-C7xx)" 27 tristate "SharpSL (Corgi and Spitz series) touchscreen driver"
28 depends on PXA_SHARPSL 28 depends on PXA_SHARPSL
29 default y 29 default y
30 help 30 help
31 Say Y here to enable the driver for the touchscreen on the 31 Say Y here to enable the driver for the touchscreen on the
32 Sharp SL-C7xx series of PDAs. 32 Sharp SL-C7xx and SL-Cxx00 series of PDAs.
33 33
34 If unsure, say N. 34 If unsure, say N.
35 35
36 To compile this driver as a module, choose M here: the 36 To compile this driver as a module, choose M here: the
37 module will be called ads7846_ts. 37 module will be called corgi_ts.
38 38
39config TOUCHSCREEN_GUNZE 39config TOUCHSCREEN_GUNZE
40 tristate "Gunze AHL-51S touchscreen" 40 tristate "Gunze AHL-51S touchscreen"
diff --git a/drivers/input/touchscreen/corgi_ts.c b/drivers/input/touchscreen/corgi_ts.c
index 5d19261b884f..4c7fbe550365 100644
--- a/drivers/input/touchscreen/corgi_ts.c
+++ b/drivers/input/touchscreen/corgi_ts.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Touchscreen driver for Sharp Corgi models (SL-C7xx) 2 * Touchscreen driver for Sharp SL-C7xx and SL-Cxx00 models
3 * 3 *
4 * Copyright (c) 2004-2005 Richard Purdie 4 * Copyright (c) 2004-2005 Richard Purdie
5 * 5 *
@@ -19,7 +19,7 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <asm/irq.h> 20#include <asm/irq.h>
21 21
22#include <asm/arch/corgi.h> 22#include <asm/arch/sharpsl.h>
23#include <asm/arch/hardware.h> 23#include <asm/arch/hardware.h>
24#include <asm/arch/pxa-regs.h> 24#include <asm/arch/pxa-regs.h>
25 25
@@ -47,15 +47,20 @@ struct corgi_ts {
47 struct ts_event tc; 47 struct ts_event tc;
48 int pendown; 48 int pendown;
49 int power_mode; 49 int power_mode;
50 int irq_gpio;
51 struct corgits_machinfo *machinfo;
50}; 52};
51 53
52#define STATUS_HSYNC (GPLR(CORGI_GPIO_HSYNC) & GPIO_bit(CORGI_GPIO_HSYNC)) 54#ifdef CONFIG_PXA25x
53
54#define SyncHS() while((STATUS_HSYNC) == 0); while((STATUS_HSYNC) != 0);
55#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C0, 0" : "=r"(a)) 55#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C0, 0" : "=r"(a))
56#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C0, 0" : "=r"(x)) 56#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C0, 0" : "=r"(x))
57#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C0, 0" : : "r"(x)) 57#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C0, 0" : : "r"(x))
58 58#endif
59#ifdef CONFIG_PXA27x
60#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C1, 0" : "=r"(a))
61#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C1, 0" : "=r"(x))
62#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C1, 0" : : "r"(x))
63#endif
59 64
60/* ADS7846 Touch Screen Controller bit definitions */ 65/* ADS7846 Touch Screen Controller bit definitions */
61#define ADSCTRL_PD0 (1u << 0) /* PD0 */ 66#define ADSCTRL_PD0 (1u << 0) /* PD0 */
@@ -66,12 +71,11 @@ struct corgi_ts {
66#define ADSCTRL_STS (1u << 7) /* Start Bit */ 71#define ADSCTRL_STS (1u << 7) /* Start Bit */
67 72
68/* External Functions */ 73/* External Functions */
69extern unsigned long w100fb_get_hsynclen(struct device *dev);
70extern unsigned int get_clk_frequency_khz(int info); 74extern unsigned int get_clk_frequency_khz(int info);
71 75
72static unsigned long calc_waittime(void) 76static unsigned long calc_waittime(struct corgi_ts *corgi_ts)
73{ 77{
74 unsigned long hsync_len = w100fb_get_hsynclen(&corgifb_device.dev); 78 unsigned long hsync_len = corgi_ts->machinfo->get_hsync_len();
75 79
76 if (hsync_len) 80 if (hsync_len)
77 return get_clk_frequency_khz(0)*1000/hsync_len; 81 return get_clk_frequency_khz(0)*1000/hsync_len;
@@ -79,7 +83,8 @@ static unsigned long calc_waittime(void)
79 return 0; 83 return 0;
80} 84}
81 85
82static int sync_receive_data_send_cmd(int doRecive, int doSend, unsigned int address, unsigned long wait_time) 86static int sync_receive_data_send_cmd(struct corgi_ts *corgi_ts, int doRecive, int doSend,
87 unsigned int address, unsigned long wait_time)
83{ 88{
84 unsigned long timer1 = 0, timer2, pmnc = 0; 89 unsigned long timer1 = 0, timer2, pmnc = 0;
85 int pos = 0; 90 int pos = 0;
@@ -90,7 +95,7 @@ static int sync_receive_data_send_cmd(int doRecive, int doSend, unsigned int add
90 PMNC_SET(0x01); 95 PMNC_SET(0x01);
91 96
92 /* polling HSync */ 97 /* polling HSync */
93 SyncHS(); 98 corgi_ts->machinfo->wait_hsync();
94 /* get CCNT */ 99 /* get CCNT */
95 CCNT(timer1); 100 CCNT(timer1);
96 } 101 }
@@ -109,7 +114,7 @@ static int sync_receive_data_send_cmd(int doRecive, int doSend, unsigned int add
109 CCNT(timer2); 114 CCNT(timer2);
110 if (timer2-timer1 > wait_time) { 115 if (timer2-timer1 > wait_time) {
111 /* too slow - timeout, try again */ 116 /* too slow - timeout, try again */
112 SyncHS(); 117 corgi_ts->machinfo->wait_hsync();
113 /* get OSCR */ 118 /* get OSCR */
114 CCNT(timer1); 119 CCNT(timer1);
115 /* Wait after HSync */ 120 /* Wait after HSync */
@@ -133,23 +138,23 @@ static int read_xydata(struct corgi_ts *corgi_ts)
133 /* critical section */ 138 /* critical section */
134 local_irq_save(flags); 139 local_irq_save(flags);
135 corgi_ssp_ads7846_lock(); 140 corgi_ssp_ads7846_lock();
136 wait_time=calc_waittime(); 141 wait_time = calc_waittime(corgi_ts);
137 142
138 /* Y-axis */ 143 /* Y-axis */
139 sync_receive_data_send_cmd(0, 1, 1u, wait_time); 144 sync_receive_data_send_cmd(corgi_ts, 0, 1, 1u, wait_time);
140 145
141 /* Y-axis */ 146 /* Y-axis */
142 sync_receive_data_send_cmd(1, 1, 1u, wait_time); 147 sync_receive_data_send_cmd(corgi_ts, 1, 1, 1u, wait_time);
143 148
144 /* X-axis */ 149 /* X-axis */
145 y = sync_receive_data_send_cmd(1, 1, 5u, wait_time); 150 y = sync_receive_data_send_cmd(corgi_ts, 1, 1, 5u, wait_time);
146 151
147 /* Z1 */ 152 /* Z1 */
148 x = sync_receive_data_send_cmd(1, 1, 3u, wait_time); 153 x = sync_receive_data_send_cmd(corgi_ts, 1, 1, 3u, wait_time);
149 154
150 /* Z2 */ 155 /* Z2 */
151 z1 = sync_receive_data_send_cmd(1, 1, 4u, wait_time); 156 z1 = sync_receive_data_send_cmd(corgi_ts, 1, 1, 4u, wait_time);
152 z2 = sync_receive_data_send_cmd(1, 0, 4u, wait_time); 157 z2 = sync_receive_data_send_cmd(corgi_ts, 1, 0, 4u, wait_time);
153 158
154 /* Power-Down Enable */ 159 /* Power-Down Enable */
155 corgi_ssp_ads7846_put((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS); 160 corgi_ssp_ads7846_put((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
@@ -189,9 +194,9 @@ static void new_data(struct corgi_ts *corgi_ts, struct pt_regs *regs)
189 194
190static void ts_interrupt_main(struct corgi_ts *corgi_ts, int isTimer, struct pt_regs *regs) 195static void ts_interrupt_main(struct corgi_ts *corgi_ts, int isTimer, struct pt_regs *regs)
191{ 196{
192 if ((GPLR(CORGI_GPIO_TP_INT) & GPIO_bit(CORGI_GPIO_TP_INT)) == 0) { 197 if ((GPLR(IRQ_TO_GPIO(corgi_ts->irq_gpio)) & GPIO_bit(IRQ_TO_GPIO(corgi_ts->irq_gpio))) == 0) {
193 /* Disable Interrupt */ 198 /* Disable Interrupt */
194 set_irq_type(CORGI_IRQ_GPIO_TP_INT, IRQT_NOEDGE); 199 set_irq_type(corgi_ts->irq_gpio, IRQT_NOEDGE);
195 if (read_xydata(corgi_ts)) { 200 if (read_xydata(corgi_ts)) {
196 corgi_ts->pendown = 1; 201 corgi_ts->pendown = 1;
197 new_data(corgi_ts, regs); 202 new_data(corgi_ts, regs);
@@ -210,7 +215,7 @@ static void ts_interrupt_main(struct corgi_ts *corgi_ts, int isTimer, struct pt_
210 } 215 }
211 216
212 /* Enable Falling Edge */ 217 /* Enable Falling Edge */
213 set_irq_type(CORGI_IRQ_GPIO_TP_INT, IRQT_FALLING); 218 set_irq_type(corgi_ts->irq_gpio, IRQT_FALLING);
214 corgi_ts->pendown = 0; 219 corgi_ts->pendown = 0;
215 } 220 }
216} 221}
@@ -254,7 +259,7 @@ static int corgits_resume(struct device *dev, uint32_t level)
254 259
255 corgi_ssp_ads7846_putget((4u << ADSCTRL_ADR_SH) | ADSCTRL_STS); 260 corgi_ssp_ads7846_putget((4u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
256 /* Enable Falling Edge */ 261 /* Enable Falling Edge */
257 set_irq_type(CORGI_IRQ_GPIO_TP_INT, IRQT_FALLING); 262 set_irq_type(corgi_ts->irq_gpio, IRQT_FALLING);
258 corgi_ts->power_mode = PWR_MODE_ACTIVE; 263 corgi_ts->power_mode = PWR_MODE_ACTIVE;
259 } 264 }
260 return 0; 265 return 0;
@@ -267,6 +272,7 @@ static int corgits_resume(struct device *dev, uint32_t level)
267static int __init corgits_probe(struct device *dev) 272static int __init corgits_probe(struct device *dev)
268{ 273{
269 struct corgi_ts *corgi_ts; 274 struct corgi_ts *corgi_ts;
275 struct platform_device *pdev = to_platform_device(dev);
270 276
271 if (!(corgi_ts = kmalloc(sizeof(struct corgi_ts), GFP_KERNEL))) 277 if (!(corgi_ts = kmalloc(sizeof(struct corgi_ts), GFP_KERNEL)))
272 return -ENOMEM; 278 return -ENOMEM;
@@ -275,6 +281,14 @@ static int __init corgits_probe(struct device *dev)
275 281
276 memset(corgi_ts, 0, sizeof(struct corgi_ts)); 282 memset(corgi_ts, 0, sizeof(struct corgi_ts));
277 283
284 corgi_ts->machinfo = dev->platform_data;
285 corgi_ts->irq_gpio = platform_get_irq(pdev, 0);
286
287 if (corgi_ts->irq_gpio < 0) {
288 kfree(corgi_ts);
289 return -ENODEV;
290 }
291
278 init_input_dev(&corgi_ts->input); 292 init_input_dev(&corgi_ts->input);
279 corgi_ts->input.evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); 293 corgi_ts->input.evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
280 corgi_ts->input.keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH); 294 corgi_ts->input.keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH);
@@ -293,8 +307,7 @@ static int __init corgits_probe(struct device *dev)
293 corgi_ts->input.id.product = 0x0002; 307 corgi_ts->input.id.product = 0x0002;
294 corgi_ts->input.id.version = 0x0100; 308 corgi_ts->input.id.version = 0x0100;
295 309
296 pxa_gpio_mode(CORGI_GPIO_TP_INT | GPIO_IN); 310 pxa_gpio_mode(IRQ_TO_GPIO(corgi_ts->irq_gpio) | GPIO_IN);
297 pxa_gpio_mode(CORGI_GPIO_HSYNC | GPIO_IN);
298 311
299	/* Initialize ADS7846 Difference Reference mode */ 312	/* Initialize ADS7846 Difference Reference mode */
300 corgi_ssp_ads7846_putget((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS); 313 corgi_ssp_ads7846_putget((1u << ADSCTRL_ADR_SH) | ADSCTRL_STS);
@@ -313,14 +326,14 @@ static int __init corgits_probe(struct device *dev)
313 input_register_device(&corgi_ts->input); 326 input_register_device(&corgi_ts->input);
314 corgi_ts->power_mode = PWR_MODE_ACTIVE; 327 corgi_ts->power_mode = PWR_MODE_ACTIVE;
315 328
316 if (request_irq(CORGI_IRQ_GPIO_TP_INT, ts_interrupt, SA_INTERRUPT, "ts", corgi_ts)) { 329 if (request_irq(corgi_ts->irq_gpio, ts_interrupt, SA_INTERRUPT, "ts", corgi_ts)) {
317 input_unregister_device(&corgi_ts->input); 330 input_unregister_device(&corgi_ts->input);
318 kfree(corgi_ts); 331 kfree(corgi_ts);
319 return -EBUSY; 332 return -EBUSY;
320 } 333 }
321 334
322 /* Enable Falling Edge */ 335 /* Enable Falling Edge */
323 set_irq_type(CORGI_IRQ_GPIO_TP_INT, IRQT_FALLING); 336 set_irq_type(corgi_ts->irq_gpio, IRQT_FALLING);
324 337
325 printk(KERN_INFO "input: Corgi Touchscreen Registered\n"); 338 printk(KERN_INFO "input: Corgi Touchscreen Registered\n");
326 339
@@ -331,8 +344,9 @@ static int corgits_remove(struct device *dev)
331{ 344{
332 struct corgi_ts *corgi_ts = dev_get_drvdata(dev); 345 struct corgi_ts *corgi_ts = dev_get_drvdata(dev);
333 346
334 free_irq(CORGI_IRQ_GPIO_TP_INT, NULL); 347 free_irq(corgi_ts->irq_gpio, NULL);
335 del_timer_sync(&corgi_ts->timer); 348 del_timer_sync(&corgi_ts->timer);
349 corgi_ts->machinfo->put_hsync();
336 input_unregister_device(&corgi_ts->input); 350 input_unregister_device(&corgi_ts->input);
337 kfree(corgi_ts); 351 kfree(corgi_ts);
338 return 0; 352 return 0;
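The converted corgi_ts driver above now reaches its hsync helpers through platform data (machinfo->get_hsync_len(), ->wait_hsync(), ->put_hsync()) instead of calling the w100fb driver directly. A sketch of the callback structure those call sites assume, with field names inferred from the calls rather than quoted from <asm/arch/sharpsl.h>:

    /* Assumed shape of the platform data dereferenced above. */
    struct corgits_machinfo {
            unsigned long (*get_hsync_len)(void); /* hsync length for wait-time calibration */
            void (*put_hsync)(void);              /* release hsync resources on remove */
            void (*wait_hsync)(void);             /* busy-wait for the next hsync pulse */
    };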
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index e1f0d87de0eb..0b0ea26023e5 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -287,12 +287,12 @@ divert_dev_init(void)
287 init_waitqueue_head(&rd_queue); 287 init_waitqueue_head(&rd_queue);
288 288
289#ifdef CONFIG_PROC_FS 289#ifdef CONFIG_PROC_FS
290 isdn_proc_entry = create_proc_entry("isdn", S_IFDIR | S_IRUGO | S_IXUGO, proc_net); 290 isdn_proc_entry = proc_mkdir("net/isdn", NULL);
291 if (!isdn_proc_entry) 291 if (!isdn_proc_entry)
292 return (-1); 292 return (-1);
293 isdn_divert_entry = create_proc_entry("divert", S_IFREG | S_IRUGO, isdn_proc_entry); 293 isdn_divert_entry = create_proc_entry("divert", S_IFREG | S_IRUGO, isdn_proc_entry);
294 if (!isdn_divert_entry) { 294 if (!isdn_divert_entry) {
295 remove_proc_entry("isdn", proc_net); 295 remove_proc_entry("net/isdn", NULL);
296 return (-1); 296 return (-1);
297 } 297 }
298 isdn_divert_entry->proc_fops = &isdn_fops; 298 isdn_divert_entry->proc_fops = &isdn_fops;
@@ -312,7 +312,7 @@ divert_dev_deinit(void)
312 312
313#ifdef CONFIG_PROC_FS 313#ifdef CONFIG_PROC_FS
314 remove_proc_entry("divert", isdn_proc_entry); 314 remove_proc_entry("divert", isdn_proc_entry);
315 remove_proc_entry("isdn", proc_net); 315 remove_proc_entry("net/isdn", NULL);
316#endif /* CONFIG_PROC_FS */ 316#endif /* CONFIG_PROC_FS */
317 317
318 return (0); 318 return (0);
diff --git a/drivers/isdn/hardware/eicon/diva_didd.c b/drivers/isdn/hardware/eicon/diva_didd.c
index 7fdf8ae5be52..27204f4b111a 100644
--- a/drivers/isdn/hardware/eicon/diva_didd.c
+++ b/drivers/isdn/hardware/eicon/diva_didd.c
@@ -30,8 +30,6 @@ static char *DRIVERNAME =
30static char *DRIVERLNAME = "divadidd"; 30static char *DRIVERLNAME = "divadidd";
31char *DRIVERRELEASE_DIDD = "2.0"; 31char *DRIVERRELEASE_DIDD = "2.0";
32 32
33static char *main_proc_dir = "eicon";
34
35MODULE_DESCRIPTION("DIDD table driver for diva drivers"); 33MODULE_DESCRIPTION("DIDD table driver for diva drivers");
36MODULE_AUTHOR("Cytronics & Melware, Eicon Networks"); 34MODULE_AUTHOR("Cytronics & Melware, Eicon Networks");
37MODULE_SUPPORTED_DEVICE("Eicon diva drivers"); 35MODULE_SUPPORTED_DEVICE("Eicon diva drivers");
@@ -89,7 +87,7 @@ proc_read(char *page, char **start, off_t off, int count, int *eof,
89 87
90static int DIVA_INIT_FUNCTION create_proc(void) 88static int DIVA_INIT_FUNCTION create_proc(void)
91{ 89{
92 proc_net_eicon = create_proc_entry(main_proc_dir, S_IFDIR, proc_net); 90 proc_net_eicon = proc_mkdir("net/eicon", NULL);
93 91
94 if (proc_net_eicon) { 92 if (proc_net_eicon) {
95 if ((proc_didd = 93 if ((proc_didd =
@@ -105,7 +103,7 @@ static int DIVA_INIT_FUNCTION create_proc(void)
105static void DIVA_EXIT_FUNCTION remove_proc(void) 103static void DIVA_EXIT_FUNCTION remove_proc(void)
106{ 104{
107 remove_proc_entry(DRIVERLNAME, proc_net_eicon); 105 remove_proc_entry(DRIVERLNAME, proc_net_eicon);
108 remove_proc_entry(main_proc_dir, proc_net); 106 remove_proc_entry("net/eicon", NULL);
109} 107}
110 108
111static int DIVA_INIT_FUNCTION divadidd_init(void) 109static int DIVA_INIT_FUNCTION divadidd_init(void)
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index b6435589d459..c12efa6f8429 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -381,7 +381,7 @@ int create_adapter_proc(diva_os_xdi_adapter_t * a)
381 char tmp[16]; 381 char tmp[16];
382 382
383 sprintf(tmp, "%s%d", adapter_dir_name, a->controller); 383 sprintf(tmp, "%s%d", adapter_dir_name, a->controller);
384 if (!(de = create_proc_entry(tmp, S_IFDIR, proc_net_eicon))) 384 if (!(de = proc_mkdir(tmp, proc_net_eicon)))
385 return (0); 385 return (0);
386 a->proc_adapter_dir = (void *) de; 386 a->proc_adapter_dir = (void *) de;
387 387
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 8337b0f26cc4..4866fc32d8d9 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -61,6 +61,7 @@ static const PCI_ENTRY id_list[] =
61 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,"Digi International", "Digi DataFire Micro V (Europe)"}, 61 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,"Digi International", "Digi DataFire Micro V (Europe)"},
62 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,"Digi International", "Digi DataFire Micro V IOM2 (North America)"}, 62 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,"Digi International", "Digi DataFire Micro V IOM2 (North America)"},
63 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,"Digi International", "Digi DataFire Micro V (North America)"}, 63 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,"Digi International", "Digi DataFire Micro V (North America)"},
64 {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2, "Sitecom Europe", "DC-105 ISDN PCI"},
64 {0, 0, NULL, NULL}, 65 {0, 0, NULL, NULL},
65}; 66};
66 67
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index c6b5bf7d2aca..dc334aab433e 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -611,7 +611,7 @@ static int sedlbauer_event(event_t event, int priority,
611} /* sedlbauer_event */ 611} /* sedlbauer_event */
612 612
613static struct pcmcia_device_id sedlbauer_ids[] = { 613static struct pcmcia_device_id sedlbauer_ids[] = {
614 PCMCIA_DEVICE_PROD_ID1234("SEDLBAUER", "speed star II", "V 3.1", "(c) 93 - 98 cb ", 0x81fb79f5, 0xf3612e1d, 0x6b95c78a, 0x50d4149c), 614 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "speed star II", "V 3.1", 0x81fb79f5, 0xf3612e1d, 0x6b95c78a),
615 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D67", 0x81fb79f5, 0xe4e9bc12, 0x397b7e90), 615 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D67", 0x81fb79f5, 0xe4e9bc12, 0x397b7e90),
616 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D98", 0x81fb79f5, 0xe4e9bc12, 0x2e5c7fce), 616 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", "4D98", 0x81fb79f5, 0xe4e9bc12, 0x2e5c7fce),
617 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", " (C) 93-94 VK", 0x81fb79f5, 0xe4e9bc12, 0x8db143fe), 617 PCMCIA_DEVICE_PROD_ID123("SEDLBAUER", "ISDN-Adapter", " (C) 93-94 VK", 0x81fb79f5, 0xe4e9bc12, 0x8db143fe),
diff --git a/drivers/isdn/hisax/st5481.h b/drivers/isdn/hisax/st5481.h
index 0fda5c89429b..9ffaae7c657a 100644
--- a/drivers/isdn/hisax/st5481.h
+++ b/drivers/isdn/hisax/st5481.h
@@ -466,10 +466,10 @@ void st5481_stop(struct st5481_adapter *adapter);
466#define __debug_variable st5481_debug 466#define __debug_variable st5481_debug
467#include "hisax_debug.h" 467#include "hisax_debug.h"
468 468
469#ifdef CONFIG_HISAX_DEBUG
470
471extern int st5481_debug; 469extern int st5481_debug;
472 470
471#ifdef CONFIG_HISAX_DEBUG
472
473#define DBG_ISO_PACKET(level,urb) \ 473#define DBG_ISO_PACKET(level,urb) \
474 if (level & __debug_variable) dump_iso_packet(__FUNCTION__,urb) 474 if (level & __debug_variable) dump_iso_packet(__FUNCTION__,urb)
475 475
diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c
index 2fcd093921d8..657817a591fe 100644
--- a/drivers/isdn/hisax/st5481_b.c
+++ b/drivers/isdn/hisax/st5481_b.c
@@ -172,14 +172,18 @@ static void usb_b_out_complete(struct urb *urb, struct pt_regs *regs)
172 test_and_clear_bit(buf_nr, &b_out->busy); 172 test_and_clear_bit(buf_nr, &b_out->busy);
173 173
174 if (unlikely(urb->status < 0)) { 174 if (unlikely(urb->status < 0)) {
175 if (urb->status != -ENOENT && urb->status != -ESHUTDOWN) { 175 switch (urb->status) {
176 WARN("urb status %d",urb->status); 176 case -ENOENT:
177 if (b_out->busy == 0) { 177 case -ESHUTDOWN:
178 st5481_usb_pipe_reset(adapter, (bcs->channel+1)*2 | USB_DIR_OUT, NULL, NULL); 178 case -ECONNRESET:
179 } 179 DBG(4,"urb killed status %d", urb->status);
180 } else { 180 return; // Give up
181 DBG(1,"urb killed"); 181 default:
182 return; // Give up 182 WARN("urb status %d",urb->status);
183 if (b_out->busy == 0) {
184 st5481_usb_pipe_reset(adapter, (bcs->channel+1)*2 | USB_DIR_OUT, NULL, NULL);
185 }
186 break;
183 } 187 }
184 } 188 }
185 189
diff --git a/drivers/isdn/hisax/st5481_d.c b/drivers/isdn/hisax/st5481_d.c
index 071b1d31999f..941f7022ada1 100644
--- a/drivers/isdn/hisax/st5481_d.c
+++ b/drivers/isdn/hisax/st5481_d.c
@@ -382,16 +382,20 @@ static void usb_d_out_complete(struct urb *urb, struct pt_regs *regs)
382 test_and_clear_bit(buf_nr, &d_out->busy); 382 test_and_clear_bit(buf_nr, &d_out->busy);
383 383
384 if (unlikely(urb->status < 0)) { 384 if (unlikely(urb->status < 0)) {
385 if (urb->status != -ENOENT && urb->status != -ESHUTDOWN) { 385 switch (urb->status) {
386 WARN("urb status %d",urb->status); 386 case -ENOENT:
387 if (d_out->busy == 0) { 387 case -ESHUTDOWN:
388 st5481_usb_pipe_reset(adapter, EP_D_OUT | USB_DIR_OUT, fifo_reseted, adapter); 388 case -ECONNRESET:
389 } 389 DBG(1,"urb killed status %d", urb->status);
390 return; 390 break;
391 } else { 391 default:
392 DBG(1,"urb killed"); 392 WARN("urb status %d",urb->status);
393 return; // Give up 393 if (d_out->busy == 0) {
394 st5481_usb_pipe_reset(adapter, EP_D_OUT | USB_DIR_OUT, fifo_reseted, adapter);
395 }
396 break;
394 } 397 }
398 return; // Give up
395 } 399 }
396 400
397 FsmEvent(&adapter->d_out.fsm, EV_DOUT_COMPLETE, (void *) buf_nr); 401 FsmEvent(&adapter->d_out.fsm, EV_DOUT_COMPLETE, (void *) buf_nr);
@@ -709,14 +713,14 @@ int st5481_setup_d(struct st5481_adapter *adapter)
709 713
710 adapter->l1m.fsm = &l1fsm; 714 adapter->l1m.fsm = &l1fsm;
711 adapter->l1m.state = ST_L1_F3; 715 adapter->l1m.state = ST_L1_F3;
712 adapter->l1m.debug = 1; 716 adapter->l1m.debug = st5481_debug & 0x100;
713 adapter->l1m.userdata = adapter; 717 adapter->l1m.userdata = adapter;
714 adapter->l1m.printdebug = l1m_debug; 718 adapter->l1m.printdebug = l1m_debug;
715 FsmInitTimer(&adapter->l1m, &adapter->timer); 719 FsmInitTimer(&adapter->l1m, &adapter->timer);
716 720
717 adapter->d_out.fsm.fsm = &dout_fsm; 721 adapter->d_out.fsm.fsm = &dout_fsm;
718 adapter->d_out.fsm.state = ST_DOUT_NONE; 722 adapter->d_out.fsm.state = ST_DOUT_NONE;
719 adapter->d_out.fsm.debug = 1; 723 adapter->d_out.fsm.debug = st5481_debug & 0x100;
720 adapter->d_out.fsm.userdata = adapter; 724 adapter->d_out.fsm.userdata = adapter;
721 adapter->d_out.fsm.printdebug = dout_debug; 725 adapter->d_out.fsm.printdebug = dout_debug;
722 726
diff --git a/drivers/isdn/hisax/st5481_init.c b/drivers/isdn/hisax/st5481_init.c
index 7aa810d5d333..2cf5d1a6df6c 100644
--- a/drivers/isdn/hisax/st5481_init.c
+++ b/drivers/isdn/hisax/st5481_init.c
@@ -43,10 +43,10 @@ static int number_of_leds = 2;	/* 2 LEDs on the adapter default */
43module_param(number_of_leds, int, 0); 43module_param(number_of_leds, int, 0);
44 44
45#ifdef CONFIG_HISAX_DEBUG 45#ifdef CONFIG_HISAX_DEBUG
46static int debug = 0x1; 46static int debug = 0;
47module_param(debug, int, 0); 47module_param(debug, int, 0);
48int st5481_debug;
49#endif 48#endif
49int st5481_debug;
50 50
51static LIST_HEAD(adapter_list); 51static LIST_HEAD(adapter_list);
52 52
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
index ab62223297a5..89fbeb58485d 100644
--- a/drivers/isdn/hisax/st5481_usb.c
+++ b/drivers/isdn/hisax/st5481_usb.c
@@ -132,11 +132,15 @@ static void usb_ctrl_complete(struct urb *urb, struct pt_regs *regs)
132 struct ctrl_msg *ctrl_msg; 132 struct ctrl_msg *ctrl_msg;
133 133
134 if (unlikely(urb->status < 0)) { 134 if (unlikely(urb->status < 0)) {
135 if (urb->status != -ENOENT && urb->status != -ESHUTDOWN) { 135 switch (urb->status) {
136 WARN("urb status %d",urb->status); 136 case -ENOENT:
137 } else { 137 case -ESHUTDOWN:
138 DBG(1,"urb killed"); 138 case -ECONNRESET:
139 return; // Give up 139 DBG(1,"urb killed status %d", urb->status);
140 return; // Give up
141 default:
142 WARN("urb status %d",urb->status);
143 break;
140 } 144 }
141 } 145 }
142 146
@@ -184,22 +188,22 @@ static void usb_int_complete(struct urb *urb, struct pt_regs *regs)
184 int status; 188 int status;
185 189
186 switch (urb->status) { 190 switch (urb->status) {
187 case 0: 191 case 0:
188 /* success */ 192 /* success */
189 break; 193 break;
190 case -ECONNRESET: 194 case -ECONNRESET:
191 case -ENOENT: 195 case -ENOENT:
192 case -ESHUTDOWN: 196 case -ESHUTDOWN:
193 /* this urb is terminated, clean up */ 197 /* this urb is terminated, clean up */
194 DBG(1, "urb shutting down with status: %d", urb->status); 198 DBG(2, "urb shutting down with status: %d", urb->status);
195 return; 199 return;
196 default: 200 default:
197 WARN("nonzero urb status received: %d", urb->status); 201 WARN("nonzero urb status received: %d", urb->status);
198 goto exit; 202 goto exit;
199 } 203 }
200 204
201 205
202 DBG_PACKET(1, data, INT_PKT_SIZE); 206 DBG_PACKET(2, data, INT_PKT_SIZE);
203 207
204 if (urb->actual_length == 0) { 208 if (urb->actual_length == 0) {
205 goto exit; 209 goto exit;
@@ -250,7 +254,7 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
250 struct urb *urb; 254 struct urb *urb;
251 u8 *buf; 255 u8 *buf;
252 256
253 DBG(1,""); 257 DBG(2,"");
254 258
255 if ((status = usb_reset_configuration (dev)) < 0) { 259 if ((status = usb_reset_configuration (dev)) < 0) {
256 WARN("reset_configuration failed,status=%d",status); 260 WARN("reset_configuration failed,status=%d",status);
@@ -330,15 +334,17 @@ void st5481_release_usb(struct st5481_adapter *adapter)
330 DBG(1,""); 334 DBG(1,"");
331 335
332 // Stop and free Control and Interrupt URBs 336 // Stop and free Control and Interrupt URBs
333 usb_unlink_urb(ctrl->urb); 337 usb_kill_urb(ctrl->urb);
334 if (ctrl->urb->transfer_buffer) 338 if (ctrl->urb->transfer_buffer)
335 kfree(ctrl->urb->transfer_buffer); 339 kfree(ctrl->urb->transfer_buffer);
336 usb_free_urb(ctrl->urb); 340 usb_free_urb(ctrl->urb);
341 ctrl->urb = NULL;
337 342
338 usb_unlink_urb(intr->urb); 343 usb_kill_urb(intr->urb);
339 if (intr->urb->transfer_buffer) 344 if (intr->urb->transfer_buffer)
340 kfree(intr->urb->transfer_buffer); 345 kfree(intr->urb->transfer_buffer);
341 usb_free_urb(intr->urb); 346 usb_free_urb(intr->urb);
347 ctrl->urb = NULL;
342} 348}
343 349
344/* 350/*
@@ -406,6 +412,7 @@ fill_isoc_urb(struct urb *urb, struct usb_device *dev,
406 spin_lock_init(&urb->lock); 412 spin_lock_init(&urb->lock);
407 urb->dev=dev; 413 urb->dev=dev;
408 urb->pipe=pipe; 414 urb->pipe=pipe;
415 urb->interval = 1;
409 urb->transfer_buffer=buf; 416 urb->transfer_buffer=buf;
410 urb->number_of_packets = num_packets; 417 urb->number_of_packets = num_packets;
411 urb->transfer_buffer_length=num_packets*packet_size; 418 urb->transfer_buffer_length=num_packets*packet_size;
@@ -452,7 +459,9 @@ st5481_setup_isocpipes(struct urb* urb[2], struct usb_device *dev,
452 if (urb[j]) { 459 if (urb[j]) {
453 if (urb[j]->transfer_buffer) 460 if (urb[j]->transfer_buffer)
454 kfree(urb[j]->transfer_buffer); 461 kfree(urb[j]->transfer_buffer);
462 urb[j]->transfer_buffer = NULL;
455 usb_free_urb(urb[j]); 463 usb_free_urb(urb[j]);
464 urb[j] = NULL;
456 } 465 }
457 } 466 }
458 return retval; 467 return retval;
@@ -463,10 +472,11 @@ void st5481_release_isocpipes(struct urb* urb[2])
463 int j; 472 int j;
464 473
465 for (j = 0; j < 2; j++) { 474 for (j = 0; j < 2; j++) {
466 usb_unlink_urb(urb[j]); 475 usb_kill_urb(urb[j]);
467 if (urb[j]->transfer_buffer) 476 if (urb[j]->transfer_buffer)
468 kfree(urb[j]->transfer_buffer); 477 kfree(urb[j]->transfer_buffer);
469 usb_free_urb(urb[j]); 478 usb_free_urb(urb[j]);
479 urb[j] = NULL;
470 } 480 }
471} 481}
472 482
@@ -485,11 +495,15 @@ static void usb_in_complete(struct urb *urb, struct pt_regs *regs)
485 int len, count, status; 495 int len, count, status;
486 496
487 if (unlikely(urb->status < 0)) { 497 if (unlikely(urb->status < 0)) {
488 if (urb->status != -ENOENT && urb->status != -ESHUTDOWN) { 498 switch (urb->status) {
489 WARN("urb status %d",urb->status); 499 case -ENOENT:
490 } else { 500 case -ESHUTDOWN:
491 DBG(1,"urb killed"); 501 case -ECONNRESET:
492 return; // Give up 502 DBG(1,"urb killed status %d", urb->status);
503 return; // Give up
504 default:
505 WARN("urb status %d",urb->status);
506 break;
493 } 507 }
494 } 508 }
495 509
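The st5481 hunks above all make the same conversion: instead of an if/else that only special-cases -ENOENT and -ESHUTDOWN, each URB completion handler now switches on urb->status, treats -ECONNRESET as a killed URB as well, and merely warns (optionally resetting the pipe) for other errors. A condensed sketch of the resulting pattern, with the handler name and the resubmission step as placeholders:

    /* Kernel context assumed (struct urb, struct pt_regs, unlikely()). */
    static void example_complete(struct urb *urb, struct pt_regs *regs)
    {
            if (unlikely(urb->status < 0)) {
                    switch (urb->status) {
                    case -ENOENT:
                    case -ESHUTDOWN:
                    case -ECONNRESET:
                            /* URB was killed (e.g. usb_kill_urb) - give up */
                            return;
                    default:
                            /* transient error: warn, optionally reset the pipe */
                            break;
                    }
            }
            /* ... normal completion handling / resubmission ... */
    }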
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c
index 5da507e532fc..639582f61f41 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/isdn/hysdn/hysdn_procconf.c
@@ -394,7 +394,7 @@ hysdn_procconf_init(void)
394 hysdn_card *card; 394 hysdn_card *card;
395 uchar conf_name[20]; 395 uchar conf_name[20];
396 396
397 hysdn_proc_entry = create_proc_entry(PROC_SUBDIR_NAME, S_IFDIR | S_IRUGO | S_IXUGO, proc_net); 397 hysdn_proc_entry = proc_mkdir(PROC_SUBDIR_NAME, proc_net);
398 if (!hysdn_proc_entry) { 398 if (!hysdn_proc_entry) {
399 printk(KERN_ERR "HYSDN: unable to create hysdn subdir\n"); 399 printk(KERN_ERR "HYSDN: unable to create hysdn subdir\n");
400 return (-1); 400 return (-1);
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index 40b0df04ed9f..1ebed041672d 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -87,7 +87,7 @@ static int __init sc_init(void)
87 */ 87 */
88 for (i = 0 ; i < MAX_IO_REGS - 1 ; i++) { 88 for (i = 0 ; i < MAX_IO_REGS - 1 ; i++) {
89 if(!request_region(io[b] + i * 0x400, 1, "sc test")) { 89 if(!request_region(io[b] + i * 0x400, 1, "sc test")) {
90 pr_debug("check_region for 0x%x failed\n", io[b] + i * 0x400); 90 pr_debug("request_region for 0x%x failed\n", io[b] + i * 0x400);
91 io[b] = 0; 91 io[b] = 0;
92 break; 92 break;
93 } else 93 } else
@@ -181,7 +181,7 @@ static int __init sc_init(void)
181 for (i = SRAM_MIN ; i < SRAM_MAX ; i += SRAM_PAGESIZE) { 181 for (i = SRAM_MIN ; i < SRAM_MAX ; i += SRAM_PAGESIZE) {
182 pr_debug("Checking RAM address 0x%x...\n", i); 182 pr_debug("Checking RAM address 0x%x...\n", i);
183 if(request_region(i, SRAM_PAGESIZE, "sc test")) { 183 if(request_region(i, SRAM_PAGESIZE, "sc test")) {
184 pr_debug(" check_region succeeded\n"); 184 pr_debug(" request_region succeeded\n");
185 model = identify_board(i, io[b]); 185 model = identify_board(i, io[b]);
186 release_region(i, SRAM_PAGESIZE); 186 release_region(i, SRAM_PAGESIZE);
187 if (model >= 0) { 187 if (model >= 0) {
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index fb535737d17d..9b38674fbf75 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -8,21 +8,15 @@
8 */ 8 */
9 9
10/* 10/*
11 * For now, this driver includes:
12 * - RTC get & set
13 * - reboot & shutdown commands
14 * all synchronous with IRQ disabled (ugh)
15 *
16 * TODO: 11 * TODO:
17 * rework in a way the PMU driver works, that is asynchronous 12 * - maybe add timeout to commands ?
18 * with a queue of commands. I'll do that as soon as I have an 13 * - blocking version of time functions
19 * SMU based machine at hand. Some more cleanup is needed too, 14 * - polling version of i2c commands (including timer that works with
20 * like maybe fitting it into a platform device, etc... 15 * interrupts off)
21 * Also check what's up with cache coherency, and if we really 16 * - maybe avoid some data copies with i2c by directly using the smu cmd
22 * can't do better than flushing the cache, maybe build a table 17 * buffer and a lower level internal interface
23 * of command len/reply len like the PMU driver to only flush 18 * - understand SMU -> CPU events and implement reception of them via
24 * what is actually necessary. 19 * the userland interface
25 * --BenH.
26 */ 20 */
27 21
28#include <linux/config.h> 22#include <linux/config.h>
@@ -36,6 +30,11 @@
36#include <linux/jiffies.h> 30#include <linux/jiffies.h>
37#include <linux/interrupt.h> 31#include <linux/interrupt.h>
38#include <linux/rtc.h> 32#include <linux/rtc.h>
33#include <linux/completion.h>
34#include <linux/miscdevice.h>
35#include <linux/delay.h>
36#include <linux/sysdev.h>
37#include <linux/poll.h>
39 38
40#include <asm/byteorder.h> 39#include <asm/byteorder.h>
41#include <asm/io.h> 40#include <asm/io.h>
@@ -45,8 +44,13 @@
45#include <asm/smu.h> 44#include <asm/smu.h>
46#include <asm/sections.h> 45#include <asm/sections.h>
47#include <asm/abs_addr.h> 46#include <asm/abs_addr.h>
47#include <asm/uaccess.h>
48#include <asm/of_device.h>
49
50#define VERSION "0.6"
51#define AUTHOR "(c) 2005 Benjamin Herrenschmidt, IBM Corp."
48 52
49#define DEBUG_SMU 1 53#undef DEBUG_SMU
50 54
51#ifdef DEBUG_SMU 55#ifdef DEBUG_SMU
52#define DPRINTK(fmt, args...) do { printk(KERN_DEBUG fmt , ##args); } while (0) 56#define DPRINTK(fmt, args...) do { printk(KERN_DEBUG fmt , ##args); } while (0)
@@ -57,20 +61,30 @@
57/* 61/*
58 * This is the command buffer passed to the SMU hardware 62 * This is the command buffer passed to the SMU hardware
59 */ 63 */
64#define SMU_MAX_DATA 254
65
60struct smu_cmd_buf { 66struct smu_cmd_buf {
61 u8 cmd; 67 u8 cmd;
62 u8 length; 68 u8 length;
63 u8 data[0x0FFE]; 69 u8 data[SMU_MAX_DATA];
64}; 70};
65 71
66struct smu_device { 72struct smu_device {
67 spinlock_t lock; 73 spinlock_t lock;
68 struct device_node *of_node; 74 struct device_node *of_node;
69 int db_ack; /* doorbell ack GPIO */ 75 struct of_device *of_dev;
70 int db_req; /* doorbell req GPIO */ 76 int doorbell; /* doorbell gpio */
71 u32 __iomem *db_buf; /* doorbell buffer */ 77 u32 __iomem *db_buf; /* doorbell buffer */
78 int db_irq;
79 int msg;
80 int msg_irq;
72 struct smu_cmd_buf *cmd_buf; /* command buffer virtual */ 81 struct smu_cmd_buf *cmd_buf; /* command buffer virtual */
73 u32 cmd_buf_abs; /* command buffer absolute */ 82 u32 cmd_buf_abs; /* command buffer absolute */
83 struct list_head cmd_list;
84 struct smu_cmd *cmd_cur; /* pending command */
85 struct list_head cmd_i2c_list;
86 struct smu_i2c_cmd *cmd_i2c_cur; /* pending i2c command */
87 struct timer_list i2c_timer;
74}; 88};
75 89
76/* 90/*
@@ -79,113 +93,245 @@ struct smu_device {
79 */ 93 */
80static struct smu_device *smu; 94static struct smu_device *smu;
81 95
96
82/* 97/*
83 * SMU low level communication stuff 98 * SMU driver low level stuff
84 */ 99 */
85static inline int smu_cmd_stat(struct smu_cmd_buf *cmd_buf, u8 cmd_ack)
86{
87 rmb();
88 return cmd_buf->cmd == cmd_ack && cmd_buf->length != 0;
89}
90 100
91static inline u8 smu_save_ack_cmd(struct smu_cmd_buf *cmd_buf) 101static void smu_start_cmd(void)
92{ 102{
93 return (~cmd_buf->cmd) & 0xff; 103 unsigned long faddr, fend;
94} 104 struct smu_cmd *cmd;
95 105
96static void smu_send_cmd(struct smu_device *dev) 106 if (list_empty(&smu->cmd_list))
97{ 107 return;
98 /* SMU command buf is currently cacheable, we need a physical 108
99 * address. This isn't exactly a DMA mapping here, I suspect 109 /* Fetch first command in queue */
110 cmd = list_entry(smu->cmd_list.next, struct smu_cmd, link);
111 smu->cmd_cur = cmd;
112 list_del(&cmd->link);
113
114 DPRINTK("SMU: starting cmd %x, %d bytes data\n", cmd->cmd,
115 cmd->data_len);
116 DPRINTK("SMU: data buffer: %02x %02x %02x %02x ...\n",
117 ((u8 *)cmd->data_buf)[0], ((u8 *)cmd->data_buf)[1],
118 ((u8 *)cmd->data_buf)[2], ((u8 *)cmd->data_buf)[3]);
119
120 /* Fill the SMU command buffer */
121 smu->cmd_buf->cmd = cmd->cmd;
122 smu->cmd_buf->length = cmd->data_len;
123 memcpy(smu->cmd_buf->data, cmd->data_buf, cmd->data_len);
124
125 /* Flush command and data to RAM */
126 faddr = (unsigned long)smu->cmd_buf;
127 fend = faddr + smu->cmd_buf->length + 2;
128 flush_inval_dcache_range(faddr, fend);
129
130 /* This isn't exactly a DMA mapping here, I suspect
100 * the SMU is actually communicating with us via i2c to the 131 * the SMU is actually communicating with us via i2c to the
101 * northbridge or the CPU to access RAM. 132 * northbridge or the CPU to access RAM.
102 */ 133 */
103 writel(dev->cmd_buf_abs, dev->db_buf); 134 writel(smu->cmd_buf_abs, smu->db_buf);
104 135
105 /* Ring the SMU doorbell */ 136 /* Ring the SMU doorbell */
106 pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, dev->db_req, 4); 137 pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, smu->doorbell, 4);
107 pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, dev->db_req, 4);
108} 138}
109 139
110static int smu_cmd_done(struct smu_device *dev) 140
141static irqreturn_t smu_db_intr(int irq, void *arg, struct pt_regs *regs)
111{ 142{
112 unsigned long wait = 0; 143 unsigned long flags;
113 int gpio; 144 struct smu_cmd *cmd;
145 void (*done)(struct smu_cmd *cmd, void *misc) = NULL;
146 void *misc = NULL;
147 u8 gpio;
148 int rc = 0;
114 149
115 /* Check the SMU doorbell */ 150 /* SMU completed the command, well, we hope, let's make sure
116 do { 151 * of it
117 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, 152 */
118 NULL, dev->db_ack); 153 spin_lock_irqsave(&smu->lock, flags);
119 if ((gpio & 7) == 7)
120 return 0;
121 udelay(100);
122 } while(++wait < 10000);
123 154
124 printk(KERN_ERR "SMU timeout !\n"); 155 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
125 return -ENXIO; 156 if ((gpio & 7) != 7) {
157 spin_unlock_irqrestore(&smu->lock, flags);
158 return IRQ_HANDLED;
159 }
160
161 cmd = smu->cmd_cur;
162 smu->cmd_cur = NULL;
163 if (cmd == NULL)
164 goto bail;
165
166 if (rc == 0) {
167 unsigned long faddr;
168 int reply_len;
169 u8 ack;
170
171 /* CPU might have brought back the cache line, so we need
172 * to flush again before peeking at the SMU response. We
173 * flush the entire buffer for now as we haven't read the
174		 * reply length (it's only 2 cache lines anyway)
175 */
176 faddr = (unsigned long)smu->cmd_buf;
177 flush_inval_dcache_range(faddr, faddr + 256);
178
179 /* Now check ack */
180 ack = (~cmd->cmd) & 0xff;
181 if (ack != smu->cmd_buf->cmd) {
182 DPRINTK("SMU: incorrect ack, want %x got %x\n",
183 ack, smu->cmd_buf->cmd);
184 rc = -EIO;
185 }
186 reply_len = rc == 0 ? smu->cmd_buf->length : 0;
187 DPRINTK("SMU: reply len: %d\n", reply_len);
188 if (reply_len > cmd->reply_len) {
189			printk(KERN_WARNING "SMU: reply buffer too small, "
190			       "got %d bytes for a %d byte buffer\n",
191 reply_len, cmd->reply_len);
192 reply_len = cmd->reply_len;
193 }
194 cmd->reply_len = reply_len;
195 if (cmd->reply_buf && reply_len)
196 memcpy(cmd->reply_buf, smu->cmd_buf->data, reply_len);
197 }
198
199 /* Now complete the command. Write status last in order as we lost
200 * ownership of the command structure as soon as it's no longer -1
201 */
202 done = cmd->done;
203 misc = cmd->misc;
204 mb();
205 cmd->status = rc;
206 bail:
207 /* Start next command if any */
208 smu_start_cmd();
209 spin_unlock_irqrestore(&smu->lock, flags);
210
211 /* Call command completion handler if any */
212 if (done)
213 done(cmd, misc);
214
215 /* It's an edge interrupt, nothing to do */
216 return IRQ_HANDLED;
126} 217}
127 218
128static int smu_do_cmd(struct smu_device *dev) 219
220static irqreturn_t smu_msg_intr(int irq, void *arg, struct pt_regs *regs)
129{ 221{
130 int rc; 222 /* I don't quite know what to do with this one, we seem to never
131 u8 cmd_ack; 223 * receive it, so I suspect we have to arm it someway in the SMU
224 * to start getting events that way.
225 */
132 226
133 DPRINTK("SMU do_cmd %02x len=%d %02x\n", 227 printk(KERN_INFO "SMU: message interrupt !\n");
134 dev->cmd_buf->cmd, dev->cmd_buf->length,
135 dev->cmd_buf->data[0]);
136 228
137 cmd_ack = smu_save_ack_cmd(dev->cmd_buf); 229 /* It's an edge interrupt, nothing to do */
230 return IRQ_HANDLED;
231}
138 232
139 /* Clear cmd_buf cache lines */
140 flush_inval_dcache_range((unsigned long)dev->cmd_buf,
141 ((unsigned long)dev->cmd_buf) +
142 sizeof(struct smu_cmd_buf));
143 smu_send_cmd(dev);
144 rc = smu_cmd_done(dev);
145 if (rc == 0)
146 rc = smu_cmd_stat(dev->cmd_buf, cmd_ack) ? 0 : -1;
147 233
148 DPRINTK("SMU do_cmd %02x len=%d %02x => %d (%02x)\n", 234/*
149 dev->cmd_buf->cmd, dev->cmd_buf->length, 235 * Queued command management.
150 dev->cmd_buf->data[0], rc, cmd_ack); 236 *
237 */
238
239int smu_queue_cmd(struct smu_cmd *cmd)
240{
241 unsigned long flags;
151 242
152 return rc; 243 if (smu == NULL)
244 return -ENODEV;
245 if (cmd->data_len > SMU_MAX_DATA ||
246 cmd->reply_len > SMU_MAX_DATA)
247 return -EINVAL;
248
249 cmd->status = 1;
250 spin_lock_irqsave(&smu->lock, flags);
251 list_add_tail(&cmd->link, &smu->cmd_list);
252 if (smu->cmd_cur == NULL)
253 smu_start_cmd();
254 spin_unlock_irqrestore(&smu->lock, flags);
255
256 return 0;
153} 257}
258EXPORT_SYMBOL(smu_queue_cmd);
154 259
155/* RTC low level commands */ 260
156static inline int bcd2hex (int n) 261int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command,
262 unsigned int data_len,
263 void (*done)(struct smu_cmd *cmd, void *misc),
264 void *misc, ...)
157{ 265{
158 return (((n & 0xf0) >> 4) * 10) + (n & 0xf); 266 struct smu_cmd *cmd = &scmd->cmd;
267 va_list list;
268 int i;
269
270 if (data_len > sizeof(scmd->buffer))
271 return -EINVAL;
272
273 memset(scmd, 0, sizeof(*scmd));
274 cmd->cmd = command;
275 cmd->data_len = data_len;
276 cmd->data_buf = scmd->buffer;
277 cmd->reply_len = sizeof(scmd->buffer);
278 cmd->reply_buf = scmd->buffer;
279 cmd->done = done;
280 cmd->misc = misc;
281
282 va_start(list, misc);
283 for (i = 0; i < data_len; ++i)
284 scmd->buffer[i] = (u8)va_arg(list, int);
285 va_end(list);
286
287 return smu_queue_cmd(cmd);
159} 288}
289EXPORT_SYMBOL(smu_queue_simple);
160 290
161static inline int hex2bcd (int n) 291
292void smu_poll(void)
162{ 293{
163 return ((n / 10) << 4) + (n % 10); 294 u8 gpio;
295
296 if (smu == NULL)
297 return;
298
299 gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell);
300 if ((gpio & 7) == 7)
301 smu_db_intr(smu->db_irq, smu, NULL);
164} 302}
303EXPORT_SYMBOL(smu_poll);
165 304
166#if 0 305
167static inline void smu_fill_set_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf) 306void smu_done_complete(struct smu_cmd *cmd, void *misc)
168{ 307{
169 cmd_buf->cmd = 0x8e; 308 struct completion *comp = misc;
170 cmd_buf->length = 8; 309
171 cmd_buf->data[0] = 0x00; 310 complete(comp);
172 memset(cmd_buf->data + 1, 0, 7);
173} 311}
312EXPORT_SYMBOL(smu_done_complete);
313
174 314
175static inline void smu_fill_get_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf) 315void smu_spinwait_cmd(struct smu_cmd *cmd)
176{ 316{
177 cmd_buf->cmd = 0x8e; 317 while(cmd->status == 1)
178 cmd_buf->length = 1; 318 smu_poll();
179 cmd_buf->data[0] = 0x01;
180} 319}
320EXPORT_SYMBOL(smu_spinwait_cmd);
321
181 322
182static inline void smu_fill_dis_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf) 323/* RTC low level commands */
324static inline int bcd2hex (int n)
183{ 325{
184 cmd_buf->cmd = 0x8e; 326 return (((n & 0xf0) >> 4) * 10) + (n & 0xf);
185 cmd_buf->length = 1;
186 cmd_buf->data[0] = 0x02;
187} 327}
188#endif 328
329
330static inline int hex2bcd (int n)
331{
332 return ((n / 10) << 4) + (n % 10);
333}
334
189 335
190static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf, 336static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf,
191 struct rtc_time *time) 337 struct rtc_time *time)
@@ -202,100 +348,96 @@ static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf,
202 cmd_buf->data[7] = hex2bcd(time->tm_year - 100); 348 cmd_buf->data[7] = hex2bcd(time->tm_year - 100);
203} 349}
204 350
205static inline void smu_fill_get_rtc_cmd(struct smu_cmd_buf *cmd_buf)
206{
207 cmd_buf->cmd = 0x8e;
208 cmd_buf->length = 1;
209 cmd_buf->data[0] = 0x81;
210}
211 351
212static void smu_parse_get_rtc_reply(struct smu_cmd_buf *cmd_buf, 352int smu_get_rtc_time(struct rtc_time *time, int spinwait)
213 struct rtc_time *time)
214{ 353{
215 time->tm_sec = bcd2hex(cmd_buf->data[0]); 354 struct smu_simple_cmd cmd;
216 time->tm_min = bcd2hex(cmd_buf->data[1]);
217 time->tm_hour = bcd2hex(cmd_buf->data[2]);
218 time->tm_wday = bcd2hex(cmd_buf->data[3]);
219 time->tm_mday = bcd2hex(cmd_buf->data[4]);
220 time->tm_mon = bcd2hex(cmd_buf->data[5]) - 1;
221 time->tm_year = bcd2hex(cmd_buf->data[6]) + 100;
222}
223
224int smu_get_rtc_time(struct rtc_time *time)
225{
226 unsigned long flags;
227 int rc; 355 int rc;
228 356
229 if (smu == NULL) 357 if (smu == NULL)
230 return -ENODEV; 358 return -ENODEV;
231 359
232 memset(time, 0, sizeof(struct rtc_time)); 360 memset(time, 0, sizeof(struct rtc_time));
233 spin_lock_irqsave(&smu->lock, flags); 361 rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 1, NULL, NULL,
234 smu_fill_get_rtc_cmd(smu->cmd_buf); 362 SMU_CMD_RTC_GET_DATETIME);
235 rc = smu_do_cmd(smu); 363 if (rc)
236 if (rc == 0) 364 return rc;
237 smu_parse_get_rtc_reply(smu->cmd_buf, time); 365 smu_spinwait_simple(&cmd);
238 spin_unlock_irqrestore(&smu->lock, flags);
239 366
240 return rc; 367 time->tm_sec = bcd2hex(cmd.buffer[0]);
368 time->tm_min = bcd2hex(cmd.buffer[1]);
369 time->tm_hour = bcd2hex(cmd.buffer[2]);
370 time->tm_wday = bcd2hex(cmd.buffer[3]);
371 time->tm_mday = bcd2hex(cmd.buffer[4]);
372 time->tm_mon = bcd2hex(cmd.buffer[5]) - 1;
373 time->tm_year = bcd2hex(cmd.buffer[6]) + 100;
374
375 return 0;
241} 376}
242 377
243int smu_set_rtc_time(struct rtc_time *time) 378
379int smu_set_rtc_time(struct rtc_time *time, int spinwait)
244{ 380{
245 unsigned long flags; 381 struct smu_simple_cmd cmd;
246 int rc; 382 int rc;
247 383
248 if (smu == NULL) 384 if (smu == NULL)
249 return -ENODEV; 385 return -ENODEV;
250 386
251 spin_lock_irqsave(&smu->lock, flags); 387 rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 8, NULL, NULL,
252 smu_fill_set_rtc_cmd(smu->cmd_buf, time); 388 SMU_CMD_RTC_SET_DATETIME,
253 rc = smu_do_cmd(smu); 389 hex2bcd(time->tm_sec),
254 spin_unlock_irqrestore(&smu->lock, flags); 390 hex2bcd(time->tm_min),
391 hex2bcd(time->tm_hour),
392 time->tm_wday,
393 hex2bcd(time->tm_mday),
394 hex2bcd(time->tm_mon) + 1,
395 hex2bcd(time->tm_year - 100));
396 if (rc)
397 return rc;
398 smu_spinwait_simple(&cmd);
255 399
256 return rc; 400 return 0;
257} 401}
258 402
403
259void smu_shutdown(void) 404void smu_shutdown(void)
260{ 405{
261 const unsigned char *command = "SHUTDOWN"; 406 struct smu_simple_cmd cmd;
262 unsigned long flags;
263 407
264 if (smu == NULL) 408 if (smu == NULL)
265 return; 409 return;
266 410
267 spin_lock_irqsave(&smu->lock, flags); 411 if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 9, NULL, NULL,
268 smu->cmd_buf->cmd = 0xaa; 412 'S', 'H', 'U', 'T', 'D', 'O', 'W', 'N', 0))
269 smu->cmd_buf->length = strlen(command); 413 return;
270 strcpy(smu->cmd_buf->data, command); 414 smu_spinwait_simple(&cmd);
271 smu_do_cmd(smu);
272 for (;;) 415 for (;;)
273 ; 416 ;
274 spin_unlock_irqrestore(&smu->lock, flags);
275} 417}
276 418
419
277void smu_restart(void) 420void smu_restart(void)
278{ 421{
279 const unsigned char *command = "RESTART"; 422 struct smu_simple_cmd cmd;
280 unsigned long flags;
281 423
282 if (smu == NULL) 424 if (smu == NULL)
283 return; 425 return;
284 426
285 spin_lock_irqsave(&smu->lock, flags); 427 if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, NULL, NULL,
286 smu->cmd_buf->cmd = 0xaa; 428 'R', 'E', 'S', 'T', 'A', 'R', 'T', 0))
287 smu->cmd_buf->length = strlen(command); 429 return;
288 strcpy(smu->cmd_buf->data, command); 430 smu_spinwait_simple(&cmd);
289 smu_do_cmd(smu);
290 for (;;) 431 for (;;)
291 ; 432 ;
292 spin_unlock_irqrestore(&smu->lock, flags);
293} 433}
294 434
435
295int smu_present(void) 436int smu_present(void)
296{ 437{
297 return smu != NULL; 438 return smu != NULL;
298} 439}
440EXPORT_SYMBOL(smu_present);
299 441
300 442
301int smu_init (void) 443int smu_init (void)
@@ -307,6 +449,8 @@ int smu_init (void)
307 if (np == NULL) 449 if (np == NULL)
308 return -ENODEV; 450 return -ENODEV;
309 451
452 printk(KERN_INFO "SMU driver %s %s\n", VERSION, AUTHOR);
453
310 if (smu_cmdbuf_abs == 0) { 454 if (smu_cmdbuf_abs == 0) {
311 printk(KERN_ERR "SMU: Command buffer not allocated !\n"); 455 printk(KERN_ERR "SMU: Command buffer not allocated !\n");
312 return -EINVAL; 456 return -EINVAL;
@@ -318,7 +462,13 @@ int smu_init (void)
318 memset(smu, 0, sizeof(*smu)); 462 memset(smu, 0, sizeof(*smu));
319 463
320 spin_lock_init(&smu->lock); 464 spin_lock_init(&smu->lock);
465 INIT_LIST_HEAD(&smu->cmd_list);
466 INIT_LIST_HEAD(&smu->cmd_i2c_list);
321 smu->of_node = np; 467 smu->of_node = np;
468 smu->db_irq = NO_IRQ;
469 smu->msg_irq = NO_IRQ;
470 init_timer(&smu->i2c_timer);
471
322 /* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a 472 /* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a
323 * 32 bits value safely 473 * 32 bits value safely
324 */ 474 */
@@ -331,8 +481,8 @@ int smu_init (void)
331 goto fail; 481 goto fail;
332 } 482 }
333 data = (u32 *)get_property(np, "reg", NULL); 483 data = (u32 *)get_property(np, "reg", NULL);
334 of_node_put(np);
335 if (data == NULL) { 484 if (data == NULL) {
485 of_node_put(np);
336 printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n"); 486 printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n");
337 goto fail; 487 goto fail;
338 } 488 }
@@ -341,8 +491,31 @@ int smu_init (void)
341 * and ack. GPIOs are at 0x50, best would be to find that out 491 * and ack. GPIOs are at 0x50, best would be to find that out
342 * in the device-tree though. 492 * in the device-tree though.
343 */ 493 */
344 smu->db_req = 0x50 + *data; 494 smu->doorbell = *data;
345 smu->db_ack = 0x50 + *data; 495 if (smu->doorbell < 0x50)
496 smu->doorbell += 0x50;
497 if (np->n_intrs > 0)
498 smu->db_irq = np->intrs[0].line;
499
500 of_node_put(np);
501
502 /* Now look for the smu-interrupt GPIO */
503 do {
504 np = of_find_node_by_name(NULL, "smu-interrupt");
505 if (np == NULL)
506 break;
507 data = (u32 *)get_property(np, "reg", NULL);
508 if (data == NULL) {
509 of_node_put(np);
510 break;
511 }
512 smu->msg = *data;
513 if (smu->msg < 0x50)
514 smu->msg += 0x50;
515 if (np->n_intrs > 0)
516 smu->msg_irq = np->intrs[0].line;
517 of_node_put(np);
518 } while(0);
346 519
347 /* Doorbell buffer is currently hard-coded, I didn't find a proper 520 /* Doorbell buffer is currently hard-coded, I didn't find a proper
348 * device-tree entry giving the address. Best would probably to use 521 * device-tree entry giving the address. Best would probably to use
@@ -362,3 +535,584 @@ int smu_init (void)
362 return -ENXIO; 535 return -ENXIO;
363 536
364} 537}
538
539
540static int smu_late_init(void)
541{
542 if (!smu)
543 return 0;
544
545 /*
546 * Try to request the interrupts
547 */
548
549 if (smu->db_irq != NO_IRQ) {
550 if (request_irq(smu->db_irq, smu_db_intr,
551 SA_SHIRQ, "SMU doorbell", smu) < 0) {
552 printk(KERN_WARNING "SMU: can't "
553 "request interrupt %d\n",
554 smu->db_irq);
555 smu->db_irq = NO_IRQ;
556 }
557 }
558
559 if (smu->msg_irq != NO_IRQ) {
560 if (request_irq(smu->msg_irq, smu_msg_intr,
561 SA_SHIRQ, "SMU message", smu) < 0) {
562 printk(KERN_WARNING "SMU: can't "
563 "request interrupt %d\n",
564 smu->msg_irq);
565 smu->msg_irq = NO_IRQ;
566 }
567 }
568
569 return 0;
570}
571arch_initcall(smu_late_init);
572
573/*
574 * sysfs visibility
575 */
576
577static void smu_expose_childs(void *unused)
578{
579 struct device_node *np;
580
581 for (np = NULL; (np = of_get_next_child(smu->of_node, np)) != NULL;) {
582 if (device_is_compatible(np, "smu-i2c")) {
583 char name[32];
584 u32 *reg = (u32 *)get_property(np, "reg", NULL);
585
586 if (reg == NULL)
587 continue;
588 sprintf(name, "smu-i2c-%02x", *reg);
589 of_platform_device_create(np, name, &smu->of_dev->dev);
590 }
591 }
592
593}
594
595static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL);
596
597static int smu_platform_probe(struct of_device* dev,
598 const struct of_device_id *match)
599{
600 if (!smu)
601 return -ENODEV;
602 smu->of_dev = dev;
603
604 /*
605 * Ok, we are matched, now expose all i2c busses. We have to defer
606 * that unfortunately or it would deadlock inside the device model
607 */
608 schedule_work(&smu_expose_childs_work);
609
610 return 0;
611}
612
613static struct of_device_id smu_platform_match[] =
614{
615 {
616 .type = "smu",
617 },
618 {},
619};
620
621static struct of_platform_driver smu_of_platform_driver =
622{
623 .name = "smu",
624 .match_table = smu_platform_match,
625 .probe = smu_platform_probe,
626};
627
628static int __init smu_init_sysfs(void)
629{
630 int rc;
631
632 /*
633 * Due to sysfs bogosity, a sysdev is not a real device, so
634 * we should in fact create both if we want sysdev semantics
635 * for power management.
636 * For now, we don't power manage machines with an SMU chip,
637 * I'm a bit too far from figuring out how that works with those
638 * new chipsets, but that will come back and bite us
639 */
640 rc = of_register_driver(&smu_of_platform_driver);
641 return 0;
642}
643
644device_initcall(smu_init_sysfs);
645
646struct of_device *smu_get_ofdev(void)
647{
648 if (!smu)
649 return NULL;
650 return smu->of_dev;
651}
652
653EXPORT_SYMBOL_GPL(smu_get_ofdev);
654
655/*
656 * i2c interface
657 */
658
659static void smu_i2c_complete_command(struct smu_i2c_cmd *cmd, int fail)
660{
661 void (*done)(struct smu_i2c_cmd *cmd, void *misc) = cmd->done;
662 void *misc = cmd->misc;
663 unsigned long flags;
664
665 /* Check for read case */
666 if (!fail && cmd->read) {
667 if (cmd->pdata[0] < 1)
668 fail = 1;
669 else
670 memcpy(cmd->info.data, &cmd->pdata[1],
671 cmd->info.datalen);
672 }
673
674 DPRINTK("SMU: completing, success: %d\n", !fail);
675
676 /* Update status and mark no pending i2c command with lock
677 * held so nobody comes in while we dequeue an eventual
678 * pending next i2c command
679 */
680 spin_lock_irqsave(&smu->lock, flags);
681 smu->cmd_i2c_cur = NULL;
682 wmb();
683 cmd->status = fail ? -EIO : 0;
684
685 /* Is there another i2c command waiting ? */
686 if (!list_empty(&smu->cmd_i2c_list)) {
687 struct smu_i2c_cmd *newcmd;
688
689 /* Fetch it, new current, remove from list */
690 newcmd = list_entry(smu->cmd_i2c_list.next,
691 struct smu_i2c_cmd, link);
692 smu->cmd_i2c_cur = newcmd;
693 list_del(&cmd->link);
694
695 /* Queue with low level smu */
696 list_add_tail(&cmd->scmd.link, &smu->cmd_list);
697 if (smu->cmd_cur == NULL)
698 smu_start_cmd();
699 }
700 spin_unlock_irqrestore(&smu->lock, flags);
701
702 /* Call command completion handler if any */
703 if (done)
704 done(cmd, misc);
705
706}
707
708
709static void smu_i2c_retry(unsigned long data)
710{
711 struct smu_i2c_cmd *cmd = (struct smu_i2c_cmd *)data;
712
713 DPRINTK("SMU: i2c failure, requeuing...\n");
714
715 /* requeue command simply by resetting reply_len */
716 cmd->pdata[0] = 0xff;
717 cmd->scmd.reply_len = 0x10;
718 smu_queue_cmd(&cmd->scmd);
719}
720
721
722static void smu_i2c_low_completion(struct smu_cmd *scmd, void *misc)
723{
724 struct smu_i2c_cmd *cmd = misc;
725 int fail = 0;
726
727 DPRINTK("SMU: i2c compl. stage=%d status=%x pdata[0]=%x rlen: %x\n",
728 cmd->stage, scmd->status, cmd->pdata[0], scmd->reply_len);
729
730 /* Check for possible status */
731 if (scmd->status < 0)
732 fail = 1;
733 else if (cmd->read) {
734 if (cmd->stage == 0)
735 fail = cmd->pdata[0] != 0;
736 else
737 fail = cmd->pdata[0] >= 0x80;
738 } else {
739 fail = cmd->pdata[0] != 0;
740 }
741
742 /* Handle failures by requeuing command, after 5ms interval
743 */
744 if (fail && --cmd->retries > 0) {
745 DPRINTK("SMU: i2c failure, starting timer...\n");
746 smu->i2c_timer.function = smu_i2c_retry;
747 smu->i2c_timer.data = (unsigned long)cmd;
748 smu->i2c_timer.expires = jiffies + msecs_to_jiffies(5);
749 add_timer(&smu->i2c_timer);
750 return;
751 }
752
753 /* If failure or stage 1, command is complete */
754 if (fail || cmd->stage != 0) {
755 smu_i2c_complete_command(cmd, fail);
756 return;
757 }
758
759 DPRINTK("SMU: going to stage 1\n");
760
761 /* Ok, initial command complete, now poll status */
762 scmd->reply_buf = cmd->pdata;
763 scmd->reply_len = 0x10;
764 scmd->data_buf = cmd->pdata;
765 scmd->data_len = 1;
766 cmd->pdata[0] = 0;
767 cmd->stage = 1;
768 cmd->retries = 20;
769 smu_queue_cmd(scmd);
770}
771
772
773int smu_queue_i2c(struct smu_i2c_cmd *cmd)
774{
775 unsigned long flags;
776
777 if (smu == NULL)
778 return -ENODEV;
779
780 /* Fill most fields of scmd */
781 cmd->scmd.cmd = SMU_CMD_I2C_COMMAND;
782 cmd->scmd.done = smu_i2c_low_completion;
783 cmd->scmd.misc = cmd;
784 cmd->scmd.reply_buf = cmd->pdata;
785 cmd->scmd.reply_len = 0x10;
786 cmd->scmd.data_buf = (u8 *)(char *)&cmd->info;
787 cmd->scmd.status = 1;
788 cmd->stage = 0;
789 cmd->pdata[0] = 0xff;
790 cmd->retries = 20;
791 cmd->status = 1;
792
793 /* Check transfer type, sanitize some "info" fields
794 * based on transfer type and do more checking
795 */
796 cmd->info.caddr = cmd->info.devaddr;
797 cmd->read = cmd->info.devaddr & 0x01;
798 switch(cmd->info.type) {
799 case SMU_I2C_TRANSFER_SIMPLE:
800 memset(&cmd->info.sublen, 0, 4);
801 break;
802 case SMU_I2C_TRANSFER_COMBINED:
803 cmd->info.devaddr &= 0xfe;
804 case SMU_I2C_TRANSFER_STDSUB:
805 if (cmd->info.sublen > 3)
806 return -EINVAL;
807 break;
808 default:
809 return -EINVAL;
810 }
811
812 /* Finish setting up command based on transfer direction
813 */
814 if (cmd->read) {
815 if (cmd->info.datalen > SMU_I2C_READ_MAX)
816 return -EINVAL;
817 memset(cmd->info.data, 0xff, cmd->info.datalen);
818 cmd->scmd.data_len = 9;
819 } else {
820 if (cmd->info.datalen > SMU_I2C_WRITE_MAX)
821 return -EINVAL;
822 cmd->scmd.data_len = 9 + cmd->info.datalen;
823 }
824
825 DPRINTK("SMU: i2c enqueuing command\n");
826 DPRINTK("SMU: %s, len=%d bus=%x addr=%x sub0=%x type=%x\n",
827 cmd->read ? "read" : "write", cmd->info.datalen,
828 cmd->info.bus, cmd->info.caddr,
829 cmd->info.subaddr[0], cmd->info.type);
830
831
832 /* Enqueue command in i2c list, and if empty, enqueue also in
833 * main command list
834 */
835 spin_lock_irqsave(&smu->lock, flags);
836 if (smu->cmd_i2c_cur == NULL) {
837 smu->cmd_i2c_cur = cmd;
838 list_add_tail(&cmd->scmd.link, &smu->cmd_list);
839 if (smu->cmd_cur == NULL)
840 smu_start_cmd();
841 } else
842 list_add_tail(&cmd->link, &smu->cmd_i2c_list);
843 spin_unlock_irqrestore(&smu->lock, flags);
844
845 return 0;
846}
847
848
849
850/*
851 * Userland driver interface
852 */
853
854
855static LIST_HEAD(smu_clist);
856static DEFINE_SPINLOCK(smu_clist_lock);
857
858enum smu_file_mode {
859 smu_file_commands,
860 smu_file_events,
861 smu_file_closing
862};
863
864struct smu_private
865{
866 struct list_head list;
867 enum smu_file_mode mode;
868 int busy;
869 struct smu_cmd cmd;
870 spinlock_t lock;
871 wait_queue_head_t wait;
872 u8 buffer[SMU_MAX_DATA];
873};
874
875
876static int smu_open(struct inode *inode, struct file *file)
877{
878 struct smu_private *pp;
879 unsigned long flags;
880
881 pp = kmalloc(sizeof(struct smu_private), GFP_KERNEL);
882 if (pp == 0)
883 return -ENOMEM;
884 memset(pp, 0, sizeof(struct smu_private));
885 spin_lock_init(&pp->lock);
886 pp->mode = smu_file_commands;
887 init_waitqueue_head(&pp->wait);
888
889 spin_lock_irqsave(&smu_clist_lock, flags);
890 list_add(&pp->list, &smu_clist);
891 spin_unlock_irqrestore(&smu_clist_lock, flags);
892 file->private_data = pp;
893
894 return 0;
895}
896
897
898static void smu_user_cmd_done(struct smu_cmd *cmd, void *misc)
899{
900 struct smu_private *pp = misc;
901
902 wake_up_all(&pp->wait);
903}
904
905
906static ssize_t smu_write(struct file *file, const char __user *buf,
907 size_t count, loff_t *ppos)
908{
909 struct smu_private *pp = file->private_data;
910 unsigned long flags;
911 struct smu_user_cmd_hdr hdr;
912 int rc = 0;
913
914 if (pp->busy)
915 return -EBUSY;
916 else if (copy_from_user(&hdr, buf, sizeof(hdr)))
917 return -EFAULT;
918 else if (hdr.cmdtype == SMU_CMDTYPE_WANTS_EVENTS) {
919 pp->mode = smu_file_events;
920 return 0;
921 } else if (hdr.cmdtype != SMU_CMDTYPE_SMU)
922 return -EINVAL;
923 else if (pp->mode != smu_file_commands)
924 return -EBADFD;
925 else if (hdr.data_len > SMU_MAX_DATA)
926 return -EINVAL;
927
928 spin_lock_irqsave(&pp->lock, flags);
929 if (pp->busy) {
930 spin_unlock_irqrestore(&pp->lock, flags);
931 return -EBUSY;
932 }
933 pp->busy = 1;
934 pp->cmd.status = 1;
935 spin_unlock_irqrestore(&pp->lock, flags);
936
937 if (copy_from_user(pp->buffer, buf + sizeof(hdr), hdr.data_len)) {
938 pp->busy = 0;
939 return -EFAULT;
940 }
941
942 pp->cmd.cmd = hdr.cmd;
943 pp->cmd.data_len = hdr.data_len;
944 pp->cmd.reply_len = SMU_MAX_DATA;
945 pp->cmd.data_buf = pp->buffer;
946 pp->cmd.reply_buf = pp->buffer;
947 pp->cmd.done = smu_user_cmd_done;
948 pp->cmd.misc = pp;
949 rc = smu_queue_cmd(&pp->cmd);
950 if (rc < 0)
951 return rc;
952 return count;
953}
954
955
956static ssize_t smu_read_command(struct file *file, struct smu_private *pp,
957 char __user *buf, size_t count)
958{
959 DECLARE_WAITQUEUE(wait, current);
960 struct smu_user_reply_hdr hdr;
961 unsigned long flags;
962 int size, rc = 0;
963
964 if (!pp->busy)
965 return 0;
966 if (count < sizeof(struct smu_user_reply_hdr))
967 return -EOVERFLOW;
968 spin_lock_irqsave(&pp->lock, flags);
969 if (pp->cmd.status == 1) {
970 if (file->f_flags & O_NONBLOCK)
971 return -EAGAIN;
972 add_wait_queue(&pp->wait, &wait);
973 for (;;) {
974 set_current_state(TASK_INTERRUPTIBLE);
975 rc = 0;
976 if (pp->cmd.status != 1)
977 break;
978 rc = -ERESTARTSYS;
979 if (signal_pending(current))
980 break;
981 spin_unlock_irqrestore(&pp->lock, flags);
982 schedule();
983 spin_lock_irqsave(&pp->lock, flags);
984 }
985 set_current_state(TASK_RUNNING);
986 remove_wait_queue(&pp->wait, &wait);
987 }
988 spin_unlock_irqrestore(&pp->lock, flags);
989 if (rc)
990 return rc;
991 if (pp->cmd.status != 0)
992 pp->cmd.reply_len = 0;
993 size = sizeof(hdr) + pp->cmd.reply_len;
994 if (count < size)
995 size = count;
996 rc = size;
997 hdr.status = pp->cmd.status;
998 hdr.reply_len = pp->cmd.reply_len;
999 if (copy_to_user(buf, &hdr, sizeof(hdr)))
1000 return -EFAULT;
1001 size -= sizeof(hdr);
1002 if (size && copy_to_user(buf + sizeof(hdr), pp->buffer, size))
1003 return -EFAULT;
1004 pp->busy = 0;
1005
1006 return rc;
1007}
1008
1009
1010static ssize_t smu_read_events(struct file *file, struct smu_private *pp,
1011 char __user *buf, size_t count)
1012{
1013 /* Not implemented */
1014 msleep_interruptible(1000);
1015 return 0;
1016}
1017
1018
1019static ssize_t smu_read(struct file *file, char __user *buf,
1020 size_t count, loff_t *ppos)
1021{
1022 struct smu_private *pp = file->private_data;
1023
1024 if (pp->mode == smu_file_commands)
1025 return smu_read_command(file, pp, buf, count);
1026 if (pp->mode == smu_file_events)
1027 return smu_read_events(file, pp, buf, count);
1028
1029 return -EBADFD;
1030}
1031
1032static unsigned int smu_fpoll(struct file *file, poll_table *wait)
1033{
1034 struct smu_private *pp = file->private_data;
1035 unsigned int mask = 0;
1036 unsigned long flags;
1037
1038 if (pp == 0)
1039 return 0;
1040
1041 if (pp->mode == smu_file_commands) {
1042 poll_wait(file, &pp->wait, wait);
1043
1044 spin_lock_irqsave(&pp->lock, flags);
1045 if (pp->busy && pp->cmd.status != 1)
1046 mask |= POLLIN;
1047 spin_unlock_irqrestore(&pp->lock, flags);
1048 } if (pp->mode == smu_file_events) {
1049 /* Not yet implemented */
1050 }
1051 return mask;
1052}
1053
1054static int smu_release(struct inode *inode, struct file *file)
1055{
1056 struct smu_private *pp = file->private_data;
1057 unsigned long flags;
1058 unsigned int busy;
1059
1060 if (pp == 0)
1061 return 0;
1062
1063 file->private_data = NULL;
1064
1065 /* Mark file as closing to avoid races with new request */
1066 spin_lock_irqsave(&pp->lock, flags);
1067 pp->mode = smu_file_closing;
1068 busy = pp->busy;
1069
1070 /* Wait for any pending request to complete */
1071 if (busy && pp->cmd.status == 1) {
1072 DECLARE_WAITQUEUE(wait, current);
1073
1074 add_wait_queue(&pp->wait, &wait);
1075 for (;;) {
1076 set_current_state(TASK_UNINTERRUPTIBLE);
1077 if (pp->cmd.status != 1)
1078 break;
1079 spin_lock_irqsave(&pp->lock, flags);
1080 schedule();
1081 spin_unlock_irqrestore(&pp->lock, flags);
1082 }
1083 set_current_state(TASK_RUNNING);
1084 remove_wait_queue(&pp->wait, &wait);
1085 }
1086 spin_unlock_irqrestore(&pp->lock, flags);
1087
1088 spin_lock_irqsave(&smu_clist_lock, flags);
1089 list_del(&pp->list);
1090 spin_unlock_irqrestore(&smu_clist_lock, flags);
1091 kfree(pp);
1092
1093 return 0;
1094}
1095
1096
1097static struct file_operations smu_device_fops __pmacdata = {
1098 .llseek = no_llseek,
1099 .read = smu_read,
1100 .write = smu_write,
1101 .poll = smu_fpoll,
1102 .open = smu_open,
1103 .release = smu_release,
1104};
1105
1106static struct miscdevice pmu_device __pmacdata = {
1107 MISC_DYNAMIC_MINOR, "smu", &smu_device_fops
1108};
1109
1110static int smu_device_init(void)
1111{
1112 if (!smu)
1113 return -ENODEV;
1114 if (misc_register(&pmu_device) < 0)
1115 printk(KERN_ERR "via-pmu: cannot register misc device.\n");
1116 return 0;
1117}
1118device_initcall(smu_device_init);
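
For illustration, here is a minimal sketch of how a kernel-side caller would drive the queued-command API this smu.c rework introduces. It is not part of the patch: it simply mirrors the new smu_get_rtc_time() above, and it assumes that SMU_CMD_RTC_COMMAND, SMU_CMD_RTC_GET_DATETIME and the smu_spinwait_simple() helper come from the matching asm-ppc64/smu.h changes, which are not shown in this diff.

	/* Sketch only: queue a one-byte RTC "get date/time" request with no
	 * completion callback, then busy-poll for the reply the same way
	 * the new smu_get_rtc_time() above does.
	 */
	static int example_read_rtc_seconds(void)
	{
		struct smu_simple_cmd cmd;
		int rc;

		rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 1, NULL, NULL,
				      SMU_CMD_RTC_GET_DATETIME);
		if (rc)
			return rc;
		smu_spinwait_simple(&cmd);

		/* Reply bytes land in cmd.buffer; byte 0 is BCD seconds */
		return cmd.buffer[0];
	}

An asynchronous caller would instead pass smu_done_complete() (also added above) together with a struct completion as the done/misc pair and sleep on the completion rather than spinning.
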
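
The userland interface added at the end of smu.c (smu_open/smu_write/smu_read) implies a simple request/reply protocol over the misc device: one write() carrying a struct smu_user_cmd_hdr followed by data_len payload bytes, then a read() returning a struct smu_user_reply_hdr followed by the reply data. The user-space sketch below is illustrative only; the exact header layouts, the SMU_CMDTYPE_SMU constant, the header's install location and the /dev/smu node name are assumptions, not taken from this diff. The 0x8e/0x81 command bytes are the RTC "get date/time" values used elsewhere in the driver.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <asm/smu.h>	/* struct smu_user_cmd_hdr etc. (assumed location) */

	int main(void)
	{
		unsigned char buf[sizeof(struct smu_user_cmd_hdr) + 16];
		struct smu_user_cmd_hdr hdr;
		struct smu_user_reply_hdr *rep = (struct smu_user_reply_hdr *)buf;
		int fd = open("/dev/smu", O_RDWR);	/* node name assumed */

		if (fd < 0)
			return 1;

		memset(&hdr, 0, sizeof(hdr));
		hdr.cmdtype = SMU_CMDTYPE_SMU;	/* plain command, not event mode */
		hdr.cmd = 0x8e;			/* RTC command, as in the driver */
		hdr.data_len = 1;
		memcpy(buf, &hdr, sizeof(hdr));
		buf[sizeof(hdr)] = 0x81;	/* "get date/time" sub-command */

		/* One write queues the command; the read blocks until done */
		if (write(fd, buf, sizeof(hdr) + 1) < 0 ||
		    read(fd, buf, sizeof(buf)) < 0) {
			close(fd);
			return 1;
		}
		printf("status %d, %u reply bytes\n", rep->status,
		       (unsigned int)rep->reply_len);
		close(fd);
		return 0;
	}
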
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index c9ca1118e449..f38696622eb4 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -599,7 +599,7 @@ thermostat_init(void)
599 sensor_location[2] = "?"; 599 sensor_location[2] = "?";
600 } 600 }
601 601
602 of_dev = of_platform_device_create(np, "temperatures"); 602 of_dev = of_platform_device_create(np, "temperatures", NULL);
603 603
604 if (of_dev == NULL) { 604 if (of_dev == NULL) {
605 printk(KERN_ERR "Can't register temperatures device !\n"); 605 printk(KERN_ERR "Can't register temperatures device !\n");
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 703e31973314..cc507ceef153 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2051,7 +2051,7 @@ static int __init therm_pm72_init(void)
2051 return -ENODEV; 2051 return -ENODEV;
2052 } 2052 }
2053 } 2053 }
2054 of_dev = of_platform_device_create(np, "temperature"); 2054 of_dev = of_platform_device_create(np, "temperature", NULL);
2055 if (of_dev == NULL) { 2055 if (of_dev == NULL) {
2056 printk(KERN_ERR "Can't register FCU platform device !\n"); 2056 printk(KERN_ERR "Can't register FCU platform device !\n");
2057 return -ENODEV; 2057 return -ENODEV;
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index cbb72eb0426d..6aaa1df1a64e 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -504,7 +504,7 @@ g4fan_init( void )
504 } 504 }
505 if( !(np=of_find_node_by_name(NULL, "fan")) ) 505 if( !(np=of_find_node_by_name(NULL, "fan")) )
506 return -ENODEV; 506 return -ENODEV;
507 x.of_dev = of_platform_device_create( np, "temperature" ); 507 x.of_dev = of_platform_device_create(np, "temperature", NULL);
508 of_node_put( np ); 508 of_node_put( np );
509 509
510 if( !x.of_dev ) { 510 if( !x.of_dev ) {
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 200a0688f717..54ec737195e0 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -230,11 +230,20 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi
230 230
231static void __hash_remove(struct hash_cell *hc) 231static void __hash_remove(struct hash_cell *hc)
232{ 232{
233 struct dm_table *table;
234
233 /* remove from the dev hash */ 235 /* remove from the dev hash */
234 list_del(&hc->uuid_list); 236 list_del(&hc->uuid_list);
235 list_del(&hc->name_list); 237 list_del(&hc->name_list);
236 unregister_with_devfs(hc); 238 unregister_with_devfs(hc);
237 dm_set_mdptr(hc->md, NULL); 239 dm_set_mdptr(hc->md, NULL);
240
241 table = dm_get_table(hc->md);
242 if (table) {
243 dm_table_event(table);
244 dm_table_put(table);
245 }
246
238 dm_put(hc->md); 247 dm_put(hc->md);
239 if (hc->new_map) 248 if (hc->new_map)
240 dm_table_put(hc->new_map); 249 dm_table_put(hc->new_map);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 785806bdb248..f9b7b32d5d5c 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -329,13 +329,17 @@ static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio,
329/* 329/*
330 * If we run out of usable paths, should we queue I/O or error it? 330 * If we run out of usable paths, should we queue I/O or error it?
331 */ 331 */
332static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path) 332static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
333 unsigned save_old_value)
333{ 334{
334 unsigned long flags; 335 unsigned long flags;
335 336
336 spin_lock_irqsave(&m->lock, flags); 337 spin_lock_irqsave(&m->lock, flags);
337 338
338 m->saved_queue_if_no_path = m->queue_if_no_path; 339 if (save_old_value)
340 m->saved_queue_if_no_path = m->queue_if_no_path;
341 else
342 m->saved_queue_if_no_path = queue_if_no_path;
339 m->queue_if_no_path = queue_if_no_path; 343 m->queue_if_no_path = queue_if_no_path;
340 if (!m->queue_if_no_path && m->queue_size) 344 if (!m->queue_if_no_path && m->queue_size)
341 queue_work(kmultipathd, &m->process_queued_ios); 345 queue_work(kmultipathd, &m->process_queued_ios);
@@ -677,7 +681,7 @@ static int parse_features(struct arg_set *as, struct multipath *m,
677 return 0; 681 return 0;
678 682
679 if (!strnicmp(shift(as), MESG_STR("queue_if_no_path"))) 683 if (!strnicmp(shift(as), MESG_STR("queue_if_no_path")))
680 return queue_if_no_path(m, 1); 684 return queue_if_no_path(m, 1, 0);
681 else { 685 else {
682 ti->error = "Unrecognised multipath feature request"; 686 ti->error = "Unrecognised multipath feature request";
683 return -EINVAL; 687 return -EINVAL;
@@ -1077,7 +1081,7 @@ static void multipath_presuspend(struct dm_target *ti)
1077{ 1081{
1078 struct multipath *m = (struct multipath *) ti->private; 1082 struct multipath *m = (struct multipath *) ti->private;
1079 1083
1080 queue_if_no_path(m, 0); 1084 queue_if_no_path(m, 0, 1);
1081} 1085}
1082 1086
1083/* 1087/*
@@ -1222,9 +1226,9 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
1222 1226
1223 if (argc == 1) { 1227 if (argc == 1) {
1224 if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) 1228 if (!strnicmp(argv[0], MESG_STR("queue_if_no_path")))
1225 return queue_if_no_path(m, 1); 1229 return queue_if_no_path(m, 1, 0);
1226 else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) 1230 else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path")))
1227 return queue_if_no_path(m, 0); 1231 return queue_if_no_path(m, 0, 0);
1228 } 1232 }
1229 1233
1230 if (argc != 2) 1234 if (argc != 2)
diff --git a/drivers/md/raid6.h b/drivers/md/raid6.h
index f80ee6350edf..31cbee71365f 100644
--- a/drivers/md/raid6.h
+++ b/drivers/md/raid6.h
@@ -69,9 +69,13 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
69#define __init 69#define __init
70#define __exit 70#define __exit
71#define __attribute_const__ __attribute__((const)) 71#define __attribute_const__ __attribute__((const))
72#define noinline __attribute__((noinline))
72 73
73#define preempt_enable() 74#define preempt_enable()
74#define preempt_disable() 75#define preempt_disable()
76#define cpu_has_feature(x) 1
77#define enable_kernel_altivec()
78#define disable_kernel_altivec()
75 79
76#endif /* __KERNEL__ */ 80#endif /* __KERNEL__ */
77 81
diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c
index acf386fc4b4f..51c63c0cf1c9 100644
--- a/drivers/md/raid6algos.c
+++ b/drivers/md/raid6algos.c
@@ -19,6 +19,7 @@
19#include "raid6.h" 19#include "raid6.h"
20#ifndef __KERNEL__ 20#ifndef __KERNEL__
21#include <sys/mman.h> 21#include <sys/mman.h>
22#include <stdio.h>
22#endif 23#endif
23 24
24struct raid6_calls raid6_call; 25struct raid6_calls raid6_call;
diff --git a/drivers/md/raid6altivec.uc b/drivers/md/raid6altivec.uc
index 1de8f030eee0..b9afd35b8812 100644
--- a/drivers/md/raid6altivec.uc
+++ b/drivers/md/raid6altivec.uc
@@ -27,16 +27,20 @@
27#ifdef CONFIG_ALTIVEC 27#ifdef CONFIG_ALTIVEC
28 28
29#include <altivec.h> 29#include <altivec.h>
30#include <asm/system.h> 30#ifdef __KERNEL__
31#include <asm/cputable.h> 31# include <asm/system.h>
32# include <asm/cputable.h>
33#endif
32 34
33/* 35/*
34 * This is the C data type to use 36 * This is the C data type to use. We use a vector of
37 * signed char so vec_cmpgt() will generate the right
38 * instruction.
35 */ 39 */
36 40
37typedef vector unsigned char unative_t; 41typedef vector signed char unative_t;
38 42
39#define NBYTES(x) ((vector unsigned char) {x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x}) 43#define NBYTES(x) ((vector signed char) {x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x})
40#define NSIZE sizeof(unative_t) 44#define NSIZE sizeof(unative_t)
41 45
42/* 46/*
@@ -108,7 +112,11 @@ int raid6_have_altivec(void);
108int raid6_have_altivec(void) 112int raid6_have_altivec(void)
109{ 113{
110 /* This assumes either all CPUs have Altivec or none does */ 114 /* This assumes either all CPUs have Altivec or none does */
115# ifdef __KERNEL__
111 return cpu_has_feature(CPU_FTR_ALTIVEC); 116 return cpu_has_feature(CPU_FTR_ALTIVEC);
117# else
118 return 1;
119# endif
112} 120}
113#endif 121#endif
114 122
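
The comment added to raid6altivec.uc above explains the switch of unative_t from unsigned to signed vector elements: the Q-syndrome step needs a per-byte mask that is 0xff wherever a byte's top bit is set, and vec_cmpgt() against zero only yields that when the elements compare as signed. A small stand-alone illustration of the point (not the .uc file's actual macro; assumes GCC with -maltivec):

	#include <altivec.h>

	/* 0xff in every lane whose sign bit is set, 0x00 elsewhere.  With the
	 * old "vector unsigned char" element type, vec_cmpgt(zero, v) could
	 * never be true, so this mask would always come out as zero. */
	static vector bool char top_bit_mask(vector signed char v)
	{
		vector signed char zero = vec_splat_s8(0);

		return vec_cmpgt(zero, v);
	}
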
diff --git a/drivers/md/raid6test/Makefile b/drivers/md/raid6test/Makefile
index 557806728609..78e0396adf2a 100644
--- a/drivers/md/raid6test/Makefile
+++ b/drivers/md/raid6test/Makefile
@@ -8,6 +8,8 @@ OPTFLAGS = -O2 # Adjust as desired
8CFLAGS = -I.. -g $(OPTFLAGS) 8CFLAGS = -I.. -g $(OPTFLAGS)
9LD = ld 9LD = ld
10PERL = perl 10PERL = perl
11AR = ar
12RANLIB = ranlib
11 13
12.c.o: 14.c.o:
13 $(CC) $(CFLAGS) -c -o $@ $< 15 $(CC) $(CFLAGS) -c -o $@ $<
@@ -18,18 +20,33 @@ PERL = perl
18%.uc: ../%.uc 20%.uc: ../%.uc
19 cp -f $< $@ 21 cp -f $< $@
20 22
21all: raid6.o raid6test 23all: raid6.a raid6test
22 24
23raid6.o: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \ 25raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \
24 raid6int32.o \ 26 raid6int32.o \
25 raid6mmx.o raid6sse1.o raid6sse2.o \ 27 raid6mmx.o raid6sse1.o raid6sse2.o \
28 raid6altivec1.o raid6altivec2.o raid6altivec4.o raid6altivec8.o \
26 raid6recov.o raid6algos.o \ 29 raid6recov.o raid6algos.o \
27 raid6tables.o 30 raid6tables.o
28 $(LD) -r -o $@ $^ 31 rm -f $@
32 $(AR) cq $@ $^
33 $(RANLIB) $@
29 34
30raid6test: raid6.o test.c 35raid6test: test.c raid6.a
31 $(CC) $(CFLAGS) -o raid6test $^ 36 $(CC) $(CFLAGS) -o raid6test $^
32 37
38raid6altivec1.c: raid6altivec.uc ../unroll.pl
39 $(PERL) ../unroll.pl 1 < raid6altivec.uc > $@
40
41raid6altivec2.c: raid6altivec.uc ../unroll.pl
42 $(PERL) ../unroll.pl 2 < raid6altivec.uc > $@
43
44raid6altivec4.c: raid6altivec.uc ../unroll.pl
45 $(PERL) ../unroll.pl 4 < raid6altivec.uc > $@
46
47raid6altivec8.c: raid6altivec.uc ../unroll.pl
48 $(PERL) ../unroll.pl 8 < raid6altivec.uc > $@
49
33raid6int1.c: raid6int.uc ../unroll.pl 50raid6int1.c: raid6int.uc ../unroll.pl
34 $(PERL) ../unroll.pl 1 < raid6int.uc > $@ 51 $(PERL) ../unroll.pl 1 < raid6int.uc > $@
35 52
@@ -52,7 +69,7 @@ raid6tables.c: mktables
52 ./mktables > raid6tables.c 69 ./mktables > raid6tables.c
53 70
54clean: 71clean:
55 rm -f *.o mktables mktables.c raid6int.uc raid6*.c raid6test 72 rm -f *.o *.a mktables mktables.c raid6int.uc raid6*.c raid6test
56 73
57spotless: clean 74spotless: clean
58 rm -f *~ 75 rm -f *~
diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c
index 87d5f4d8790f..eaf130e666d8 100644
--- a/drivers/media/dvb/frontends/tda10021.c
+++ b/drivers/media/dvb/frontends/tda10021.c
@@ -100,8 +100,8 @@ static u8 tda10021_readreg (struct tda10021_state* state, u8 reg)
100 100
101 ret = i2c_transfer (state->i2c, msg, 2); 101 ret = i2c_transfer (state->i2c, msg, 2);
102 if (ret != 2) 102 if (ret != 2)
103 printk("DVB: TDA10021(%d): %s: readreg error (ret == %i)\n", 103 printk("DVB: TDA10021: %s: readreg error (ret == %i)\n",
104 state->frontend.dvb->num, __FUNCTION__, ret); 104 __FUNCTION__, ret);
105 return b1[0]; 105 return b1[0];
106} 106}
107 107
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 8b4ad70dd1b2..877c770558e9 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -29,7 +29,7 @@
29 29
30#include <linux/module.h> /* Modules */ 30#include <linux/module.h> /* Modules */
31#include <linux/init.h> /* Initdata */ 31#include <linux/init.h> /* Initdata */
32#include <linux/ioport.h> /* check_region, request_region */ 32#include <linux/ioport.h> /* request_region */
33#include <linux/delay.h> /* udelay */ 33#include <linux/delay.h> /* udelay */
34#include <asm/io.h> /* outb, outb_p */ 34#include <asm/io.h> /* outb, outb_p */
35#include <asm/uaccess.h> /* copy to/from user */ 35#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index 013c835ed910..5319a9c9a979 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -26,7 +26,7 @@
26 26
27#include <linux/module.h> /* Modules */ 27#include <linux/module.h> /* Modules */
28#include <linux/init.h> /* Initdata */ 28#include <linux/init.h> /* Initdata */
29#include <linux/ioport.h> /* check_region, request_region */ 29#include <linux/ioport.h> /* request_region */
30#include <linux/delay.h> /* udelay */ 30#include <linux/delay.h> /* udelay */
31#include <asm/io.h> /* outb, outb_p */ 31#include <asm/io.h> /* outb, outb_p */
32#include <asm/uaccess.h> /* copy to/from user */ 32#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 53d399b6652b..022913da8c59 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -29,7 +29,7 @@
29 29
30#include <linux/module.h> /* Modules */ 30#include <linux/module.h> /* Modules */
31#include <linux/init.h> /* Initdata */ 31#include <linux/init.h> /* Initdata */
32#include <linux/ioport.h> /* check_region, request_region */ 32#include <linux/ioport.h> /* request_region */
33#include <linux/delay.h> /* udelay */ 33#include <linux/delay.h> /* udelay */
34#include <asm/io.h> /* outb, outb_p */ 34#include <asm/io.h> /* outb, outb_p */
35#include <asm/uaccess.h> /* copy to/from user */ 35#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index 202bfe6819b8..6418f03b9ce4 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -17,7 +17,7 @@
17 17
18#include <linux/module.h> /* Modules */ 18#include <linux/module.h> /* Modules */
19#include <linux/init.h> /* Initdata */ 19#include <linux/init.h> /* Initdata */
20#include <linux/ioport.h> /* check_region, request_region */ 20#include <linux/ioport.h> /* request_region */
21#include <linux/delay.h> /* udelay */ 21#include <linux/delay.h> /* udelay */
22#include <asm/io.h> /* outb, outb_p */ 22#include <asm/io.h> /* outb, outb_p */
23#include <asm/uaccess.h> /* copy to/from user */ 23#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c
index c00245d4d249..b2256d675b44 100644
--- a/drivers/media/radio/radio-rtrack2.c
+++ b/drivers/media/radio/radio-rtrack2.c
@@ -10,7 +10,7 @@
10 10
11#include <linux/module.h> /* Modules */ 11#include <linux/module.h> /* Modules */
12#include <linux/init.h> /* Initdata */ 12#include <linux/init.h> /* Initdata */
13#include <linux/ioport.h> /* check_region, request_region */ 13#include <linux/ioport.h> /* request_region */
14#include <linux/delay.h> /* udelay */ 14#include <linux/delay.h> /* udelay */
15#include <asm/io.h> /* outb, outb_p */ 15#include <asm/io.h> /* outb, outb_p */
16#include <asm/uaccess.h> /* copy to/from user */ 16#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 3a464a09221f..6f03ce4dd7b0 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -18,7 +18,7 @@
18#include <linux/kernel.h> /* __setup */ 18#include <linux/kernel.h> /* __setup */
19#include <linux/module.h> /* Modules */ 19#include <linux/module.h> /* Modules */
20#include <linux/init.h> /* Initdata */ 20#include <linux/init.h> /* Initdata */
21#include <linux/ioport.h> /* check_region, request_region */ 21#include <linux/ioport.h> /* request_region */
22#include <linux/delay.h> /* udelay */ 22#include <linux/delay.h> /* udelay */
23#include <linux/videodev.h> /* kernel radio structs */ 23#include <linux/videodev.h> /* kernel radio structs */
24#include <linux/isapnp.h> 24#include <linux/isapnp.h>
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 0732efda6a98..71971e9bb342 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -14,7 +14,7 @@
14 14
15#include <linux/module.h> /* Modules */ 15#include <linux/module.h> /* Modules */
16#include <linux/init.h> /* Initdata */ 16#include <linux/init.h> /* Initdata */
17#include <linux/ioport.h> /* check_region, request_region */ 17#include <linux/ioport.h> /* request_region */
18#include <linux/delay.h> /* udelay */ 18#include <linux/delay.h> /* udelay */
19#include <asm/io.h> /* outb, outb_p */ 19#include <asm/io.h> /* outb, outb_p */
20#include <asm/uaccess.h> /* copy to/from user */ 20#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c
index 248d67fde037..b03573c6840e 100644
--- a/drivers/media/radio/radio-terratec.c
+++ b/drivers/media/radio/radio-terratec.c
@@ -25,7 +25,7 @@
25 25
26#include <linux/module.h> /* Modules */ 26#include <linux/module.h> /* Modules */
27#include <linux/init.h> /* Initdata */ 27#include <linux/init.h> /* Initdata */
28#include <linux/ioport.h> /* check_region, request_region */ 28#include <linux/ioport.h> /* request_region */
29#include <linux/delay.h> /* udelay */ 29#include <linux/delay.h> /* udelay */
30#include <asm/io.h> /* outb, outb_p */ 30#include <asm/io.h> /* outb, outb_p */
31#include <asm/uaccess.h> /* copy to/from user */ 31#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c
index d7da901ebe90..f304f3c14763 100644
--- a/drivers/media/radio/radio-typhoon.c
+++ b/drivers/media/radio/radio-typhoon.c
@@ -31,7 +31,7 @@
31 31
32#include <linux/module.h> /* Modules */ 32#include <linux/module.h> /* Modules */
33#include <linux/init.h> /* Initdata */ 33#include <linux/init.h> /* Initdata */
34#include <linux/ioport.h> /* check_region, request_region */ 34#include <linux/ioport.h> /* request_region */
35#include <linux/proc_fs.h> /* radio card status report */ 35#include <linux/proc_fs.h> /* radio card status report */
36#include <asm/io.h> /* outb, outb_p */ 36#include <asm/io.h> /* outb, outb_p */
37#include <asm/uaccess.h> /* copy to/from user */ 37#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c
index 342f92df4aba..4c6d6fb49034 100644
--- a/drivers/media/radio/radio-zoltrix.c
+++ b/drivers/media/radio/radio-zoltrix.c
@@ -28,7 +28,7 @@
28 28
29#include <linux/module.h> /* Modules */ 29#include <linux/module.h> /* Modules */
30#include <linux/init.h> /* Initdata */ 30#include <linux/init.h> /* Initdata */
31#include <linux/ioport.h> /* check_region, request_region */ 31#include <linux/ioport.h> /* request_region */
32#include <linux/delay.h> /* udelay, msleep */ 32#include <linux/delay.h> /* udelay, msleep */
33#include <asm/io.h> /* outb, outb_p */ 33#include <asm/io.h> /* outb, outb_p */
34#include <asm/uaccess.h> /* copy to/from user */ 34#include <asm/uaccess.h> /* copy to/from user */
diff --git a/drivers/media/video/bttv-cards.c b/drivers/media/video/bttv-cards.c
index 190977a1e549..6c332800d6ab 100644
--- a/drivers/media/video/bttv-cards.c
+++ b/drivers/media/video/bttv-cards.c
@@ -2398,7 +2398,7 @@ struct tvcard bttv_tvcards[] = {
2398 .svhs = 2, 2398 .svhs = 2,
2399 .muxsel = { 2, 3 }, 2399 .muxsel = { 2, 3 },
2400 .gpiomask = 0x00e00007, 2400 .gpiomask = 0x00e00007,
2401 .audiomux = { 0x00400005, 0, 0, 0, 0, 0 }, 2401 .audiomux = { 0x00400005, 0, 0x00000001, 0, 0x00c00007, 0 },
2402 .no_msp34xx = 1, 2402 .no_msp34xx = 1,
2403 .no_tda9875 = 1, 2403 .no_tda9875 = 1,
2404 .no_tda7432 = 1, 2404 .no_tda7432 = 1,
diff --git a/drivers/media/video/bttv-driver.c b/drivers/media/video/bttv-driver.c
index a564321db2f0..c062a017491e 100644
--- a/drivers/media/video/bttv-driver.c
+++ b/drivers/media/video/bttv-driver.c
@@ -763,21 +763,21 @@ static void set_pll(struct bttv *btv)
763 /* no PLL needed */ 763 /* no PLL needed */
764 if (btv->pll.pll_current == 0) 764 if (btv->pll.pll_current == 0)
765 return; 765 return;
766 vprintk(KERN_INFO "bttv%d: PLL can sleep, using XTAL (%d).\n", 766 bttv_printk(KERN_INFO "bttv%d: PLL can sleep, using XTAL (%d).\n",
767 btv->c.nr,btv->pll.pll_ifreq); 767 btv->c.nr,btv->pll.pll_ifreq);
768 btwrite(0x00,BT848_TGCTRL); 768 btwrite(0x00,BT848_TGCTRL);
769 btwrite(0x00,BT848_PLL_XCI); 769 btwrite(0x00,BT848_PLL_XCI);
770 btv->pll.pll_current = 0; 770 btv->pll.pll_current = 0;
771 return; 771 return;
772 } 772 }
773 773
774 vprintk(KERN_INFO "bttv%d: PLL: %d => %d ",btv->c.nr, 774 bttv_printk(KERN_INFO "bttv%d: PLL: %d => %d ",btv->c.nr,
775 btv->pll.pll_ifreq, btv->pll.pll_ofreq); 775 btv->pll.pll_ifreq, btv->pll.pll_ofreq);
776 set_pll_freq(btv, btv->pll.pll_ifreq, btv->pll.pll_ofreq); 776 set_pll_freq(btv, btv->pll.pll_ifreq, btv->pll.pll_ofreq);
777 777
778 for (i=0; i<10; i++) { 778 for (i=0; i<10; i++) {
779 /* Let other people run while the PLL stabilizes */ 779 /* Let other people run while the PLL stabilizes */
780 vprintk("."); 780 bttv_printk(".");
781 msleep(10); 781 msleep(10);
782 782
783 if (btread(BT848_DSTATUS) & BT848_DSTATUS_PLOCK) { 783 if (btread(BT848_DSTATUS) & BT848_DSTATUS_PLOCK) {
@@ -785,12 +785,12 @@ static void set_pll(struct bttv *btv)
785 } else { 785 } else {
786 btwrite(0x08,BT848_TGCTRL); 786 btwrite(0x08,BT848_TGCTRL);
787 btv->pll.pll_current = btv->pll.pll_ofreq; 787 btv->pll.pll_current = btv->pll.pll_ofreq;
788 vprintk(" ok\n"); 788 bttv_printk(" ok\n");
789 return; 789 return;
790 } 790 }
791 } 791 }
792 btv->pll.pll_current = -1; 792 btv->pll.pll_current = -1;
793 vprintk("failed\n"); 793 bttv_printk("failed\n");
794 return; 794 return;
795} 795}
796 796
diff --git a/drivers/media/video/bttvp.h b/drivers/media/video/bttvp.h
index 9b0b7ca035f8..7a312f79340a 100644
--- a/drivers/media/video/bttvp.h
+++ b/drivers/media/video/bttvp.h
@@ -221,7 +221,7 @@ extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
221extern int init_bttv_i2c(struct bttv *btv); 221extern int init_bttv_i2c(struct bttv *btv);
222extern int fini_bttv_i2c(struct bttv *btv); 222extern int fini_bttv_i2c(struct bttv *btv);
223 223
224#define vprintk if (bttv_verbose) printk 224#define bttv_printk if (bttv_verbose) printk
225#define dprintk if (bttv_debug >= 1) printk 225#define dprintk if (bttv_debug >= 1) printk
226#define d2printk if (bttv_debug >= 2) printk 226#define d2printk if (bttv_debug >= 2) printk
227 227
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 8c08b7f1ad23..b7ec9bf45085 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -1397,7 +1397,7 @@ static void destroy_proc_cpia_cam(struct cam_data *cam)
1397 1397
1398static void proc_cpia_create(void) 1398static void proc_cpia_create(void)
1399{ 1399{
1400 cpia_proc_root = create_proc_entry("cpia", S_IFDIR, NULL); 1400 cpia_proc_root = proc_mkdir("cpia", NULL);
1401 1401
1402 if (cpia_proc_root) 1402 if (cpia_proc_root)
1403 cpia_proc_root->owner = THIS_MODULE; 1403 cpia_proc_root->owner = THIS_MODULE;
diff --git a/drivers/media/video/cx88/cx88-dvb.c b/drivers/media/video/cx88/cx88-dvb.c
index c9106b1d79df..4334744652de 100644
--- a/drivers/media/video/cx88/cx88-dvb.c
+++ b/drivers/media/video/cx88/cx88-dvb.c
@@ -221,9 +221,7 @@ static int lgdt330x_pll_set(struct dvb_frontend* fe,
221 int err; 221 int err;
222 222
223 /* Put the analog decoder in standby to keep it quiet */ 223 /* Put the analog decoder in standby to keep it quiet */
224 if (core->tda9887_conf) { 224 cx88_call_i2c_clients (dev->core, TUNER_SET_STANDBY, NULL);
225 cx88_call_i2c_clients (dev->core, TUNER_SET_STANDBY, NULL);
226 }
227 225
228 dvb_pll_configure(core->pll_desc, buf, params->frequency, 0); 226 dvb_pll_configure(core->pll_desc, buf, params->frequency, 0);
229 dprintk(1, "%s: tuner at 0x%02x bytes: 0x%02x 0x%02x 0x%02x 0x%02x\n", 227 dprintk(1, "%s: tuner at 0x%02x bytes: 0x%02x 0x%02x 0x%02x 0x%02x\n",
@@ -402,6 +400,9 @@ static int dvb_register(struct cx8802_dev *dev)
402 dev->dvb.frontend->ops->info.frequency_max = dev->core->pll_desc->max; 400 dev->dvb.frontend->ops->info.frequency_max = dev->core->pll_desc->max;
403 } 401 }
404 402
403 /* Put the analog decoder in standby to keep it quiet */
404 cx88_call_i2c_clients (dev->core, TUNER_SET_STANDBY, NULL);
405
405 /* register everything */ 406 /* register everything */
406 return videobuf_dvb_register(&dev->dvb, THIS_MODULE, dev); 407 return videobuf_dvb_register(&dev->dvb, THIS_MODULE, dev);
407} 408}
diff --git a/drivers/media/video/rds.h b/drivers/media/video/rds.h
index 30337d0f1a87..0d30eb744e61 100644
--- a/drivers/media/video/rds.h
+++ b/drivers/media/video/rds.h
@@ -31,7 +31,7 @@
31struct rds_command { 31struct rds_command {
32 unsigned int block_count; 32 unsigned int block_count;
33 int result; 33 int result;
34 unsigned char *buffer; 34 unsigned char __user *buffer;
35 struct file *instance; 35 struct file *instance;
36 poll_table *event_list; 36 poll_table *event_list;
37}; 37};
diff --git a/drivers/media/video/saa6588.c b/drivers/media/video/saa6588.c
index 1a657a70ff43..72b70eb5da1d 100644
--- a/drivers/media/video/saa6588.c
+++ b/drivers/media/video/saa6588.c
@@ -157,7 +157,7 @@ static struct i2c_client client_template;
157 157
158/* ---------------------------------------------------------------------- */ 158/* ---------------------------------------------------------------------- */
159 159
160static int block_to_user_buf(struct saa6588 *s, unsigned char *user_buf) 160static int block_to_user_buf(struct saa6588 *s, unsigned char __user *user_buf)
161{ 161{
162 int i; 162 int i;
163 163
@@ -191,7 +191,7 @@ static void read_from_buf(struct saa6588 *s, struct rds_command *a)
191{ 191{
192 unsigned long flags; 192 unsigned long flags;
193 193
194 unsigned char *buf_ptr = a->buffer; /* This is a user space buffer! */ 194 unsigned char __user *buf_ptr = a->buffer;
195 unsigned int i; 195 unsigned int i;
196 unsigned int rd_blocks; 196 unsigned int rd_blocks;
197 197
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
index 33f209a39cb6..1883d22cffeb 100644
--- a/drivers/message/fusion/Kconfig
+++ b/drivers/message/fusion/Kconfig
@@ -35,6 +35,23 @@ config FUSION_FC
35 LSIFC929X 35 LSIFC929X
36 LSIFC929XL 36 LSIFC929XL
37 37
38config FUSION_SAS
39 tristate "Fusion MPT ScsiHost drivers for SAS"
40 depends on PCI && SCSI
41 select FUSION
42 select SCSI_SAS_ATTRS
43 ---help---
 44	  SCSI HOST support for SAS host adapters.
45
46 List of supported controllers:
47
48 LSISAS1064
49 LSISAS1066
50 LSISAS1068
51 LSISAS1064E
52 LSISAS1066E
53 LSISAS1068E
54
38config FUSION_MAX_SGE 55config FUSION_MAX_SGE
39 int "Maximum number of scatter gather entries (16 - 128)" 56 int "Maximum number of scatter gather entries (16 - 128)"
40 depends on FUSION 57 depends on FUSION
diff --git a/drivers/message/fusion/Makefile b/drivers/message/fusion/Makefile
index 1d2f9db813c1..8a2e2657f4c2 100644
--- a/drivers/message/fusion/Makefile
+++ b/drivers/message/fusion/Makefile
@@ -34,5 +34,6 @@
34 34
35obj-$(CONFIG_FUSION_SPI) += mptbase.o mptscsih.o mptspi.o 35obj-$(CONFIG_FUSION_SPI) += mptbase.o mptscsih.o mptspi.o
36obj-$(CONFIG_FUSION_FC) += mptbase.o mptscsih.o mptfc.o 36obj-$(CONFIG_FUSION_FC) += mptbase.o mptscsih.o mptfc.o
37obj-$(CONFIG_FUSION_SAS) += mptbase.o mptscsih.o mptsas.o
37obj-$(CONFIG_FUSION_CTL) += mptctl.o 38obj-$(CONFIG_FUSION_CTL) += mptctl.o
38obj-$(CONFIG_FUSION_LAN) += mptlan.o 39obj-$(CONFIG_FUSION_LAN) += mptlan.o
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index f517d0692d5f..790a2932ded9 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -135,13 +135,12 @@ static void mpt_adapter_dispose(MPT_ADAPTER *ioc);
135 135
136static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc); 136static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc);
137static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag); 137static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag);
138//static u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
139static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason); 138static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason);
140static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag); 139static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
141static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag); 140static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag);
142static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag); 141static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
143static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag); 142static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag);
144static int mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag); 143static int mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag);
145static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag); 144static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
146static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag); 145static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
147static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag); 146static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag);
@@ -152,6 +151,7 @@ static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
152static int GetLanConfigPages(MPT_ADAPTER *ioc); 151static int GetLanConfigPages(MPT_ADAPTER *ioc);
153static int GetFcPortPage0(MPT_ADAPTER *ioc, int portnum); 152static int GetFcPortPage0(MPT_ADAPTER *ioc, int portnum);
154static int GetIoUnitPage2(MPT_ADAPTER *ioc); 153static int GetIoUnitPage2(MPT_ADAPTER *ioc);
154int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
155static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum); 155static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
156static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum); 156static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
157static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc); 157static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
@@ -159,6 +159,8 @@ static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
159static void mpt_timer_expired(unsigned long data); 159static void mpt_timer_expired(unsigned long data);
160static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch); 160static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch);
161static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp); 161static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
162static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
163static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
162 164
163#ifdef CONFIG_PROC_FS 165#ifdef CONFIG_PROC_FS
164static int procmpt_summary_read(char *buf, char **start, off_t offset, 166static int procmpt_summary_read(char *buf, char **start, off_t offset,
@@ -175,6 +177,7 @@ static int ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *
175static void mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf); 177static void mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
176static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info); 178static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
177static void mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info); 179static void mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info);
180static void mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info);
178 181
179/* module entry point */ 182/* module entry point */
180static int __init fusion_init (void); 183static int __init fusion_init (void);
@@ -206,6 +209,144 @@ pci_enable_io_access(struct pci_dev *pdev)
206 pci_write_config_word(pdev, PCI_COMMAND, command_reg); 209 pci_write_config_word(pdev, PCI_COMMAND, command_reg);
207} 210}
208 211
212/*
213 * Process turbo (context) reply...
214 */
215static void
216mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
217{
218 MPT_FRAME_HDR *mf = NULL;
219 MPT_FRAME_HDR *mr = NULL;
220 int req_idx = 0;
221 int cb_idx;
222
223 dmfprintk((MYIOC_s_INFO_FMT "Got TURBO reply req_idx=%08x\n",
224 ioc->name, pa));
225
226 switch (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT) {
227 case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
228 req_idx = pa & 0x0000FFFF;
229 cb_idx = (pa & 0x00FF0000) >> 16;
230 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
231 break;
232 case MPI_CONTEXT_REPLY_TYPE_LAN:
233 cb_idx = mpt_lan_index;
234 /*
235 * Blind set of mf to NULL here was fatal
236 * after lan_reply says "freeme"
237 * Fix sort of combined with an optimization here;
238 * added explicit check for case where lan_reply
239 * was just returning 1 and doing nothing else.
240 * For this case skip the callback, but set up
241 * proper mf value first here:-)
242 */
243 if ((pa & 0x58000000) == 0x58000000) {
244 req_idx = pa & 0x0000FFFF;
245 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
246 mpt_free_msg_frame(ioc, mf);
247 mb();
248 return;
249 break;
250 }
251 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
252 break;
253 case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
254 cb_idx = mpt_stm_index;
255 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
256 break;
257 default:
258 cb_idx = 0;
259 BUG();
260 }
261
262 /* Check for (valid) IO callback! */
263 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
264 MptCallbacks[cb_idx] == NULL) {
265 printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
266 __FUNCTION__, ioc->name, cb_idx);
267 goto out;
268 }
269
270 if (MptCallbacks[cb_idx](ioc, mf, mr))
271 mpt_free_msg_frame(ioc, mf);
272 out:
273 mb();
274}
275
276static void
277mpt_reply(MPT_ADAPTER *ioc, u32 pa)
278{
279 MPT_FRAME_HDR *mf;
280 MPT_FRAME_HDR *mr;
281 int req_idx;
282 int cb_idx;
283 int freeme;
284
285 u32 reply_dma_low;
286 u16 ioc_stat;
287
288 /* non-TURBO reply! Hmmm, something may be up...
289 * Newest turbo reply mechanism; get address
290 * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
291 */
292
293 /* Map DMA address of reply header to cpu address.
294 * pa is 32 bits - but the dma address may be 32 or 64 bits
295	 * get offset based only on the low addresses
296 */
297
298 reply_dma_low = (pa <<= 1);
299 mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
300 (reply_dma_low - ioc->reply_frames_low_dma));
301
302 req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
303 cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
304 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
305
306 dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
307 ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
308 DBG_DUMP_REPLY_FRAME(mr)
309
310 /* Check/log IOC log info
311 */
312 ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
313 if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
314 u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
315 if (ioc->bus_type == FC)
316 mpt_fc_log_info(ioc, log_info);
317 else if (ioc->bus_type == SCSI)
318 mpt_sp_log_info(ioc, log_info);
319 else if (ioc->bus_type == SAS)
320 mpt_sas_log_info(ioc, log_info);
321 }
322 if (ioc_stat & MPI_IOCSTATUS_MASK) {
323 if (ioc->bus_type == SCSI &&
324 cb_idx != mpt_stm_index &&
325 cb_idx != mpt_lan_index)
326 mpt_sp_ioc_info(ioc, (u32)ioc_stat, mf);
327 }
328
329
330 /* Check for (valid) IO callback! */
331 if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
332 MptCallbacks[cb_idx] == NULL) {
333 printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
 334			__FUNCTION__, ioc->name, cb_idx);	 334				ioc->name, __FUNCTION__, cb_idx);
335 freeme = 0;
336 goto out;
337 }
338
339 freeme = MptCallbacks[cb_idx](ioc, mf, mr);
340
341 out:
342 /* Flush (non-TURBO) reply with a WRITE! */
343 CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
344
345 if (freeme)
346 mpt_free_msg_frame(ioc, mf);
347 mb();
348}
349
209/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 350/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
210/* 351/*
211 * mpt_interrupt - MPT adapter (IOC) specific interrupt handler. 352 * mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
@@ -227,164 +368,21 @@ pci_enable_io_access(struct pci_dev *pdev)
227static irqreturn_t 368static irqreturn_t
228mpt_interrupt(int irq, void *bus_id, struct pt_regs *r) 369mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
229{ 370{
230 MPT_ADAPTER *ioc; 371 MPT_ADAPTER *ioc = bus_id;
231 MPT_FRAME_HDR *mf; 372 u32 pa;
232 MPT_FRAME_HDR *mr;
233 u32 pa;
234 int req_idx;
235 int cb_idx;
236 int type;
237 int freeme;
238
239 ioc = (MPT_ADAPTER *)bus_id;
240 373
241 /* 374 /*
242 * Drain the reply FIFO! 375 * Drain the reply FIFO!
243 *
244 * NOTES: I've seen up to 10 replies processed in this loop, so far...
245 * Update: I've seen up to 9182 replies processed in this loop! ??
246 * Update: Limit ourselves to processing max of N replies
247 * (bottom of loop).
248 */ 376 */
249 while (1) { 377 while (1) {
250 378 pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
251 if ((pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo)) == 0xFFFFFFFF) 379 if (pa == 0xFFFFFFFF)
252 return IRQ_HANDLED; 380 return IRQ_HANDLED;
253 381 else if (pa & MPI_ADDRESS_REPLY_A_BIT)
254 cb_idx = 0; 382 mpt_reply(ioc, pa);
255 freeme = 0; 383 else
256 384 mpt_turbo_reply(ioc, pa);
257 /* 385 }
258 * Check for non-TURBO reply!
259 */
260 if (pa & MPI_ADDRESS_REPLY_A_BIT) {
261 u32 reply_dma_low;
262 u16 ioc_stat;
263
264 /* non-TURBO reply! Hmmm, something may be up...
265 * Newest turbo reply mechanism; get address
266 * via left shift 1 (get rid of MPI_ADDRESS_REPLY_A_BIT)!
267 */
268
269 /* Map DMA address of reply header to cpu address.
270 * pa is 32 bits - but the dma address may be 32 or 64 bits
271 * get offset based only only the low addresses
272 */
273 reply_dma_low = (pa = (pa << 1));
274 mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
275 (reply_dma_low - ioc->reply_frames_low_dma));
276
277 req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
278 cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
279 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
280
281 dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
282 ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
283 DBG_DUMP_REPLY_FRAME(mr)
284
285 /* Check/log IOC log info
286 */
287 ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
288 if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
289 u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
290 if (ioc->bus_type == FC)
291 mpt_fc_log_info(ioc, log_info);
292 else if (ioc->bus_type == SCSI)
293 mpt_sp_log_info(ioc, log_info);
294 }
295 if (ioc_stat & MPI_IOCSTATUS_MASK) {
296 if (ioc->bus_type == SCSI)
297 mpt_sp_ioc_info(ioc, (u32)ioc_stat, mf);
298 }
299 } else {
300 /*
301 * Process turbo (context) reply...
302 */
303 dmfprintk((MYIOC_s_INFO_FMT "Got TURBO reply req_idx=%08x\n", ioc->name, pa));
304 type = (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT);
305 if (type == MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET) {
306 cb_idx = mpt_stm_index;
307 mf = NULL;
308 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
309 } else if (type == MPI_CONTEXT_REPLY_TYPE_LAN) {
310 cb_idx = mpt_lan_index;
311 /* Blind set of mf to NULL here was fatal
312 * after lan_reply says "freeme"
313 * Fix sort of combined with an optimization here;
314 * added explicit check for case where lan_reply
315 * was just returning 1 and doing nothing else.
316 * For this case skip the callback, but set up
317 * proper mf value first here:-)
318 */
319 if ((pa & 0x58000000) == 0x58000000) {
320 req_idx = pa & 0x0000FFFF;
321 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
322 freeme = 1;
323 /*
324 * IMPORTANT! Invalidate the callback!
325 */
326 cb_idx = 0;
327 } else {
328 mf = NULL;
329 }
330 mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
331 } else {
332 req_idx = pa & 0x0000FFFF;
333 cb_idx = (pa & 0x00FF0000) >> 16;
334 mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
335 mr = NULL;
336 }
337 pa = 0; /* No reply flush! */
338 }
339
340#ifdef MPT_DEBUG_IRQ
341 if (ioc->bus_type == SCSI) {
342 /* Verify mf, mr are reasonable.
343 */
344 if ((mf) && ((mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))
345 || (mf < ioc->req_frames)) ) {
346 printk(MYIOC_s_WARN_FMT
347 "mpt_interrupt: Invalid mf (%p)!\n", ioc->name, (void *)mf);
348 cb_idx = 0;
349 pa = 0;
350 freeme = 0;
351 }
352 if ((pa) && (mr) && ((mr >= MPT_INDEX_2_RFPTR(ioc, ioc->req_depth))
353 || (mr < ioc->reply_frames)) ) {
354 printk(MYIOC_s_WARN_FMT
355 "mpt_interrupt: Invalid rf (%p)!\n", ioc->name, (void *)mr);
356 cb_idx = 0;
357 pa = 0;
358 freeme = 0;
359 }
360 if (cb_idx > (MPT_MAX_PROTOCOL_DRIVERS-1)) {
361 printk(MYIOC_s_WARN_FMT
362 "mpt_interrupt: Invalid cb_idx (%d)!\n", ioc->name, cb_idx);
363 cb_idx = 0;
364 pa = 0;
365 freeme = 0;
366 }
367 }
368#endif
369
370 /* Check for (valid) IO callback! */
371 if (cb_idx) {
372 /* Do the callback! */
373 freeme = (*(MptCallbacks[cb_idx]))(ioc, mf, mr);
374 }
375
376 if (pa) {
377 /* Flush (non-TURBO) reply with a WRITE! */
378 CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
379 }
380
381 if (freeme) {
382 /* Put Request back on FreeQ! */
383 mpt_free_msg_frame(ioc, mf);
384 }
385
386 mb();
387 } /* drain reply FIFO */
388 386
389 return IRQ_HANDLED; 387 return IRQ_HANDLED;
390} 388}
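The turbo path above packs everything the driver needs into the single 32-bit FIFO word: the request index in bits 0-15, the callback index in bits 16-23, and the reply type in the topmost bits selected by MPI_CONTEXT_REPLY_TYPE_SHIFT. A standalone sketch of that unpacking follows; the shift value and the sample word are illustrative stand-ins, the real definition lives in lsi/mpi.h.

#include <stdint.h>
#include <stdio.h>

#define CONTEXT_REPLY_TYPE_SHIFT 29	/* stand-in for MPI_CONTEXT_REPLY_TYPE_SHIFT, assumed value */

int main(void)
{
	uint32_t pa = 0x00050003;			/* example turbo reply word only */
	unsigned type    = pa >> CONTEXT_REPLY_TYPE_SHIFT;
	unsigned cb_idx  = (pa & 0x00FF0000) >> 16;	/* protocol driver callback index */
	unsigned req_idx = pa & 0x0000FFFF;		/* message frame index */

	printf("type=%u cb_idx=%u req_idx=%u\n", type, cb_idx, req_idx);
	return 0;
}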
@@ -509,6 +507,14 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
509 pCfg->wait_done = 1; 507 pCfg->wait_done = 1;
510 wake_up(&mpt_waitq); 508 wake_up(&mpt_waitq);
511 } 509 }
510 } else if (func == MPI_FUNCTION_SAS_IO_UNIT_CONTROL) {
 511		/* we should always be getting a reply frame */
512 memcpy(ioc->persist_reply_frame, reply,
513 min(MPT_DEFAULT_FRAME_SIZE,
514 4*reply->u.reply.MsgLength));
515 del_timer(&ioc->persist_timer);
516 ioc->persist_wait_done = 1;
517 wake_up(&mpt_waitq);
512 } else { 518 } else {
513 printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n", 519 printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n",
514 ioc->name, func); 520 ioc->name, func);
@@ -750,6 +756,7 @@ mpt_get_msg_frame(int handle, MPT_ADAPTER *ioc)
750 mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR, 756 mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR,
751 u.frame.linkage.list); 757 u.frame.linkage.list);
752 list_del(&mf->u.frame.linkage.list); 758 list_del(&mf->u.frame.linkage.list);
759 mf->u.frame.linkage.arg1 = 0;
753 mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle; /* byte */ 760 mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle; /* byte */
754 req_offset = (u8 *)mf - (u8 *)ioc->req_frames; 761 req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
755 /* u16! */ 762 /* u16! */
@@ -845,6 +852,7 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
845 852
846 /* Put Request back on FreeQ! */ 853 /* Put Request back on FreeQ! */
847 spin_lock_irqsave(&ioc->FreeQlock, flags); 854 spin_lock_irqsave(&ioc->FreeQlock, flags);
855 mf->u.frame.linkage.arg1 = 0xdeadbeaf; /* signature to know if this mf is freed */
848 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ); 856 list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
849#ifdef MFCNT 857#ifdef MFCNT
850 ioc->mfcnt--; 858 ioc->mfcnt--;
@@ -971,12 +979,123 @@ mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
971 979
972 /* Make sure there are no doorbells */ 980 /* Make sure there are no doorbells */
973 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 981 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
974 982
975 return r; 983 return r;
976} 984}
977 985
978/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 986/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
979/** 987/**
988 * mpt_host_page_access_control - provides mechanism for the host
989 * driver to control the IOC's Host Page Buffer access.
990 * @ioc: Pointer to MPT adapter structure
991 * @access_control_value: define bits below
992 *
993 * Access Control Value - bits[15:12]
994 * 0h Reserved
995 * 1h Enable Access { MPI_DB_HPBAC_ENABLE_ACCESS }
996 * 2h Disable Access { MPI_DB_HPBAC_DISABLE_ACCESS }
997 * 3h Free Buffer { MPI_DB_HPBAC_FREE_BUFFER }
998 *
999 * Returns 0 for success, non-zero for failure.
1000 */
1001
1002static int
1003mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
1004{
1005 int r = 0;
1006
1007 /* return if in use */
1008 if (CHIPREG_READ32(&ioc->chip->Doorbell)
1009 & MPI_DOORBELL_ACTIVE)
1010 return -1;
1011
1012 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1013
1014 CHIPREG_WRITE32(&ioc->chip->Doorbell,
1015 ((MPI_FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL
1016 <<MPI_DOORBELL_FUNCTION_SHIFT) |
1017 (access_control_value<<12)));
1018
1019 /* Wait for IOC to clear Doorbell Status bit */
1020 if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
1021 return -2;
1022 }else
1023 return 0;
1024}
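mpt_host_page_access_control() above builds its doorbell write from two fields: the MPI function code shifted into the doorbell's function field and the access-control value in bits 15:12 (1h enable, 2h disable, 3h free, per the comment block). A standalone sketch of the composition; the two numeric defines marked as placeholders are assumptions, the driver takes the real values from lsi/mpi.h.

#include <stdio.h>

#define FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL	0x2D	/* placeholder value */
#define DOORBELL_FUNCTION_SHIFT			24	/* placeholder value */
#define HPBAC_FREE_BUFFER			0x3	/* "Free Buffer", per doc above */

int main(void)
{
	unsigned int doorbell = (FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL
					<< DOORBELL_FUNCTION_SHIFT)
			      | (HPBAC_FREE_BUFFER << 12);

	printf("doorbell word = 0x%08x\n", doorbell);
	return 0;
}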
1025
1026/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1027/**
1028 * mpt_host_page_alloc - allocate system memory for the fw
 1029 * If memory was already allocated, the same pointer is sent again.
 1030 * @ioc: Pointer to MPT adapter structure
 1031 * @ioc_init: Pointer to ioc init config page
1032 *
1033 * Returns 0 for success, non-zero for failure.
1034 */
1035static int
1036mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
1037{
1038 char *psge;
1039 int flags_length;
1040 u32 host_page_buffer_sz=0;
1041
1042 if(!ioc->HostPageBuffer) {
1043
1044 host_page_buffer_sz =
1045 le32_to_cpu(ioc->facts.HostPageBufferSGE.FlagsLength) & 0xFFFFFF;
1046
1047 if(!host_page_buffer_sz)
1048 return 0; /* fw doesn't need any host buffers */
1049
1050 /* spin till we get enough memory */
1051 while(host_page_buffer_sz > 0) {
1052
1053 if((ioc->HostPageBuffer = pci_alloc_consistent(
1054 ioc->pcidev,
1055 host_page_buffer_sz,
1056 &ioc->HostPageBuffer_dma)) != NULL) {
1057
1058 dinitprintk((MYIOC_s_INFO_FMT
1059 "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
1060 ioc->name,
1061 ioc->HostPageBuffer,
1062 ioc->HostPageBuffer_dma,
1063 host_page_buffer_sz));
1064 ioc->alloc_total += host_page_buffer_sz;
1065 ioc->HostPageBuffer_sz = host_page_buffer_sz;
1066 break;
1067 }
1068
1069 host_page_buffer_sz -= (4*1024);
1070 }
1071 }
1072
1073 if(!ioc->HostPageBuffer) {
1074 printk(MYIOC_s_ERR_FMT
1075 "Failed to alloc memory for host_page_buffer!\n",
1076 ioc->name);
1077 return -999;
1078 }
1079
1080 psge = (char *)&ioc_init->HostPageBufferSGE;
1081 flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1082 MPI_SGE_FLAGS_SYSTEM_ADDRESS |
1083 MPI_SGE_FLAGS_32_BIT_ADDRESSING |
1084 MPI_SGE_FLAGS_HOST_TO_IOC |
1085 MPI_SGE_FLAGS_END_OF_BUFFER;
1086 if (sizeof(dma_addr_t) == sizeof(u64)) {
1087 flags_length |= MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1088 }
1089 flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
1090 flags_length |= ioc->HostPageBuffer_sz;
1091 mpt_add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
1092 ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
1093
 1094	return 0;
1095}
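The allocation loop in mpt_host_page_alloc() keeps shaving 4 KB off the requested size until pci_alloc_consistent() succeeds, so the IOC ends up with the largest host page buffer the system will give it. A user-space sketch of that back-off pattern, with malloc() standing in for the DMA allocator and the starting size chosen only for illustration:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t sz = 256 * 1024;		/* requested size, example only */
	void *buf = NULL;

	while (sz > 0) {
		buf = malloc(sz);	/* pci_alloc_consistent() in the driver */
		if (buf)
			break;
		sz -= 4 * 1024;		/* retry with 4 KB less, as above */
	}

	if (!buf) {
		fprintf(stderr, "no memory for host page buffer\n");
		return 1;
	}
	printf("allocated %zu bytes\n", sz);
	free(buf);
	return 0;
}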
1096
1097/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1098/**
980 * mpt_verify_adapter - Given a unique IOC identifier, set pointer to 1099 * mpt_verify_adapter - Given a unique IOC identifier, set pointer to
981 * the associated MPT adapter structure. 1100 * the associated MPT adapter structure.
982 * @iocid: IOC unique identifier (integer) 1101 * @iocid: IOC unique identifier (integer)
@@ -1084,7 +1203,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1084 1203
1085 /* Initilize SCSI Config Data structure 1204 /* Initilize SCSI Config Data structure
1086 */ 1205 */
1087 memset(&ioc->spi_data, 0, sizeof(ScsiCfgData)); 1206 memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
1088 1207
1089 /* Initialize the running configQ head. 1208 /* Initialize the running configQ head.
1090 */ 1209 */
@@ -1213,6 +1332,33 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1213 ioc->prod_name = "LSI53C1035"; 1332 ioc->prod_name = "LSI53C1035";
1214 ioc->bus_type = SCSI; 1333 ioc->bus_type = SCSI;
1215 } 1334 }
1335 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064) {
1336 ioc->prod_name = "LSISAS1064";
1337 ioc->bus_type = SAS;
1338 ioc->errata_flag_1064 = 1;
1339 }
1340 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1066) {
1341 ioc->prod_name = "LSISAS1066";
1342 ioc->bus_type = SAS;
1343 ioc->errata_flag_1064 = 1;
1344 }
1345 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068) {
1346 ioc->prod_name = "LSISAS1068";
1347 ioc->bus_type = SAS;
1348 ioc->errata_flag_1064 = 1;
1349 }
1350 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1064E) {
1351 ioc->prod_name = "LSISAS1064E";
1352 ioc->bus_type = SAS;
1353 }
1354 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1066E) {
1355 ioc->prod_name = "LSISAS1066E";
1356 ioc->bus_type = SAS;
1357 }
1358 else if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1068E) {
1359 ioc->prod_name = "LSISAS1068E";
1360 ioc->bus_type = SAS;
1361 }
1216 1362
1217 if (ioc->errata_flag_1064) 1363 if (ioc->errata_flag_1064)
1218 pci_disable_io_access(pdev); 1364 pci_disable_io_access(pdev);
@@ -1604,8 +1750,23 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1604 */ 1750 */
1605 if (ret == 0) { 1751 if (ret == 0) {
1606 rc = mpt_do_upload(ioc, sleepFlag); 1752 rc = mpt_do_upload(ioc, sleepFlag);
1607 if (rc != 0) 1753 if (rc == 0) {
1754 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
1755 /*
1756 * Maintain only one pointer to FW memory
 1757				 * so there will not be two attempts to
 1758				 * downloadboot onboard dual-function
1759 * chips (mpt_adapter_disable,
1760 * mpt_diag_reset)
1761 */
1762 ioc->cached_fw = NULL;
1763 ddlprintk((MYIOC_s_INFO_FMT ": mpt_upload: alt_%s has cached_fw=%p \n",
1764 ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
1765 }
1766 } else {
1608 printk(KERN_WARNING MYNAM ": firmware upload failure!\n"); 1767 printk(KERN_WARNING MYNAM ": firmware upload failure!\n");
1768 ret = -5;
1769 }
1609 } 1770 }
1610 } 1771 }
1611 } 1772 }
@@ -1640,7 +1801,22 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1640 * and we try GetLanConfigPages again... 1801 * and we try GetLanConfigPages again...
1641 */ 1802 */
1642 if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) { 1803 if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
1643 if (ioc->bus_type == FC) { 1804 if (ioc->bus_type == SAS) {
1805
1806 /* clear persistency table */
1807 if(ioc->facts.IOCExceptions &
1808 MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
1809 ret = mptbase_sas_persist_operation(ioc,
1810 MPI_SAS_OP_CLEAR_NOT_PRESENT);
1811 if(ret != 0)
1812 return -1;
1813 }
1814
1815 /* Find IM volumes
1816 */
1817 mpt_findImVolumes(ioc);
1818
1819 } else if (ioc->bus_type == FC) {
1644 /* 1820 /*
1645 * Pre-fetch FC port WWN and stuff... 1821 * Pre-fetch FC port WWN and stuff...
1646 * (FCPortPage0_t stuff) 1822 * (FCPortPage0_t stuff)
@@ -1783,7 +1959,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
1783 1959
1784 if (ioc->cached_fw != NULL) { 1960 if (ioc->cached_fw != NULL) {
1785 ddlprintk((KERN_INFO MYNAM ": mpt_adapter_disable: Pushing FW onto adapter\n")); 1961 ddlprintk((KERN_INFO MYNAM ": mpt_adapter_disable: Pushing FW onto adapter\n"));
1786 if ((ret = mpt_downloadboot(ioc, NO_SLEEP)) < 0) { 1962 if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)ioc->cached_fw, NO_SLEEP)) < 0) {
1787 printk(KERN_WARNING MYNAM 1963 printk(KERN_WARNING MYNAM
1788 ": firmware downloadboot failure (%d)!\n", ret); 1964 ": firmware downloadboot failure (%d)!\n", ret);
1789 } 1965 }
@@ -1831,9 +2007,9 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
1831 } 2007 }
1832 2008
1833 kfree(ioc->spi_data.nvram); 2009 kfree(ioc->spi_data.nvram);
1834 kfree(ioc->spi_data.pIocPg3); 2010 kfree(ioc->raid_data.pIocPg3);
1835 ioc->spi_data.nvram = NULL; 2011 ioc->spi_data.nvram = NULL;
1836 ioc->spi_data.pIocPg3 = NULL; 2012 ioc->raid_data.pIocPg3 = NULL;
1837 2013
1838 if (ioc->spi_data.pIocPg4 != NULL) { 2014 if (ioc->spi_data.pIocPg4 != NULL) {
1839 sz = ioc->spi_data.IocPg4Sz; 2015 sz = ioc->spi_data.IocPg4Sz;
@@ -1852,6 +2028,23 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
1852 2028
1853 kfree(ioc->ChainToChain); 2029 kfree(ioc->ChainToChain);
1854 ioc->ChainToChain = NULL; 2030 ioc->ChainToChain = NULL;
2031
2032 if (ioc->HostPageBuffer != NULL) {
2033 if((ret = mpt_host_page_access_control(ioc,
2034 MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
2035 printk(KERN_ERR MYNAM
2036 ": %s: host page buffers free failed (%d)!\n",
2037 __FUNCTION__, ret);
2038 }
2039 dexitprintk((KERN_INFO MYNAM ": %s HostPageBuffer free @ %p, sz=%d bytes\n",
2040 ioc->name, ioc->HostPageBuffer, ioc->HostPageBuffer_sz));
2041 pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
2042 ioc->HostPageBuffer,
2043 ioc->HostPageBuffer_dma);
2044 ioc->HostPageBuffer = NULL;
2045 ioc->HostPageBuffer_sz = 0;
2046 ioc->alloc_total -= ioc->HostPageBuffer_sz;
2047 }
1855} 2048}
1856 2049
1857/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2050/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2034,7 +2227,7 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
2034 * Loop here waiting for IOC to come READY. 2227 * Loop here waiting for IOC to come READY.
2035 */ 2228 */
2036 ii = 0; 2229 ii = 0;
2037 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 15; /* 15 seconds */ 2230 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 5; /* 5 seconds */
2038 2231
2039 while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) { 2232 while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
2040 if (ioc_state == MPI_IOC_STATE_OPERATIONAL) { 2233 if (ioc_state == MPI_IOC_STATE_OPERATIONAL) {
@@ -2212,6 +2405,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
2212 le32_to_cpu(facts->CurrentSenseBufferHighAddr); 2405 le32_to_cpu(facts->CurrentSenseBufferHighAddr);
2213 facts->CurReplyFrameSize = 2406 facts->CurReplyFrameSize =
2214 le16_to_cpu(facts->CurReplyFrameSize); 2407 le16_to_cpu(facts->CurReplyFrameSize);
2408 facts->IOCCapabilities = le32_to_cpu(facts->IOCCapabilities);
2215 2409
2216 /* 2410 /*
2217 * Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx 2411 * Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx
@@ -2383,13 +2577,25 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
2383 ddlprintk((MYIOC_s_INFO_FMT "upload_fw %d facts.Flags=%x\n", 2577 ddlprintk((MYIOC_s_INFO_FMT "upload_fw %d facts.Flags=%x\n",
2384 ioc->name, ioc->upload_fw, ioc->facts.Flags)); 2578 ioc->name, ioc->upload_fw, ioc->facts.Flags));
2385 2579
2386 if (ioc->bus_type == FC) 2580 if(ioc->bus_type == SAS)
2581 ioc_init.MaxDevices = ioc->facts.MaxDevices;
2582 else if(ioc->bus_type == FC)
2387 ioc_init.MaxDevices = MPT_MAX_FC_DEVICES; 2583 ioc_init.MaxDevices = MPT_MAX_FC_DEVICES;
2388 else 2584 else
2389 ioc_init.MaxDevices = MPT_MAX_SCSI_DEVICES; 2585 ioc_init.MaxDevices = MPT_MAX_SCSI_DEVICES;
2390
2391 ioc_init.MaxBuses = MPT_MAX_BUS; 2586 ioc_init.MaxBuses = MPT_MAX_BUS;
2392 2587 dinitprintk((MYIOC_s_INFO_FMT "facts.MsgVersion=%x\n",
2588 ioc->name, ioc->facts.MsgVersion));
2589 if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
 2590		/* set MsgVersion and HeaderVersion host driver was built with */
2591 ioc_init.MsgVersion = cpu_to_le16(MPI_VERSION);
2592 ioc_init.HeaderVersion = cpu_to_le16(MPI_HEADER_VERSION);
2593
2594 if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_HOST_PAGE_BUFFER_PERSISTENT) {
2595 ioc_init.HostPageBufferSGE = ioc->facts.HostPageBufferSGE;
2596 } else if(mpt_host_page_alloc(ioc, &ioc_init))
2597 return -99;
2598 }
2393 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */ 2599 ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */
2394 2600
2395 if (sizeof(dma_addr_t) == sizeof(u64)) { 2601 if (sizeof(dma_addr_t) == sizeof(u64)) {
@@ -2403,17 +2609,21 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
2403 ioc_init.HostMfaHighAddr = cpu_to_le32(0); 2609 ioc_init.HostMfaHighAddr = cpu_to_le32(0);
2404 ioc_init.SenseBufferHighAddr = cpu_to_le32(0); 2610 ioc_init.SenseBufferHighAddr = cpu_to_le32(0);
2405 } 2611 }
2406 2612
2407 ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr; 2613 ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr;
2408 ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr; 2614 ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr;
2615 ioc->facts.MaxDevices = ioc_init.MaxDevices;
2616 ioc->facts.MaxBuses = ioc_init.MaxBuses;
2409 2617
2410 dhsprintk((MYIOC_s_INFO_FMT "Sending IOCInit (req @ %p)\n", 2618 dhsprintk((MYIOC_s_INFO_FMT "Sending IOCInit (req @ %p)\n",
2411 ioc->name, &ioc_init)); 2619 ioc->name, &ioc_init));
2412 2620
2413 r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init, 2621 r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init,
2414 sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag); 2622 sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag);
2415 if (r != 0) 2623 if (r != 0) {
2624 printk(MYIOC_s_ERR_FMT "Sending IOCInit failed(%d)!\n",ioc->name, r);
2416 return r; 2625 return r;
2626 }
2417 2627
2418 /* No need to byte swap the multibyte fields in the reply 2628 /* No need to byte swap the multibyte fields in the reply
2419 * since we don't even look at it's contents. 2629 * since we don't even look at it's contents.
@@ -2472,7 +2682,7 @@ SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
2472{ 2682{
2473 PortEnable_t port_enable; 2683 PortEnable_t port_enable;
2474 MPIDefaultReply_t reply_buf; 2684 MPIDefaultReply_t reply_buf;
2475 int ii; 2685 int rc;
2476 int req_sz; 2686 int req_sz;
2477 int reply_sz; 2687 int reply_sz;
2478 2688
@@ -2494,22 +2704,15 @@ SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
2494 2704
2495 /* RAID FW may take a long time to enable 2705 /* RAID FW may take a long time to enable
2496 */ 2706 */
2497 if (ioc->bus_type == FC) { 2707 if ( (ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
2498 ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable, 2708 > MPI_FW_HEADER_PID_PROD_TARGET_SCSI ) {
2499 reply_sz, (u16*)&reply_buf, 65 /*seconds*/, sleepFlag); 2709 rc = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
2500 } else {
2501 ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
2502 reply_sz, (u16*)&reply_buf, 300 /*seconds*/, sleepFlag); 2710 reply_sz, (u16*)&reply_buf, 300 /*seconds*/, sleepFlag);
2711 } else {
2712 rc = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
2713 reply_sz, (u16*)&reply_buf, 30 /*seconds*/, sleepFlag);
2503 } 2714 }
2504 2715 return rc;
2505 if (ii != 0)
2506 return ii;
2507
2508 /* We do not even look at the reply, so we need not
2509 * swap the multi-byte fields.
2510 */
2511
2512 return 0;
2513} 2716}
2514 2717
2515/* 2718/*
@@ -2666,9 +2869,8 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
2666 * <0 for fw upload failure. 2869 * <0 for fw upload failure.
2667 */ 2870 */
2668static int 2871static int
2669mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag) 2872mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag)
2670{ 2873{
2671 MpiFwHeader_t *pFwHeader;
2672 MpiExtImageHeader_t *pExtImage; 2874 MpiExtImageHeader_t *pExtImage;
2673 u32 fwSize; 2875 u32 fwSize;
2674 u32 diag0val; 2876 u32 diag0val;
@@ -2679,18 +2881,8 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2679 u32 load_addr; 2881 u32 load_addr;
2680 u32 ioc_state=0; 2882 u32 ioc_state=0;
2681 2883
2682 ddlprintk((MYIOC_s_INFO_FMT "downloadboot: fw size 0x%x, ioc FW Ptr %p\n", 2884 ddlprintk((MYIOC_s_INFO_FMT "downloadboot: fw size 0x%x (%d), FW Ptr %p\n",
2683 ioc->name, ioc->facts.FWImageSize, ioc->cached_fw)); 2885 ioc->name, pFwHeader->ImageSize, pFwHeader->ImageSize, pFwHeader));
2684
2685 if ( ioc->facts.FWImageSize == 0 )
2686 return -1;
2687
2688 if (ioc->cached_fw == NULL)
2689 return -2;
2690
2691 /* prevent a second downloadboot and memory free with alt_ioc */
2692 if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
2693 ioc->alt_ioc->cached_fw = NULL;
2694 2886
2695 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF); 2887 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
2696 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE); 2888 CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
@@ -2718,16 +2910,17 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2718 ioc->name, count)); 2910 ioc->name, count));
2719 break; 2911 break;
2720 } 2912 }
2721 /* wait 1 sec */ 2913 /* wait .1 sec */
2722 if (sleepFlag == CAN_SLEEP) { 2914 if (sleepFlag == CAN_SLEEP) {
2723 msleep_interruptible (1000); 2915 msleep_interruptible (100);
2724 } else { 2916 } else {
2725 mdelay (1000); 2917 mdelay (100);
2726 } 2918 }
2727 } 2919 }
2728 2920
2729 if ( count == 30 ) { 2921 if ( count == 30 ) {
2730 ddlprintk((MYIOC_s_INFO_FMT "downloadboot failed! Unable to RESET_ADAPTER diag0val=%x\n", 2922 ddlprintk((MYIOC_s_INFO_FMT "downloadboot failed! "
2923 "Unable to get MPI_DIAG_DRWE mode, diag0val=%x\n",
2731 ioc->name, diag0val)); 2924 ioc->name, diag0val));
2732 return -3; 2925 return -3;
2733 } 2926 }
@@ -2742,7 +2935,6 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2742 /* Set the DiagRwEn and Disable ARM bits */ 2935 /* Set the DiagRwEn and Disable ARM bits */
2743 CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM)); 2936 CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM));
2744 2937
2745 pFwHeader = (MpiFwHeader_t *) ioc->cached_fw;
2746 fwSize = (pFwHeader->ImageSize + 3)/4; 2938 fwSize = (pFwHeader->ImageSize + 3)/4;
2747 ptrFw = (u32 *) pFwHeader; 2939 ptrFw = (u32 *) pFwHeader;
2748 2940
@@ -2792,19 +2984,38 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2792 /* Clear the internal flash bad bit - autoincrementing register, 2984 /* Clear the internal flash bad bit - autoincrementing register,
2793 * so must do two writes. 2985 * so must do two writes.
2794 */ 2986 */
2795 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000); 2987 if (ioc->bus_type == SCSI) {
2796 diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData); 2988 /*
2797 diagRwData |= 0x4000000; 2989 * 1030 and 1035 H/W errata, workaround to access
2798 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000); 2990 * the ClearFlashBadSignatureBit
2799 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData); 2991 */
2992 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
2993 diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData);
2994 diagRwData |= 0x40000000;
2995 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
2996 CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData);
2997
2998 } else /* if((ioc->bus_type == SAS) || (ioc->bus_type == FC)) */ {
2999 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
3000 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val |
3001 MPI_DIAG_CLEAR_FLASH_BAD_SIG);
3002
3003 /* wait 1 msec */
3004 if (sleepFlag == CAN_SLEEP) {
3005 msleep_interruptible (1);
3006 } else {
3007 mdelay (1);
3008 }
3009 }
2800 3010
2801 if (ioc->errata_flag_1064) 3011 if (ioc->errata_flag_1064)
2802 pci_disable_io_access(ioc->pcidev); 3012 pci_disable_io_access(ioc->pcidev);
2803 3013
2804 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic); 3014 diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
2805 ddlprintk((MYIOC_s_INFO_FMT "downloadboot diag0val=%x, turning off PREVENT_IOC_BOOT, DISABLE_ARM\n", 3015 ddlprintk((MYIOC_s_INFO_FMT "downloadboot diag0val=%x, "
3016 "turning off PREVENT_IOC_BOOT, DISABLE_ARM, RW_ENABLE\n",
2806 ioc->name, diag0val)); 3017 ioc->name, diag0val));
2807 diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM); 3018 diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM | MPI_DIAG_RW_ENABLE);
2808 ddlprintk((MYIOC_s_INFO_FMT "downloadboot now diag0val=%x\n", 3019 ddlprintk((MYIOC_s_INFO_FMT "downloadboot now diag0val=%x\n",
2809 ioc->name, diag0val)); 3020 ioc->name, diag0val));
2810 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val); 3021 CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
@@ -2812,10 +3023,23 @@ mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
2812 /* Write 0xFF to reset the sequencer */ 3023 /* Write 0xFF to reset the sequencer */
2813 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF); 3024 CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
2814 3025
3026 if (ioc->bus_type == SAS) {
3027 ioc_state = mpt_GetIocState(ioc, 0);
3028 if ( (GetIocFacts(ioc, sleepFlag,
3029 MPT_HOSTEVENT_IOC_BRINGUP)) != 0 ) {
3030 ddlprintk((MYIOC_s_INFO_FMT "GetIocFacts failed: IocState=%x\n",
3031 ioc->name, ioc_state));
3032 return -EFAULT;
3033 }
3034 }
3035
2815 for (count=0; count<HZ*20; count++) { 3036 for (count=0; count<HZ*20; count++) {
2816 if ((ioc_state = mpt_GetIocState(ioc, 0)) & MPI_IOC_STATE_READY) { 3037 if ((ioc_state = mpt_GetIocState(ioc, 0)) & MPI_IOC_STATE_READY) {
2817 ddlprintk((MYIOC_s_INFO_FMT "downloadboot successful! (count=%d) IocState=%x\n", 3038 ddlprintk((MYIOC_s_INFO_FMT "downloadboot successful! (count=%d) IocState=%x\n",
2818 ioc->name, count, ioc_state)); 3039 ioc->name, count, ioc_state));
3040 if (ioc->bus_type == SAS) {
3041 return 0;
3042 }
2819 if ((SendIocInit(ioc, sleepFlag)) != 0) { 3043 if ((SendIocInit(ioc, sleepFlag)) != 0) {
2820 ddlprintk((MYIOC_s_INFO_FMT "downloadboot: SendIocInit failed\n", 3044 ddlprintk((MYIOC_s_INFO_FMT "downloadboot: SendIocInit failed\n",
2821 ioc->name)); 3045 ioc->name));
@@ -3049,12 +3273,13 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3049 3273
3050 /* wait 1 sec */ 3274 /* wait 1 sec */
3051 if (sleepFlag == CAN_SLEEP) { 3275 if (sleepFlag == CAN_SLEEP) {
3052 ssleep(1); 3276 msleep_interruptible (1000);
3053 } else { 3277 } else {
3054 mdelay (1000); 3278 mdelay (1000);
3055 } 3279 }
3056 } 3280 }
3057 if ((count = mpt_downloadboot(ioc, sleepFlag)) < 0) { 3281 if ((count = mpt_downloadboot(ioc,
3282 (MpiFwHeader_t *)ioc->cached_fw, sleepFlag)) < 0) {
3058 printk(KERN_WARNING MYNAM 3283 printk(KERN_WARNING MYNAM
3059 ": firmware downloadboot failure (%d)!\n", count); 3284 ": firmware downloadboot failure (%d)!\n", count);
3060 } 3285 }
@@ -3637,7 +3862,7 @@ WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
3637 int count = 0; 3862 int count = 0;
3638 u32 intstat=0; 3863 u32 intstat=0;
3639 3864
3640 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * howlong; 3865 cntdn = 1000 * howlong;
3641 3866
3642 if (sleepFlag == CAN_SLEEP) { 3867 if (sleepFlag == CAN_SLEEP) {
3643 while (--cntdn) { 3868 while (--cntdn) {
@@ -3687,7 +3912,7 @@ WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
3687 int count = 0; 3912 int count = 0;
3688 u32 intstat=0; 3913 u32 intstat=0;
3689 3914
3690 cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * howlong; 3915 cntdn = 1000 * howlong;
3691 if (sleepFlag == CAN_SLEEP) { 3916 if (sleepFlag == CAN_SLEEP) {
3692 while (--cntdn) { 3917 while (--cntdn) {
3693 intstat = CHIPREG_READ32(&ioc->chip->IntStatus); 3918 intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
@@ -4001,6 +4226,85 @@ GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
4001 4226
4002/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 4227/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4003/* 4228/*
 4229 *	mptbase_sas_persist_operation - Perform operation on SAS Persistent Table
4230 * @ioc: Pointer to MPT_ADAPTER structure
4231 * @sas_address: 64bit SAS Address for operation.
4232 * @target_id: specified target for operation
4233 * @bus: specified bus for operation
4234 * @persist_opcode: see below
4235 *
4236 * MPI_SAS_OP_CLEAR_NOT_PRESENT - Free all persist TargetID mappings for
4237 * devices not currently present.
 4238 *	MPI_SAS_OP_CLEAR_ALL_PERSISTENT - Clear all persist TargetID mappings
4239 *
 4240 *	NOTE: Do not use this function during interrupt time.
4241 *
4242 * Returns: 0 for success, non-zero error
4243 */
4244
4245/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4246int
4247mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4248{
4249 SasIoUnitControlRequest_t *sasIoUnitCntrReq;
4250 SasIoUnitControlReply_t *sasIoUnitCntrReply;
4251 MPT_FRAME_HDR *mf = NULL;
4252 MPIHeader_t *mpi_hdr;
4253
4254
 4255	/* ensure garbage is not sent to fw */
4256 switch(persist_opcode) {
4257
4258 case MPI_SAS_OP_CLEAR_NOT_PRESENT:
4259 case MPI_SAS_OP_CLEAR_ALL_PERSISTENT:
4260 break;
4261
4262 default:
4263 return -1;
4264 break;
4265 }
4266
4267 printk("%s: persist_opcode=%x\n",__FUNCTION__, persist_opcode);
4268
4269 /* Get a MF for this command.
4270 */
4271 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
4272 printk("%s: no msg frames!\n",__FUNCTION__);
4273 return -1;
4274 }
4275
4276 mpi_hdr = (MPIHeader_t *) mf;
4277 sasIoUnitCntrReq = (SasIoUnitControlRequest_t *)mf;
4278 memset(sasIoUnitCntrReq,0,sizeof(SasIoUnitControlRequest_t));
4279 sasIoUnitCntrReq->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
4280 sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext;
4281 sasIoUnitCntrReq->Operation = persist_opcode;
4282
4283 init_timer(&ioc->persist_timer);
4284 ioc->persist_timer.data = (unsigned long) ioc;
4285 ioc->persist_timer.function = mpt_timer_expired;
4286 ioc->persist_timer.expires = jiffies + HZ*10 /* 10 sec */;
4287 ioc->persist_wait_done=0;
4288 add_timer(&ioc->persist_timer);
4289 mpt_put_msg_frame(mpt_base_index, ioc, mf);
4290 wait_event(mpt_waitq, ioc->persist_wait_done);
4291
4292 sasIoUnitCntrReply =
4293 (SasIoUnitControlReply_t *)ioc->persist_reply_frame;
4294 if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
4295 printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
4296 __FUNCTION__,
4297 sasIoUnitCntrReply->IOCStatus,
4298 sasIoUnitCntrReply->IOCLogInfo);
4299 return -1;
4300 }
4301
4302 printk("%s: success\n",__FUNCTION__);
4303 return 0;
4304}
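mptbase_sas_persist_operation() blocks on mpt_waitq until the SAS IO unit control reply comes back (or the 10 second persist_timer fires), so it can only be used from process context. A minimal calling fragment, assuming a valid MPT_ADAPTER *ioc from the probe path; this is a sketch, not a standalone program:

	/* Free persistent TargetID mappings for devices no longer present;
	 * must not be called at interrupt time.
	 */
	if (mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT) != 0)
		printk(MYIOC_s_WARN_FMT "clearing SAS persistent table failed\n",
		    ioc->name);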
4305
4306/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
4307/*
4004 * GetIoUnitPage2 - Retrieve BIOS version and boot order information. 4308 * GetIoUnitPage2 - Retrieve BIOS version and boot order information.
4005 * @ioc: Pointer to MPT_ADAPTER structure 4309 * @ioc: Pointer to MPT_ADAPTER structure
4006 * 4310 *
@@ -4340,10 +4644,10 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
4340 if (mpt_config(ioc, &cfg) != 0) 4644 if (mpt_config(ioc, &cfg) != 0)
4341 goto done_and_free; 4645 goto done_and_free;
4342 4646
4343 if ( (mem = (u8 *)ioc->spi_data.pIocPg2) == NULL ) { 4647 if ( (mem = (u8 *)ioc->raid_data.pIocPg2) == NULL ) {
4344 mem = kmalloc(iocpage2sz, GFP_ATOMIC); 4648 mem = kmalloc(iocpage2sz, GFP_ATOMIC);
4345 if (mem) { 4649 if (mem) {
4346 ioc->spi_data.pIocPg2 = (IOCPage2_t *) mem; 4650 ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem;
4347 } else { 4651 } else {
4348 goto done_and_free; 4652 goto done_and_free;
4349 } 4653 }
@@ -4360,7 +4664,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
4360 /* At least 1 RAID Volume 4664 /* At least 1 RAID Volume
4361 */ 4665 */
4362 pIocRv = pIoc2->RaidVolume; 4666 pIocRv = pIoc2->RaidVolume;
4363 ioc->spi_data.isRaid = 0; 4667 ioc->raid_data.isRaid = 0;
4364 for (jj = 0; jj < nVols; jj++, pIocRv++) { 4668 for (jj = 0; jj < nVols; jj++, pIocRv++) {
4365 vid = pIocRv->VolumeID; 4669 vid = pIocRv->VolumeID;
4366 vbus = pIocRv->VolumeBus; 4670 vbus = pIocRv->VolumeBus;
@@ -4369,7 +4673,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
4369 /* find the match 4673 /* find the match
4370 */ 4674 */
4371 if (vbus == 0) { 4675 if (vbus == 0) {
4372 ioc->spi_data.isRaid |= (1 << vid); 4676 ioc->raid_data.isRaid |= (1 << vid);
4373 } else { 4677 } else {
4374 /* Error! Always bus 0 4678 /* Error! Always bus 0
4375 */ 4679 */
@@ -4404,8 +4708,8 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
4404 4708
4405 /* Free the old page 4709 /* Free the old page
4406 */ 4710 */
4407 kfree(ioc->spi_data.pIocPg3); 4711 kfree(ioc->raid_data.pIocPg3);
4408 ioc->spi_data.pIocPg3 = NULL; 4712 ioc->raid_data.pIocPg3 = NULL;
4409 4713
4410 /* There is at least one physical disk. 4714 /* There is at least one physical disk.
4411 * Read and save IOC Page 3 4715 * Read and save IOC Page 3
@@ -4442,7 +4746,7 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
4442 mem = kmalloc(iocpage3sz, GFP_ATOMIC); 4746 mem = kmalloc(iocpage3sz, GFP_ATOMIC);
4443 if (mem) { 4747 if (mem) {
4444 memcpy(mem, (u8 *)pIoc3, iocpage3sz); 4748 memcpy(mem, (u8 *)pIoc3, iocpage3sz);
4445 ioc->spi_data.pIocPg3 = (IOCPage3_t *) mem; 4749 ioc->raid_data.pIocPg3 = (IOCPage3_t *) mem;
4446 } 4750 }
4447 } 4751 }
4448 4752
@@ -5366,8 +5670,8 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
5366} 5670}
5367 5671
5368/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5672/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5369static char * 5673static void
5370EventDescriptionStr(u8 event, u32 evData0) 5674EventDescriptionStr(u8 event, u32 evData0, char *evStr)
5371{ 5675{
5372 char *ds; 5676 char *ds;
5373 5677
@@ -5420,8 +5724,95 @@ EventDescriptionStr(u8 event, u32 evData0)
5420 ds = "Events(OFF) Change"; 5724 ds = "Events(OFF) Change";
5421 break; 5725 break;
5422 case MPI_EVENT_INTEGRATED_RAID: 5726 case MPI_EVENT_INTEGRATED_RAID:
5423 ds = "Integrated Raid"; 5727 {
5728 u8 ReasonCode = (u8)(evData0 >> 16);
5729 switch (ReasonCode) {
5730 case MPI_EVENT_RAID_RC_VOLUME_CREATED :
5731 ds = "Integrated Raid: Volume Created";
5732 break;
5733 case MPI_EVENT_RAID_RC_VOLUME_DELETED :
5734 ds = "Integrated Raid: Volume Deleted";
5735 break;
5736 case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED :
5737 ds = "Integrated Raid: Volume Settings Changed";
5738 break;
5739 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED :
5740 ds = "Integrated Raid: Volume Status Changed";
5741 break;
5742 case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED :
5743 ds = "Integrated Raid: Volume Physdisk Changed";
5744 break;
5745 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED :
5746 ds = "Integrated Raid: Physdisk Created";
5747 break;
5748 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED :
5749 ds = "Integrated Raid: Physdisk Deleted";
5750 break;
5751 case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED :
5752 ds = "Integrated Raid: Physdisk Settings Changed";
5753 break;
5754 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED :
5755 ds = "Integrated Raid: Physdisk Status Changed";
5756 break;
5757 case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED :
5758 ds = "Integrated Raid: Domain Validation Needed";
5759 break;
5760 case MPI_EVENT_RAID_RC_SMART_DATA :
 5761			ds = "Integrated Raid: Smart Data";
5762 break;
5763 case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED :
5764 ds = "Integrated Raid: Replace Action Started";
5765 break;
5766 default:
5767 ds = "Integrated Raid";
5768 break;
5769 }
5770 break;
5771 }
5772 case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE:
5773 ds = "SCSI Device Status Change";
5774 break;
5775 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
5776 {
5777 u8 ReasonCode = (u8)(evData0 >> 16);
5778 switch (ReasonCode) {
5779 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
5780 ds = "SAS Device Status Change: Added";
5781 break;
5782 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
5783 ds = "SAS Device Status Change: Deleted";
5784 break;
5785 case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
5786 ds = "SAS Device Status Change: SMART Data";
5787 break;
5788 case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
 5789			ds = "SAS Device Status Change: No Persistency Added";
5790 break;
5791 default:
5792 ds = "SAS Device Status Change: Unknown";
5793 break;
5794 }
5795 break;
5796 }
5797 case MPI_EVENT_ON_BUS_TIMER_EXPIRED:
5798 ds = "Bus Timer Expired";
5799 break;
5800 case MPI_EVENT_QUEUE_FULL:
5801 ds = "Queue Full";
5802 break;
5803 case MPI_EVENT_SAS_SES:
5804 ds = "SAS SES Event";
5805 break;
5806 case MPI_EVENT_PERSISTENT_TABLE_FULL:
5807 ds = "Persistent Table Full";
5808 break;
5809 case MPI_EVENT_SAS_PHY_LINK_STATUS:
5810 ds = "SAS PHY Link Status";
5811 break;
5812 case MPI_EVENT_SAS_DISCOVERY_ERROR:
5813 ds = "SAS Discovery Error";
5424 break; 5814 break;
5815
5425 /* 5816 /*
5426 * MPT base "custom" events may be added here... 5817 * MPT base "custom" events may be added here...
5427 */ 5818 */
@@ -5429,7 +5820,7 @@ EventDescriptionStr(u8 event, u32 evData0)
5429 ds = "Unknown"; 5820 ds = "Unknown";
5430 break; 5821 break;
5431 } 5822 }
5432 return ds; 5823 strcpy(evStr,ds);
5433} 5824}
5434 5825
5435/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 5826/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5451,7 +5842,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
5451 int ii; 5842 int ii;
5452 int r = 0; 5843 int r = 0;
5453 int handlers = 0; 5844 int handlers = 0;
5454 char *evStr; 5845 char evStr[100];
5455 u8 event; 5846 u8 event;
5456 5847
5457 /* 5848 /*
@@ -5464,7 +5855,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
5464 evData0 = le32_to_cpu(pEventReply->Data[0]); 5855 evData0 = le32_to_cpu(pEventReply->Data[0]);
5465 } 5856 }
5466 5857
5467 evStr = EventDescriptionStr(event, evData0); 5858 EventDescriptionStr(event, evData0, evStr);
5468 devtprintk((MYIOC_s_INFO_FMT "MPT event (%s=%02Xh) detected!\n", 5859 devtprintk((MYIOC_s_INFO_FMT "MPT event (%s=%02Xh) detected!\n",
5469 ioc->name, 5860 ioc->name,
5470 evStr, 5861 evStr,
@@ -5481,20 +5872,6 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
5481 * Do general / base driver event processing 5872 * Do general / base driver event processing
5482 */ 5873 */
5483 switch(event) { 5874 switch(event) {
5484 case MPI_EVENT_NONE: /* 00 */
5485 case MPI_EVENT_LOG_DATA: /* 01 */
5486 case MPI_EVENT_STATE_CHANGE: /* 02 */
5487 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
5488 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
5489 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
5490 case MPI_EVENT_RESCAN: /* 06 */
5491 case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
5492 case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
5493 case MPI_EVENT_LOGOUT: /* 09 */
5494 case MPI_EVENT_INTEGRATED_RAID: /* 0B */
5495 case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE: /* 0C */
5496 default:
5497 break;
5498 case MPI_EVENT_EVENT_CHANGE: /* 0A */ 5875 case MPI_EVENT_EVENT_CHANGE: /* 0A */
5499 if (evDataLen) { 5876 if (evDataLen) {
5500 u8 evState = evData0 & 0xFF; 5877 u8 evState = evData0 & 0xFF;
@@ -5507,6 +5884,8 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
5507 } 5884 }
5508 } 5885 }
5509 break; 5886 break;
5887 default:
5888 break;
5510 } 5889 }
5511 5890
5512 /* 5891 /*
@@ -5653,6 +6032,111 @@ mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info)
5653 printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc); 6032 printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc);
5654} 6033}
5655 6034
6035/* strings for sas loginfo */
6036 static char *originator_str[] = {
6037 "IOP", /* 00h */
6038 "PL", /* 01h */
6039 "IR" /* 02h */
6040 };
6041 static char *iop_code_str[] = {
6042 NULL, /* 00h */
6043 "Invalid SAS Address", /* 01h */
6044 NULL, /* 02h */
6045 "Invalid Page", /* 03h */
6046 NULL, /* 04h */
6047 "Task Terminated" /* 05h */
6048 };
6049 static char *pl_code_str[] = {
6050 NULL, /* 00h */
6051 "Open Failure", /* 01h */
6052 "Invalid Scatter Gather List", /* 02h */
6053 "Wrong Relative Offset or Frame Length", /* 03h */
6054 "Frame Transfer Error", /* 04h */
6055 "Transmit Frame Connected Low", /* 05h */
6056 "SATA Non-NCQ RW Error Bit Set", /* 06h */
6057 "SATA Read Log Receive Data Error", /* 07h */
6058 "SATA NCQ Fail All Commands After Error", /* 08h */
6059 "SATA Error in Receive Set Device Bit FIS", /* 09h */
6060 "Receive Frame Invalid Message", /* 0Ah */
6061 "Receive Context Message Valid Error", /* 0Bh */
6062 "Receive Frame Current Frame Error", /* 0Ch */
6063 "SATA Link Down", /* 0Dh */
6064 "Discovery SATA Init W IOS", /* 0Eh */
6065 "Config Invalid Page", /* 0Fh */
6066 "Discovery SATA Init Timeout", /* 10h */
6067 "Reset", /* 11h */
6068 "Abort", /* 12h */
6069 "IO Not Yet Executed", /* 13h */
6070 "IO Executed", /* 14h */
6071 NULL, /* 15h */
6072 NULL, /* 16h */
6073 NULL, /* 17h */
6074 NULL, /* 18h */
6075 NULL, /* 19h */
6076 NULL, /* 1Ah */
6077 NULL, /* 1Bh */
6078 NULL, /* 1Ch */
6079 NULL, /* 1Dh */
6080 NULL, /* 1Eh */
6081 NULL, /* 1Fh */
6082 "Enclosure Management" /* 20h */
6083 };
6084
6085/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6086/*
6087 * mpt_sas_log_info - Log information returned from SAS IOC.
6088 * @ioc: Pointer to MPT_ADAPTER structure
6089 * @log_info: U32 LogInfo reply word from the IOC
6090 *
6091 * Refer to lsi/mpi_log_sas.h.
6092 */
6093static void
6094mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info)
6095{
6096union loginfo_type {
6097 u32 loginfo;
6098 struct {
6099 u32 subcode:16;
6100 u32 code:8;
6101 u32 originator:4;
6102 u32 bus_type:4;
6103 }dw;
6104};
6105 union loginfo_type sas_loginfo;
6106 char *code_desc = NULL;
6107
6108 sas_loginfo.loginfo = log_info;
 6109	if ((sas_loginfo.dw.bus_type != 3 /*SAS*/) ||
 6110	    (sas_loginfo.dw.originator >= sizeof(originator_str)/sizeof(char*)))
6111 return;
6112 if ((sas_loginfo.dw.originator == 0 /*IOP*/) &&
6113 (sas_loginfo.dw.code < sizeof(iop_code_str)/sizeof(char*))) {
6114 code_desc = iop_code_str[sas_loginfo.dw.code];
6115 }else if ((sas_loginfo.dw.originator == 1 /*PL*/) &&
6116 (sas_loginfo.dw.code < sizeof(pl_code_str)/sizeof(char*) )) {
6117 code_desc = pl_code_str[sas_loginfo.dw.code];
6118 }
6119
6120 if (code_desc != NULL)
6121 printk(MYIOC_s_INFO_FMT
6122 "LogInfo(0x%08x): Originator={%s}, Code={%s},"
6123 " SubCode(0x%04x)\n",
6124 ioc->name,
6125 log_info,
6126 originator_str[sas_loginfo.dw.originator],
6127 code_desc,
6128 sas_loginfo.dw.subcode);
6129 else
6130 printk(MYIOC_s_INFO_FMT
6131 "LogInfo(0x%08x): Originator={%s}, Code=(0x%02x),"
6132 " SubCode(0x%04x)\n",
6133 ioc->name,
6134 log_info,
6135 originator_str[sas_loginfo.dw.originator],
6136 sas_loginfo.dw.code,
6137 sas_loginfo.dw.subcode);
6138}
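The loginfo_type union above assumes the usual little-endian bitfield layout: sub-code in the low 16 bits, code in the next 8, originator in the next 4 and bus type in the top nibble. The same decode written with explicit shifts, as a standalone sketch with a made-up sample value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t log_info   = 0x31120101;		/* example value only */
	unsigned bus_type   = (log_info >> 28) & 0xF;	/* 3 = SAS */
	unsigned originator = (log_info >> 24) & 0xF;	/* 1 = "PL" */
	unsigned code       = (log_info >> 16) & 0xFF;	/* 0x12 = "Abort" */
	unsigned subcode    = log_info & 0xFFFF;

	printf("bus_type=%u originator=%u code=0x%02x subcode=0x%04x\n",
	    bus_type, originator, code, subcode);
	return 0;
}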
6139
5656/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6140/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
5657/* 6141/*
5658 * mpt_sp_ioc_info - IOC information returned from SCSI Parallel IOC. 6142 * mpt_sp_ioc_info - IOC information returned from SCSI Parallel IOC.
@@ -5814,6 +6298,7 @@ EXPORT_SYMBOL(mpt_findImVolumes);
5814EXPORT_SYMBOL(mpt_read_ioc_pg_3); 6298EXPORT_SYMBOL(mpt_read_ioc_pg_3);
5815EXPORT_SYMBOL(mpt_alloc_fw_memory); 6299EXPORT_SYMBOL(mpt_alloc_fw_memory);
5816EXPORT_SYMBOL(mpt_free_fw_memory); 6300EXPORT_SYMBOL(mpt_free_fw_memory);
6301EXPORT_SYMBOL(mptbase_sas_persist_operation);
5817 6302
5818 6303
5819/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6304/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index f4827d923731..75105277e22f 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -65,6 +65,7 @@
65#include "lsi/mpi_fc.h" /* Fibre Channel (lowlevel) support */ 65#include "lsi/mpi_fc.h" /* Fibre Channel (lowlevel) support */
66#include "lsi/mpi_targ.h" /* SCSI/FCP Target protcol support */ 66#include "lsi/mpi_targ.h" /* SCSI/FCP Target protcol support */
67#include "lsi/mpi_tool.h" /* Tools support */ 67#include "lsi/mpi_tool.h" /* Tools support */
68#include "lsi/mpi_sas.h" /* SAS support */
68 69
69/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 70/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
70 71
@@ -76,8 +77,8 @@
76#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR 77#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
77#endif 78#endif
78 79
79#define MPT_LINUX_VERSION_COMMON "3.03.02" 80#define MPT_LINUX_VERSION_COMMON "3.03.03"
80#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.02" 81#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.03"
81#define WHAT_MAGIC_STRING "@" "(" "#" ")" 82#define WHAT_MAGIC_STRING "@" "(" "#" ")"
82 83
83#define show_mptmod_ver(s,ver) \ 84#define show_mptmod_ver(s,ver) \
@@ -423,7 +424,7 @@ typedef struct _MPT_IOCTL {
423/* 424/*
424 * Event Structure and define 425 * Event Structure and define
425 */ 426 */
426#define MPTCTL_EVENT_LOG_SIZE (0x0000000A) 427#define MPTCTL_EVENT_LOG_SIZE (0x000000032)
427typedef struct _mpt_ioctl_events { 428typedef struct _mpt_ioctl_events {
428 u32 event; /* Specified by define above */ 429 u32 event; /* Specified by define above */
429 u32 eventContext; /* Index or counter */ 430 u32 eventContext; /* Index or counter */
@@ -451,16 +452,13 @@ typedef struct _mpt_ioctl_events {
451#define MPT_SCSICFG_ALL_IDS 0x02 /* WriteSDP1 to all IDS */ 452#define MPT_SCSICFG_ALL_IDS 0x02 /* WriteSDP1 to all IDS */
452/* #define MPT_SCSICFG_BLK_NEGO 0x10 WriteSDP1 with WDTR and SDTR disabled */ 453/* #define MPT_SCSICFG_BLK_NEGO 0x10 WriteSDP1 with WDTR and SDTR disabled */
453 454
454typedef struct _ScsiCfgData { 455typedef struct _SpiCfgData {
455 u32 PortFlags; 456 u32 PortFlags;
456 int *nvram; /* table of device NVRAM values */ 457 int *nvram; /* table of device NVRAM values */
457 IOCPage2_t *pIocPg2; /* table of Raid Volumes */
458 IOCPage3_t *pIocPg3; /* table of physical disks */
459 IOCPage4_t *pIocPg4; /* SEP devices addressing */ 458 IOCPage4_t *pIocPg4; /* SEP devices addressing */
460 dma_addr_t IocPg4_dma; /* Phys Addr of IOCPage4 data */ 459 dma_addr_t IocPg4_dma; /* Phys Addr of IOCPage4 data */
461 int IocPg4Sz; /* IOCPage4 size */ 460 int IocPg4Sz; /* IOCPage4 size */
462 u8 dvStatus[MPT_MAX_SCSI_DEVICES]; 461 u8 dvStatus[MPT_MAX_SCSI_DEVICES];
463 int isRaid; /* bit field, 1 if RAID */
464 u8 minSyncFactor; /* 0xFF if async */ 462 u8 minSyncFactor; /* 0xFF if async */
465 u8 maxSyncOffset; /* 0 if async */ 463 u8 maxSyncOffset; /* 0 if async */
466 u8 maxBusWidth; /* 0 if narrow, 1 if wide */ 464 u8 maxBusWidth; /* 0 if narrow, 1 if wide */
@@ -472,10 +470,28 @@ typedef struct _ScsiCfgData {
472 u8 dvScheduled; /* 1 if scheduled */ 470 u8 dvScheduled; /* 1 if scheduled */
473 u8 forceDv; /* 1 to force DV scheduling */ 471 u8 forceDv; /* 1 to force DV scheduling */
474 u8 noQas; /* Disable QAS for this adapter */ 472 u8 noQas; /* Disable QAS for this adapter */
475 u8 Saf_Te; /* 1 to force all Processors as SAF-TE if Inquiry data length is too short to check for SAF-TE */ 473 u8 Saf_Te; /* 1 to force all Processors as
474 * SAF-TE if Inquiry data length
475 * is too short to check for SAF-TE
476 */
476 u8 mpt_dv; /* command line option: enhanced=1, basic=0 */ 477 u8 mpt_dv; /* command line option: enhanced=1, basic=0 */
478 u8 bus_reset; /* 1 to allow bus reset */
477 u8 rsvd[1]; 479 u8 rsvd[1];
478} ScsiCfgData; 480}SpiCfgData;
481
482typedef struct _SasCfgData {
483 u8 ptClear; /* 1 to automatically clear the
484 * persistent table.
485 * 0 to disable
486 * automatic clearing.
487 */
488}SasCfgData;
489
490typedef struct _RaidCfgData {
491 IOCPage2_t *pIocPg2; /* table of Raid Volumes */
492 IOCPage3_t *pIocPg3; /* table of physical disks */
493 int isRaid; /* bit field, 1 if RAID */
494}RaidCfgData;
479 495
480/* 496/*
481 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS 497 * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
@@ -530,11 +546,16 @@ typedef struct _MPT_ADAPTER
530 u8 *sense_buf_pool; 546 u8 *sense_buf_pool;
531 dma_addr_t sense_buf_pool_dma; 547 dma_addr_t sense_buf_pool_dma;
532 u32 sense_buf_low_dma; 548 u32 sense_buf_low_dma;
549 u8 *HostPageBuffer; /* SAS - host page buffer support */
550 u32 HostPageBuffer_sz;
551 dma_addr_t HostPageBuffer_dma;
533 int mtrr_reg; 552 int mtrr_reg;
534 struct pci_dev *pcidev; /* struct pci_dev pointer */ 553 struct pci_dev *pcidev; /* struct pci_dev pointer */
535 u8 __iomem *memmap; /* mmap address */ 554 u8 __iomem *memmap; /* mmap address */
536 struct Scsi_Host *sh; /* Scsi Host pointer */ 555 struct Scsi_Host *sh; /* Scsi Host pointer */
537 ScsiCfgData spi_data; /* Scsi config. data */ 556 SpiCfgData spi_data; /* Scsi config. data */
557 RaidCfgData raid_data; /* Raid config. data */
558 SasCfgData sas_data; /* Sas config. data */
538 MPT_IOCTL *ioctl; /* ioctl data pointer */ 559 MPT_IOCTL *ioctl; /* ioctl data pointer */
539 struct proc_dir_entry *ioc_dentry; 560 struct proc_dir_entry *ioc_dentry;
540 struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */ 561 struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */
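With the former ScsiCfgData now split into spi_data, raid_data and sas_data, RAID volume and physical-disk state is read through ioc->raid_data while the SPI- and SAS-specific settings stay in their own structures. A small fragment of the kind of check callers make once mpt_findImVolumes() has filled in the bit field; target_id is an illustrative variable, not part of this patch:

	/* raid_data.isRaid is a bit field: bit N set means target N is a RAID volume */
	if (ioc->raid_data.isRaid & (1 << target_id)) {
		/* route this target through the RAID volume handling */
	}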
@@ -554,31 +575,35 @@ typedef struct _MPT_ADAPTER
554#else 575#else
555 u32 mfcnt; 576 u32 mfcnt;
556#endif 577#endif
557 u32 NB_for_64_byte_frame; 578 u32 NB_for_64_byte_frame;
558 u32 hs_req[MPT_MAX_FRAME_SIZE/sizeof(u32)]; 579 u32 hs_req[MPT_MAX_FRAME_SIZE/sizeof(u32)];
559 u16 hs_reply[MPT_MAX_FRAME_SIZE/sizeof(u16)]; 580 u16 hs_reply[MPT_MAX_FRAME_SIZE/sizeof(u16)];
560 IOCFactsReply_t facts; 581 IOCFactsReply_t facts;
561 PortFactsReply_t pfacts[2]; 582 PortFactsReply_t pfacts[2];
562 FCPortPage0_t fc_port_page0[2]; 583 FCPortPage0_t fc_port_page0[2];
584 struct timer_list persist_timer; /* persist table timer */
585 int persist_wait_done; /* persist completion flag */
586 u8 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */
563 LANPage0_t lan_cnfg_page0; 587 LANPage0_t lan_cnfg_page0;
564 LANPage1_t lan_cnfg_page1; 588 LANPage1_t lan_cnfg_page1;
565 /* 589 /*
566 * Description: errata_flag_1064 590 * Description: errata_flag_1064
567 * If a PCIX read occurs within 1 or 2 cycles after the chip receives 591 * If a PCIX read occurs within 1 or 2 cycles after the chip receives
568 * a split completion for a read data, an internal address pointer incorrectly 592 * a split completion for a read data, an internal address pointer incorrectly
569 * increments by 32 bytes 593 * increments by 32 bytes
570 */ 594 */
571 int errata_flag_1064; 595 int errata_flag_1064;
572 u8 FirstWhoInit; 596 u8 FirstWhoInit;
573 u8 upload_fw; /* If set, do a fw upload */ 597 u8 upload_fw; /* If set, do a fw upload */
574 u8 reload_fw; /* Force a FW Reload on next reset */ 598 u8 reload_fw; /* Force a FW Reload on next reset */
575 u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */ 599 u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */
576 u8 pad1[4]; 600 u8 pad1[4];
577 int DoneCtx; 601 int DoneCtx;
578 int TaskCtx; 602 int TaskCtx;
579 int InternalCtx; 603 int InternalCtx;
580 struct list_head list; 604 struct list_head list;
581 struct net_device *netdev; 605 struct net_device *netdev;
606 struct list_head sas_topology;
582} MPT_ADAPTER; 607} MPT_ADAPTER;
583 608
584/* 609/*
@@ -964,6 +989,7 @@ extern void mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
964extern void mpt_free_fw_memory(MPT_ADAPTER *ioc); 989extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
965extern int mpt_findImVolumes(MPT_ADAPTER *ioc); 990extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
966extern int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc); 991extern int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
992extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
967 993
968/* 994/*
969 * Public data decl's... 995 * Public data decl's...
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 7577c2417e2e..cb2d59d5f5af 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1326,7 +1326,7 @@ mptctl_gettargetinfo (unsigned long arg)
 		 */
 		if (hd && hd->Targets) {
 			mpt_findImVolumes(ioc);
-			pIoc2 = ioc->spi_data.pIocPg2;
+			pIoc2 = ioc->raid_data.pIocPg2;
 			for ( id = 0; id <= max_id; ) {
 				if ( pIoc2 && pIoc2->NumActiveVolumes ) {
 					if ( id == pIoc2->RaidVolume[0].VolumeID ) {
@@ -1348,7 +1348,7 @@ mptctl_gettargetinfo (unsigned long arg)
 					--maxWordsLeft;
 					goto next_id;
 				} else {
-					pIoc3 = ioc->spi_data.pIocPg3;
+					pIoc3 = ioc->raid_data.pIocPg3;
 					for ( jj = 0; jj < pIoc3->NumPhysDisks; jj++ ) {
 						if ( pIoc3->PhysDisk[jj].PhysDiskID == id )
 							goto next_id;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 13771abea13f..a628be9bbbad 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -189,7 +189,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		printk(MYIOC_s_WARN_FMT
 		    "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
 		    ioc->name, ioc);
-		return -ENODEV;
+		return 0;
 	}
 
 	sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST));
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 52794be5a95c..ed3c891e388f 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -312,7 +312,12 @@ static int
 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 {
 	struct net_device *dev = ioc->netdev;
-	struct mpt_lan_priv *priv = netdev_priv(dev);
+	struct mpt_lan_priv *priv;
+
+	if (dev == NULL)
+		return(1);
+	else
+		priv = netdev_priv(dev);
 
 	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
 		reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
new file mode 100644
index 000000000000..429820e48c69
--- /dev/null
+++ b/drivers/message/fusion/mptsas.c
@@ -0,0 +1,1235 @@
1/*
2 * linux/drivers/message/fusion/mptsas.c
3 * For use with LSI Logic PCI chip/adapter(s)
4 * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
5 *
6 * Copyright (c) 1999-2005 LSI Logic Corporation
7 * (mailto:mpt_linux_developer@lsil.com)
8 * Copyright (c) 2005 Dell
9 */
10/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
11/*
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; version 2 of the License.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 NO WARRANTY
22 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
23 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
24 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
25 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
26 solely responsible for determining the appropriateness of using and
27 distributing the Program and assumes all risks associated with its
28 exercise of rights under this Agreement, including but not limited to
29 the risks and costs of program errors, damage to or loss of data,
30 programs or equipment, and unavailability or interruption of operations.
31
32 DISCLAIMER OF LIABILITY
33 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
34 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
36 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
37 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
38 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
39 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
40
41 You should have received a copy of the GNU General Public License
42 along with this program; if not, write to the Free Software
43 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
44*/
45/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
46
47#include <linux/module.h>
48#include <linux/kernel.h>
49#include <linux/init.h>
50#include <linux/errno.h>
51#include <linux/sched.h>
52#include <linux/workqueue.h>
53
54#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_device.h>
56#include <scsi/scsi_host.h>
57#include <scsi/scsi_transport_sas.h>
58
59#include "mptbase.h"
60#include "mptscsih.h"
61
62
63#define my_NAME "Fusion MPT SAS Host driver"
64#define my_VERSION MPT_LINUX_VERSION_COMMON
65#define MYNAM "mptsas"
66
67MODULE_AUTHOR(MODULEAUTHOR);
68MODULE_DESCRIPTION(my_NAME);
69MODULE_LICENSE("GPL");
70
71static int mpt_pq_filter;
72module_param(mpt_pq_filter, int, 0);
73MODULE_PARM_DESC(mpt_pq_filter,
74 "Enable peripheral qualifier filter: enable=1 "
75 "(default=0)");
76
77static int mpt_pt_clear;
78module_param(mpt_pt_clear, int, 0);
79MODULE_PARM_DESC(mpt_pt_clear,
80 "Clear persistency table: enable=1 "
81 "(default=MPTSCSIH_PT_CLEAR=0)");
82
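/*
 * Both knobs above are ordinary module parameters, so they can be set at
 * load time, e.g. "modprobe mptsas mpt_pt_clear=1" (illustrative invocation
 * only; it assumes the driver is built as mptsas.ko).
 */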
83static int mptsasDoneCtx = -1;
84static int mptsasTaskCtx = -1;
85static int mptsasInternalCtx = -1; /* Used only for internal commands */
86
87
88/*
89 * SAS topology structures
90 *
91 * The MPT Fusion firmware interface spreads information about the
92 * SAS topology over several configuration pages, so we need some data
93 * structures to collect it and process it for the SAS transport class.
94 */
95
96struct mptsas_devinfo {
97 u16 handle; /* unique id to address this device */
98 u8 phy_id; /* phy number of parent device */
99 u8 port_id; /* sas physical port this device
100 is assoc'd with */
101 u8 target; /* logical target id of this device */
102 u8 bus; /* logical bus number of this device */
103 u64 sas_address; /* WWN of this device,
104 SATA is assigned by HBA,expander */
105 u32 device_info; /* bitfield detailed info about this device */
106};
107
108struct mptsas_phyinfo {
109 u8 phy_id; /* phy index */
110 u8 port_id; /* port number this phy is part of */
111 u8 negotiated_link_rate; /* nego'd link rate for this phy */
112 u8 hw_link_rate; /* hardware max/min phys link rate */
113 u8 programmed_link_rate; /* programmed max/min phy link rate */
114 struct mptsas_devinfo identify; /* point to phy device info */
115 struct mptsas_devinfo attached; /* point to attached device info */
116 struct sas_rphy *rphy;
117};
118
119struct mptsas_portinfo {
120 struct list_head list;
121 u16 handle; /* unique id to address this */
122 u8 num_phys; /* number of phys */
123 struct mptsas_phyinfo *phy_info;
124};
125
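/*
 * Sketch (not part of the driver): once the probe code below has filled
 * these structures in, a consumer could walk the per-IOC topology like
 * this.  The helper name and printk format are invented for illustration.
 */
static void mptsas_dump_topology(MPT_ADAPTER *ioc)
{
	struct mptsas_portinfo *p;
	int i;

	list_for_each_entry(p, &ioc->sas_topology, list) {
		for (i = 0; i < p->num_phys; i++) {
			struct mptsas_phyinfo *phy = &p->phy_info[i];

			printk(KERN_DEBUG "port handle 0x%04x phy %d: "
			    "attached handle 0x%04x, sas addr 0x%016llx\n",
			    p->handle, phy->phy_id, phy->attached.handle,
			    (unsigned long long)phy->attached.sas_address);
		}
	}
}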
126/*
127 * This is pretty ugly. We will be able to seriously clean it up
128 * once the DV code in mptscsih goes away and we can properly
129 * implement ->target_alloc.
130 */
131static int
132mptsas_slave_alloc(struct scsi_device *device)
133{
134 struct Scsi_Host *host = device->host;
135 MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
136 struct sas_rphy *rphy;
137 struct mptsas_portinfo *p;
138 VirtDevice *vdev;
139 uint target = device->id;
140 int i;
141
142 if ((vdev = hd->Targets[target]) != NULL)
143 goto out;
144
145 vdev = kmalloc(sizeof(VirtDevice), GFP_KERNEL);
146 if (!vdev) {
147 printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
148 hd->ioc->name, sizeof(VirtDevice));
149 return -ENOMEM;
150 }
151
152 memset(vdev, 0, sizeof(VirtDevice));
153 vdev->tflags = MPT_TARGET_FLAGS_Q_YES|MPT_TARGET_FLAGS_VALID_INQUIRY;
154 vdev->ioc_id = hd->ioc->id;
155
156 rphy = dev_to_rphy(device->sdev_target->dev.parent);
157 list_for_each_entry(p, &hd->ioc->sas_topology, list) {
158 for (i = 0; i < p->num_phys; i++) {
159 if (p->phy_info[i].attached.sas_address ==
160 rphy->identify.sas_address) {
161 vdev->target_id =
162 p->phy_info[i].attached.target;
163 vdev->bus_id = p->phy_info[i].attached.bus;
164 hd->Targets[device->id] = vdev;
165 goto out;
166 }
167 }
168 }
169
170 printk("No matching SAS device found!!\n");
171 kfree(vdev);
172 return -ENODEV;
173
174 out:
175 vdev->num_luns++;
176 device->hostdata = vdev;
177 return 0;
178}
179
180static struct scsi_host_template mptsas_driver_template = {
181 .proc_name = "mptsas",
182 .proc_info = mptscsih_proc_info,
183	.name				= "MPT SAS Host",
184 .info = mptscsih_info,
185 .queuecommand = mptscsih_qcmd,
186 .slave_alloc = mptsas_slave_alloc,
187 .slave_configure = mptscsih_slave_configure,
188 .slave_destroy = mptscsih_slave_destroy,
189 .change_queue_depth = mptscsih_change_queue_depth,
190 .eh_abort_handler = mptscsih_abort,
191 .eh_device_reset_handler = mptscsih_dev_reset,
192 .eh_bus_reset_handler = mptscsih_bus_reset,
193 .eh_host_reset_handler = mptscsih_host_reset,
194 .bios_param = mptscsih_bios_param,
195 .can_queue = MPT_FC_CAN_QUEUE,
196 .this_id = -1,
197 .sg_tablesize = MPT_SCSI_SG_DEPTH,
198 .max_sectors = 8192,
199 .cmd_per_lun = 7,
200 .use_clustering = ENABLE_CLUSTERING,
201};
202
203static struct sas_function_template mptsas_transport_functions = {
204};
205
206static struct scsi_transport_template *mptsas_transport_template;
207
208#ifdef SASDEBUG
209static void mptsas_print_phy_data(MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
210{
211 printk("---- IO UNIT PAGE 0 ------------\n");
212 printk("Handle=0x%X\n",
213 le16_to_cpu(phy_data->AttachedDeviceHandle));
214 printk("Controller Handle=0x%X\n",
215 le16_to_cpu(phy_data->ControllerDevHandle));
216 printk("Port=0x%X\n", phy_data->Port);
217 printk("Port Flags=0x%X\n", phy_data->PortFlags);
218 printk("PHY Flags=0x%X\n", phy_data->PhyFlags);
219 printk("Negotiated Link Rate=0x%X\n", phy_data->NegotiatedLinkRate);
220 printk("Controller PHY Device Info=0x%X\n",
221 le32_to_cpu(phy_data->ControllerPhyDeviceInfo));
222 printk("DiscoveryStatus=0x%X\n",
223 le32_to_cpu(phy_data->DiscoveryStatus));
224 printk("\n");
225}
226
227static void mptsas_print_phy_pg0(SasPhyPage0_t *pg0)
228{
229 __le64 sas_address;
230
231 memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
232
233 printk("---- SAS PHY PAGE 0 ------------\n");
234 printk("Attached Device Handle=0x%X\n",
235 le16_to_cpu(pg0->AttachedDevHandle));
236 printk("SAS Address=0x%llX\n",
237 (unsigned long long)le64_to_cpu(sas_address));
238 printk("Attached PHY Identifier=0x%X\n", pg0->AttachedPhyIdentifier);
239 printk("Attached Device Info=0x%X\n",
240 le32_to_cpu(pg0->AttachedDeviceInfo));
241 printk("Programmed Link Rate=0x%X\n", pg0->ProgrammedLinkRate);
242 printk("Change Count=0x%X\n", pg0->ChangeCount);
243 printk("PHY Info=0x%X\n", le32_to_cpu(pg0->PhyInfo));
244 printk("\n");
245}
246
247static void mptsas_print_device_pg0(SasDevicePage0_t *pg0)
248{
249 __le64 sas_address;
250
251 memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
252
253 printk("---- SAS DEVICE PAGE 0 ---------\n");
254 printk("Handle=0x%X\n" ,le16_to_cpu(pg0->DevHandle));
255 printk("Enclosure Handle=0x%X\n", le16_to_cpu(pg0->EnclosureHandle));
256 printk("Slot=0x%X\n", le16_to_cpu(pg0->Slot));
257 printk("SAS Address=0x%llX\n", le64_to_cpu(sas_address));
258 printk("Target ID=0x%X\n", pg0->TargetID);
259 printk("Bus=0x%X\n", pg0->Bus);
260 printk("PhyNum=0x%X\n", pg0->PhyNum);
261 printk("AccessStatus=0x%X\n", le16_to_cpu(pg0->AccessStatus));
262 printk("Device Info=0x%X\n", le32_to_cpu(pg0->DeviceInfo));
263 printk("Flags=0x%X\n", le16_to_cpu(pg0->Flags));
264 printk("Physical Port=0x%X\n", pg0->PhysicalPort);
265 printk("\n");
266}
267
268static void mptsas_print_expander_pg1(SasExpanderPage1_t *pg1)
269{
270 printk("---- SAS EXPANDER PAGE 1 ------------\n");
271
272 printk("Physical Port=0x%X\n", pg1->PhysicalPort);
273 printk("PHY Identifier=0x%X\n", pg1->Phy);
274 printk("Negotiated Link Rate=0x%X\n", pg1->NegotiatedLinkRate);
275 printk("Programmed Link Rate=0x%X\n", pg1->ProgrammedLinkRate);
276 printk("Hardware Link Rate=0x%X\n", pg1->HwLinkRate);
277 printk("Owner Device Handle=0x%X\n",
278 le16_to_cpu(pg1->OwnerDevHandle));
279 printk("Attached Device Handle=0x%X\n",
280 le16_to_cpu(pg1->AttachedDevHandle));
281}
282#else
283#define mptsas_print_phy_data(phy_data) do { } while (0)
284#define mptsas_print_phy_pg0(pg0) do { } while (0)
285#define mptsas_print_device_pg0(pg0) do { } while (0)
286#define mptsas_print_expander_pg1(pg1) do { } while (0)
287#endif
288
289static int
290mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
291{
292 ConfigExtendedPageHeader_t hdr;
293 CONFIGPARMS cfg;
294 SasIOUnitPage0_t *buffer;
295 dma_addr_t dma_handle;
296 int error, i;
297
298 hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
299 hdr.ExtPageLength = 0;
300 hdr.PageNumber = 0;
301 hdr.Reserved1 = 0;
302 hdr.Reserved2 = 0;
303 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
304 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
305
306 cfg.cfghdr.ehdr = &hdr;
307 cfg.physAddr = -1;
308 cfg.pageAddr = 0;
309 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
310 cfg.dir = 0; /* read */
311 cfg.timeout = 10;
312
313 error = mpt_config(ioc, &cfg);
314 if (error)
315 goto out;
316 if (!hdr.ExtPageLength) {
317 error = -ENXIO;
318 goto out;
319 }
320
321 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
322 &dma_handle);
323 if (!buffer) {
324 error = -ENOMEM;
325 goto out;
326 }
327
328 cfg.physAddr = dma_handle;
329 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
330
331 error = mpt_config(ioc, &cfg);
332 if (error)
333 goto out_free_consistent;
334
335 port_info->num_phys = buffer->NumPhys;
336 port_info->phy_info = kcalloc(port_info->num_phys,
337 sizeof(struct mptsas_phyinfo),GFP_KERNEL);
338 if (!port_info->phy_info) {
339 error = -ENOMEM;
340 goto out_free_consistent;
341 }
342
343 for (i = 0; i < port_info->num_phys; i++) {
344 mptsas_print_phy_data(&buffer->PhyData[i]);
345 port_info->phy_info[i].phy_id = i;
346 port_info->phy_info[i].port_id =
347 buffer->PhyData[i].Port;
348 port_info->phy_info[i].negotiated_link_rate =
349 buffer->PhyData[i].NegotiatedLinkRate;
350 }
351
352 out_free_consistent:
353 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
354 buffer, dma_handle);
355 out:
356 return error;
357}
358
359static int
360mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
361 u32 form, u32 form_specific)
362{
363 ConfigExtendedPageHeader_t hdr;
364 CONFIGPARMS cfg;
365 SasPhyPage0_t *buffer;
366 dma_addr_t dma_handle;
367 int error;
368
369 hdr.PageVersion = MPI_SASPHY0_PAGEVERSION;
370 hdr.ExtPageLength = 0;
371 hdr.PageNumber = 0;
372 hdr.Reserved1 = 0;
373 hdr.Reserved2 = 0;
374 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
375 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY;
376
377 cfg.cfghdr.ehdr = &hdr;
378 cfg.dir = 0; /* read */
379 cfg.timeout = 10;
380
381 /* Get Phy Pg 0 for each Phy. */
382 cfg.physAddr = -1;
383 cfg.pageAddr = form + form_specific;
384 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
385
386 error = mpt_config(ioc, &cfg);
387 if (error)
388 goto out;
389
390 if (!hdr.ExtPageLength) {
391 error = -ENXIO;
392 goto out;
393 }
394
395 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
396 &dma_handle);
397 if (!buffer) {
398 error = -ENOMEM;
399 goto out;
400 }
401
402 cfg.physAddr = dma_handle;
403 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
404
405 error = mpt_config(ioc, &cfg);
406 if (error)
407 goto out_free_consistent;
408
409 mptsas_print_phy_pg0(buffer);
410
411 phy_info->hw_link_rate = buffer->HwLinkRate;
412 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
413 phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
414 phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
415
416 out_free_consistent:
417 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
418 buffer, dma_handle);
419 out:
420 return error;
421}
422
423static int
424mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
425 u32 form, u32 form_specific)
426{
427 ConfigExtendedPageHeader_t hdr;
428 CONFIGPARMS cfg;
429 SasDevicePage0_t *buffer;
430 dma_addr_t dma_handle;
431 __le64 sas_address;
432 int error;
433
434 hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
435 hdr.ExtPageLength = 0;
436 hdr.PageNumber = 0;
437 hdr.Reserved1 = 0;
438 hdr.Reserved2 = 0;
439 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
440 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE;
441
442 cfg.cfghdr.ehdr = &hdr;
443 cfg.pageAddr = form + form_specific;
444 cfg.physAddr = -1;
445 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
446 cfg.dir = 0; /* read */
447 cfg.timeout = 10;
448
449 error = mpt_config(ioc, &cfg);
450 if (error)
451 goto out;
452 if (!hdr.ExtPageLength) {
453 error = -ENXIO;
454 goto out;
455 }
456
457 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
458 &dma_handle);
459 if (!buffer) {
460 error = -ENOMEM;
461 goto out;
462 }
463
464 cfg.physAddr = dma_handle;
465 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
466
467 error = mpt_config(ioc, &cfg);
468 if (error)
469 goto out_free_consistent;
470
471 mptsas_print_device_pg0(buffer);
472
473 device_info->handle = le16_to_cpu(buffer->DevHandle);
474 device_info->phy_id = buffer->PhyNum;
475 device_info->port_id = buffer->PhysicalPort;
476 device_info->target = buffer->TargetID;
477 device_info->bus = buffer->Bus;
478 memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
479 device_info->sas_address = le64_to_cpu(sas_address);
480 device_info->device_info =
481 le32_to_cpu(buffer->DeviceInfo);
482
483 out_free_consistent:
484 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
485 buffer, dma_handle);
486 out:
487 return error;
488}
489
490static int
491mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
492 u32 form, u32 form_specific)
493{
494 ConfigExtendedPageHeader_t hdr;
495 CONFIGPARMS cfg;
496 SasExpanderPage0_t *buffer;
497 dma_addr_t dma_handle;
498 int error;
499
500 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
501 hdr.ExtPageLength = 0;
502 hdr.PageNumber = 0;
503 hdr.Reserved1 = 0;
504 hdr.Reserved2 = 0;
505 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
506 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
507
508 cfg.cfghdr.ehdr = &hdr;
509 cfg.physAddr = -1;
510 cfg.pageAddr = form + form_specific;
511 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
512 cfg.dir = 0; /* read */
513 cfg.timeout = 10;
514
515 error = mpt_config(ioc, &cfg);
516 if (error)
517 goto out;
518
519 if (!hdr.ExtPageLength) {
520 error = -ENXIO;
521 goto out;
522 }
523
524 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
525 &dma_handle);
526 if (!buffer) {
527 error = -ENOMEM;
528 goto out;
529 }
530
531 cfg.physAddr = dma_handle;
532 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
533
534 error = mpt_config(ioc, &cfg);
535 if (error)
536 goto out_free_consistent;
537
538 /* save config data */
539 port_info->num_phys = buffer->NumPhys;
540 port_info->handle = le16_to_cpu(buffer->DevHandle);
541 port_info->phy_info = kcalloc(port_info->num_phys,
542 sizeof(struct mptsas_phyinfo),GFP_KERNEL);
543 if (!port_info->phy_info) {
544 error = -ENOMEM;
545 goto out_free_consistent;
546 }
547
548 out_free_consistent:
549 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
550 buffer, dma_handle);
551 out:
552 return error;
553}
554
555static int
556mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
557 u32 form, u32 form_specific)
558{
559 ConfigExtendedPageHeader_t hdr;
560 CONFIGPARMS cfg;
561 SasExpanderPage1_t *buffer;
562 dma_addr_t dma_handle;
563 int error;
564
565 hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
566 hdr.ExtPageLength = 0;
567 hdr.PageNumber = 1;
568 hdr.Reserved1 = 0;
569 hdr.Reserved2 = 0;
570 hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
571 hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
572
573 cfg.cfghdr.ehdr = &hdr;
574 cfg.physAddr = -1;
575 cfg.pageAddr = form + form_specific;
576 cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
577 cfg.dir = 0; /* read */
578 cfg.timeout = 10;
579
580 error = mpt_config(ioc, &cfg);
581 if (error)
582 goto out;
583
584 if (!hdr.ExtPageLength) {
585 error = -ENXIO;
586 goto out;
587 }
588
589 buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
590 &dma_handle);
591 if (!buffer) {
592 error = -ENOMEM;
593 goto out;
594 }
595
596 cfg.physAddr = dma_handle;
597 cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
598
599 error = mpt_config(ioc, &cfg);
600 if (error)
601 goto out_free_consistent;
602
603
604 mptsas_print_expander_pg1(buffer);
605
606 /* save config data */
607 phy_info->phy_id = buffer->Phy;
608 phy_info->port_id = buffer->PhysicalPort;
609 phy_info->negotiated_link_rate = buffer->NegotiatedLinkRate;
610 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
611 phy_info->hw_link_rate = buffer->HwLinkRate;
612 phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
613 phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
614
615
616 out_free_consistent:
617 pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
618 buffer, dma_handle);
619 out:
620 return error;
621}
622
623static void
624mptsas_parse_device_info(struct sas_identify *identify,
625 struct mptsas_devinfo *device_info)
626{
627 u16 protocols;
628
629 identify->sas_address = device_info->sas_address;
630 identify->phy_identifier = device_info->phy_id;
631
632 /*
633 * Fill in Phy Initiator Port Protocol.
634 * Bits 6:3, more than one bit can be set, fall through cases.
635 */
636 protocols = device_info->device_info & 0x78;
637 identify->initiator_port_protocols = 0;
638 if (protocols & MPI_SAS_DEVICE_INFO_SSP_INITIATOR)
639 identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
640 if (protocols & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
641 identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
642 if (protocols & MPI_SAS_DEVICE_INFO_SMP_INITIATOR)
643 identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
644 if (protocols & MPI_SAS_DEVICE_INFO_SATA_HOST)
645 identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
646
647 /*
648 * Fill in Phy Target Port Protocol.
649 * Bits 10:7, more than one bit can be set, fall through cases.
650 */
651 protocols = device_info->device_info & 0x780;
652 identify->target_port_protocols = 0;
653 if (protocols & MPI_SAS_DEVICE_INFO_SSP_TARGET)
654 identify->target_port_protocols |= SAS_PROTOCOL_SSP;
655 if (protocols & MPI_SAS_DEVICE_INFO_STP_TARGET)
656 identify->target_port_protocols |= SAS_PROTOCOL_STP;
657 if (protocols & MPI_SAS_DEVICE_INFO_SMP_TARGET)
658 identify->target_port_protocols |= SAS_PROTOCOL_SMP;
659 if (protocols & MPI_SAS_DEVICE_INFO_SATA_DEVICE)
660 identify->target_port_protocols |= SAS_PROTOCOL_SATA;
661
662 /*
663 * Fill in Attached device type.
664 */
665 switch (device_info->device_info &
666 MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
667 case MPI_SAS_DEVICE_INFO_NO_DEVICE:
668 identify->device_type = SAS_PHY_UNUSED;
669 break;
670 case MPI_SAS_DEVICE_INFO_END_DEVICE:
671 identify->device_type = SAS_END_DEVICE;
672 break;
673 case MPI_SAS_DEVICE_INFO_EDGE_EXPANDER:
674 identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
675 break;
676 case MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER:
677 identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
678 break;
679 }
680}
681
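/*
 * Worked example for the decode above (bit values assumed from the MPI
 * SAS device-info layout: device type in bits 2:0, initiator protocols
 * in bits 6:3, target protocols in bits 10:7; illustration only):
 *
 *   device_info = 0x00000401
 *     bits 2:0  = 0x1 -> MPI_SAS_DEVICE_INFO_END_DEVICE -> SAS_END_DEVICE
 *     bits 6:3  = 0x0 -> no initiator protocols
 *     bits 10:7 = 0x8 -> MPI_SAS_DEVICE_INFO_SSP_TARGET -> SAS_PROTOCOL_SSP
 *
 *   i.e. a plain SSP end device such as a SAS disk.
 */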
682static int mptsas_probe_one_phy(struct device *dev,
683 struct mptsas_phyinfo *phy_info, int index)
684{
685 struct sas_phy *port;
686 int error;
687
688 port = sas_phy_alloc(dev, index);
689 if (!port)
690 return -ENOMEM;
691
692 port->port_identifier = phy_info->port_id;
693 mptsas_parse_device_info(&port->identify, &phy_info->identify);
694
695 /*
696 * Set Negotiated link rate.
697 */
698 switch (phy_info->negotiated_link_rate) {
699 case MPI_SAS_IOUNIT0_RATE_PHY_DISABLED:
700 port->negotiated_linkrate = SAS_PHY_DISABLED;
701 break;
702 case MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION:
703 port->negotiated_linkrate = SAS_LINK_RATE_FAILED;
704 break;
705 case MPI_SAS_IOUNIT0_RATE_1_5:
706 port->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
707 break;
708 case MPI_SAS_IOUNIT0_RATE_3_0:
709 port->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
710 break;
711 case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:
712 case MPI_SAS_IOUNIT0_RATE_UNKNOWN:
713 default:
714 port->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
715 break;
716 }
717
718 /*
719 * Set Max hardware link rate.
720 */
721 switch (phy_info->hw_link_rate & MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
722 case MPI_SAS_PHY0_HWRATE_MAX_RATE_1_5:
723 port->maximum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
724 break;
725 case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
726 port->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
727 break;
728 default:
729 break;
730 }
731
732 /*
733 * Set Max programmed link rate.
734 */
735 switch (phy_info->programmed_link_rate &
736 MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
737 case MPI_SAS_PHY0_PRATE_MAX_RATE_1_5:
738 port->maximum_linkrate = SAS_LINK_RATE_1_5_GBPS;
739 break;
740 case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
741 port->maximum_linkrate = SAS_LINK_RATE_3_0_GBPS;
742 break;
743 default:
744 break;
745 }
746
747 /*
748 * Set Min hardware link rate.
749 */
750 switch (phy_info->hw_link_rate & MPI_SAS_PHY0_HWRATE_MIN_RATE_MASK) {
751 case MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5:
752 port->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
753 break;
754 case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
755 port->minimum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
756 break;
757 default:
758 break;
759 }
760
761 /*
762 * Set Min programmed link rate.
763 */
764 switch (phy_info->programmed_link_rate &
765 MPI_SAS_PHY0_PRATE_MIN_RATE_MASK) {
766 case MPI_SAS_PHY0_PRATE_MIN_RATE_1_5:
767 port->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
768 break;
769 case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
770 port->minimum_linkrate = SAS_LINK_RATE_3_0_GBPS;
771 break;
772 default:
773 break;
774 }
775
776 error = sas_phy_add(port);
777 if (error) {
778 sas_phy_free(port);
779 return error;
780 }
781
782 if (phy_info->attached.handle) {
783 struct sas_rphy *rphy;
784
785 rphy = sas_rphy_alloc(port);
786 if (!rphy)
787 return 0; /* non-fatal: an rphy can be added later */
788
789 mptsas_parse_device_info(&rphy->identify, &phy_info->attached);
790 error = sas_rphy_add(rphy);
791 if (error) {
792 sas_rphy_free(rphy);
793 return error;
794 }
795
796 phy_info->rphy = rphy;
797 }
798
799 return 0;
800}
801
802static int
803mptsas_probe_hba_phys(MPT_ADAPTER *ioc, int *index)
804{
805 struct mptsas_portinfo *port_info;
806 u32 handle = 0xFFFF;
807 int error = -ENOMEM, i;
808
809 port_info = kmalloc(sizeof(*port_info), GFP_KERNEL);
810 if (!port_info)
811 goto out;
812 memset(port_info, 0, sizeof(*port_info));
813
814 error = mptsas_sas_io_unit_pg0(ioc, port_info);
815 if (error)
816 goto out_free_port_info;
817
818 list_add_tail(&port_info->list, &ioc->sas_topology);
819
820 for (i = 0; i < port_info->num_phys; i++) {
821 mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
822 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
823 MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
824
825 mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
826 (MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE <<
827 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), handle);
828 handle = port_info->phy_info[i].identify.handle;
829
830 if (port_info->phy_info[i].attached.handle) {
831 mptsas_sas_device_pg0(ioc,
832 &port_info->phy_info[i].attached,
833 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
834 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
835 port_info->phy_info[i].attached.handle);
836 }
837
838 mptsas_probe_one_phy(&ioc->sh->shost_gendev,
839 &port_info->phy_info[i], *index);
840 (*index)++;
841 }
842
843 return 0;
844
845 out_free_port_info:
846 kfree(port_info);
847 out:
848 return error;
849}
850
851static int
852mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle, int *index)
853{
854 struct mptsas_portinfo *port_info, *p;
855 int error = -ENOMEM, i, j;
856
857 port_info = kmalloc(sizeof(*port_info), GFP_KERNEL);
858 if (!port_info)
859 goto out;
860 memset(port_info, 0, sizeof(*port_info));
861
862 error = mptsas_sas_expander_pg0(ioc, port_info,
863 (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
864 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), *handle);
865 if (error)
866 goto out_free_port_info;
867
868 *handle = port_info->handle;
869
870 list_add_tail(&port_info->list, &ioc->sas_topology);
871 for (i = 0; i < port_info->num_phys; i++) {
872 struct device *parent;
873
874 mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
875 (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
876 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle);
877
878 if (port_info->phy_info[i].identify.handle) {
879 mptsas_sas_device_pg0(ioc,
880 &port_info->phy_info[i].identify,
881 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
882 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
883 port_info->phy_info[i].identify.handle);
884 }
885
886 if (port_info->phy_info[i].attached.handle) {
887 mptsas_sas_device_pg0(ioc,
888 &port_info->phy_info[i].attached,
889 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
890 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
891 port_info->phy_info[i].attached.handle);
892 }
893
894 /*
895		 * If we find a parent port handle, this expander is
896		 * attached to another expander; otherwise it hangs off the
897		 * HBA phys.
898 */
899 parent = &ioc->sh->shost_gendev;
900 list_for_each_entry(p, &ioc->sas_topology, list) {
901 for (j = 0; j < p->num_phys; j++) {
902 if (port_info->phy_info[i].identify.handle ==
903 p->phy_info[j].attached.handle)
904 parent = &p->phy_info[j].rphy->dev;
905 }
906 }
907
908 mptsas_probe_one_phy(parent, &port_info->phy_info[i], *index);
909 (*index)++;
910 }
911
912 return 0;
913
914 out_free_port_info:
915 kfree(port_info);
916 out:
917 return error;
918}
919
920static void
921mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
922{
923 u32 handle = 0xFFFF;
924 int index = 0;
925
926 mptsas_probe_hba_phys(ioc, &index);
927 while (!mptsas_probe_expander_phys(ioc, &handle, &index))
928 ;
929}
930
931static int
932mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
933{
934 struct Scsi_Host *sh;
935 MPT_SCSI_HOST *hd;
936 MPT_ADAPTER *ioc;
937 unsigned long flags;
938 int sz, ii;
939 int numSGE = 0;
940 int scale;
941 int ioc_cap;
942 u8 *mem;
943 int error=0;
944 int r;
945
946 r = mpt_attach(pdev,id);
947 if (r)
948 return r;
949
950 ioc = pci_get_drvdata(pdev);
951 ioc->DoneCtx = mptsasDoneCtx;
952 ioc->TaskCtx = mptsasTaskCtx;
953 ioc->InternalCtx = mptsasInternalCtx;
954
955 /* Added sanity check on readiness of the MPT adapter.
956 */
957 if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
958 printk(MYIOC_s_WARN_FMT
959 "Skipping because it's not operational!\n",
960 ioc->name);
961 return -ENODEV;
962 }
963
964 if (!ioc->active) {
965 printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
966 ioc->name);
967 return -ENODEV;
968 }
969
970 /* Sanity check - ensure at least 1 port is INITIATOR capable
971 */
972 ioc_cap = 0;
973 for (ii = 0; ii < ioc->facts.NumberOfPorts; ii++) {
974 if (ioc->pfacts[ii].ProtocolFlags &
975 MPI_PORTFACTS_PROTOCOL_INITIATOR)
976 ioc_cap++;
977 }
978
979 if (!ioc_cap) {
980 printk(MYIOC_s_WARN_FMT
981 "Skipping ioc=%p because SCSI Initiator mode "
982 "is NOT enabled!\n", ioc->name, ioc);
983 return 0;
984 }
985
986 sh = scsi_host_alloc(&mptsas_driver_template, sizeof(MPT_SCSI_HOST));
987 if (!sh) {
988 printk(MYIOC_s_WARN_FMT
989 "Unable to register controller with SCSI subsystem\n",
990 ioc->name);
991 return -1;
992 }
993
994 spin_lock_irqsave(&ioc->FreeQlock, flags);
995
996 /* Attach the SCSI Host to the IOC structure
997 */
998 ioc->sh = sh;
999
1000 sh->io_port = 0;
1001 sh->n_io_port = 0;
1002 sh->irq = 0;
1003
1004 /* set 16 byte cdb's */
1005 sh->max_cmd_len = 16;
1006
1007 sh->max_id = ioc->pfacts->MaxDevices + 1;
1008
1009 sh->transportt = mptsas_transport_template;
1010
1011 sh->max_lun = MPT_LAST_LUN + 1;
1012 sh->max_channel = 0;
1013 sh->this_id = ioc->pfacts[0].PortSCSIID;
1014
1015 /* Required entry.
1016 */
1017 sh->unique_id = ioc->id;
1018
1019 INIT_LIST_HEAD(&ioc->sas_topology);
1020
1021 /* Verify that we won't exceed the maximum
1022 * number of chain buffers
1023 * We can optimize: ZZ = req_sz/sizeof(SGE)
1024 * For 32bit SGE's:
1025 * numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ
1026 * + (req_sz - 64)/sizeof(SGE)
1027 * A slightly different algorithm is required for
1028 * 64bit SGEs.
1029 */
1030 scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
1031 if (sizeof(dma_addr_t) == sizeof(u64)) {
1032 numSGE = (scale - 1) *
1033 (ioc->facts.MaxChainDepth-1) + scale +
1034 (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
1035 sizeof(u32));
1036 } else {
1037 numSGE = 1 + (scale - 1) *
1038 (ioc->facts.MaxChainDepth-1) + scale +
1039 (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
1040 sizeof(u32));
1041 }
1042
1043 if (numSGE < sh->sg_tablesize) {
1044 /* Reset this value */
1045 dprintk((MYIOC_s_INFO_FMT
1046 "Resetting sg_tablesize to %d from %d\n",
1047 ioc->name, numSGE, sh->sg_tablesize));
1048 sh->sg_tablesize = numSGE;
1049 }
1050
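	/*
	 * Worked example of the numSGE formula above, with assumed values
	 * (illustration only, not figures reported by any real adapter):
	 *   req_sz = 128, sizeof(dma_addr_t) = 8, sizeof(u32) = 4
	 *   scale  = 128 / (8 + 4) = 10
	 *   facts.MaxChainDepth = 8 (assumed)
	 *   numSGE = (10 - 1) * (8 - 1) + 10 + (128 - 60) / 12
	 *          = 63 + 10 + 5 = 78
	 * so sg_tablesize would be capped at 78 had the template asked
	 * for more.
	 */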
1051 spin_unlock_irqrestore(&ioc->FreeQlock, flags);
1052
1053 hd = (MPT_SCSI_HOST *) sh->hostdata;
1054 hd->ioc = ioc;
1055
1056 /* SCSI needs scsi_cmnd lookup table!
1057 * (with size equal to req_depth*PtrSz!)
1058 */
1059 sz = ioc->req_depth * sizeof(void *);
1060 mem = kmalloc(sz, GFP_ATOMIC);
1061 if (mem == NULL) {
1062 error = -ENOMEM;
1063 goto mptsas_probe_failed;
1064 }
1065
1066 memset(mem, 0, sz);
1067 hd->ScsiLookup = (struct scsi_cmnd **) mem;
1068
1069 dprintk((MYIOC_s_INFO_FMT "ScsiLookup @ %p, sz=%d\n",
1070 ioc->name, hd->ScsiLookup, sz));
1071
1072 /* Allocate memory for the device structures.
1073 * A non-Null pointer at an offset
1074 * indicates a device exists.
1075 * max_id = 1 + maximum id (hosts.h)
1076 */
1077 sz = sh->max_id * sizeof(void *);
1078 mem = kmalloc(sz, GFP_ATOMIC);
1079 if (mem == NULL) {
1080 error = -ENOMEM;
1081 goto mptsas_probe_failed;
1082 }
1083
1084 memset(mem, 0, sz);
1085 hd->Targets = (VirtDevice **) mem;
1086
1087 dprintk((KERN_INFO
1088 " Targets @ %p, sz=%d\n", hd->Targets, sz));
1089
1090 /* Clear the TM flags
1091 */
1092 hd->tmPending = 0;
1093 hd->tmState = TM_STATE_NONE;
1094 hd->resetPending = 0;
1095 hd->abortSCpnt = NULL;
1096
1097 /* Clear the pointer used to store
1098 * single-threaded commands, i.e., those
1099 * issued during a bus scan, dv and
1100 * configuration pages.
1101 */
1102 hd->cmdPtr = NULL;
1103
1104 /* Initialize this SCSI Hosts' timers
1105 * To use, set the timer expires field
1106 * and add_timer
1107 */
1108 init_timer(&hd->timer);
1109 hd->timer.data = (unsigned long) hd;
1110 hd->timer.function = mptscsih_timer_expired;
1111
1112 hd->mpt_pq_filter = mpt_pq_filter;
1113 ioc->sas_data.ptClear = mpt_pt_clear;
1114
1115 if (ioc->sas_data.ptClear==1) {
1116 mptbase_sas_persist_operation(
1117 ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT);
1118 }
1119
1120 ddvprintk((MYIOC_s_INFO_FMT
1121 "mpt_pq_filter %x mpt_pq_filter %x\n",
1122 ioc->name,
1123 mpt_pq_filter,
1124 mpt_pq_filter));
1125
1126 init_waitqueue_head(&hd->scandv_waitq);
1127 hd->scandv_wait_done = 0;
1128 hd->last_queue_full = 0;
1129
1130 error = scsi_add_host(sh, &ioc->pcidev->dev);
1131 if (error) {
1132 dprintk((KERN_ERR MYNAM
1133 "scsi_add_host failed\n"));
1134 goto mptsas_probe_failed;
1135 }
1136
1137 mptsas_scan_sas_topology(ioc);
1138
1139 return 0;
1140
1141mptsas_probe_failed:
1142
1143 mptscsih_remove(pdev);
1144 return error;
1145}
1146
1147static void __devexit mptsas_remove(struct pci_dev *pdev)
1148{
1149 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1150 struct mptsas_portinfo *p, *n;
1151
1152 sas_remove_host(ioc->sh);
1153
1154 list_for_each_entry_safe(p, n, &ioc->sas_topology, list) {
1155 list_del(&p->list);
1156 kfree(p);
1157 }
1158
1159 mptscsih_remove(pdev);
1160}
1161
1162static struct pci_device_id mptsas_pci_table[] = {
1163 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064,
1164 PCI_ANY_ID, PCI_ANY_ID },
1165 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1066,
1166 PCI_ANY_ID, PCI_ANY_ID },
1167 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1068,
1168 PCI_ANY_ID, PCI_ANY_ID },
1169 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064E,
1170 PCI_ANY_ID, PCI_ANY_ID },
1171 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1066E,
1172 PCI_ANY_ID, PCI_ANY_ID },
1173 { PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1068E,
1174 PCI_ANY_ID, PCI_ANY_ID },
1175 {0} /* Terminating entry */
1176};
1177MODULE_DEVICE_TABLE(pci, mptsas_pci_table);
1178
1179
1180static struct pci_driver mptsas_driver = {
1181 .name = "mptsas",
1182 .id_table = mptsas_pci_table,
1183 .probe = mptsas_probe,
1184 .remove = __devexit_p(mptsas_remove),
1185 .shutdown = mptscsih_shutdown,
1186#ifdef CONFIG_PM
1187 .suspend = mptscsih_suspend,
1188 .resume = mptscsih_resume,
1189#endif
1190};
1191
1192static int __init
1193mptsas_init(void)
1194{
1195 show_mptmod_ver(my_NAME, my_VERSION);
1196
1197 mptsas_transport_template =
1198 sas_attach_transport(&mptsas_transport_functions);
1199 if (!mptsas_transport_template)
1200 return -ENODEV;
1201
1202 mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER);
1203 mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER);
1204 mptsasInternalCtx =
1205 mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER);
1206
1207 if (mpt_event_register(mptsasDoneCtx, mptscsih_event_process) == 0) {
1208 devtprintk((KERN_INFO MYNAM
1209 ": Registered for IOC event notifications\n"));
1210 }
1211
1212 if (mpt_reset_register(mptsasDoneCtx, mptscsih_ioc_reset) == 0) {
1213 dprintk((KERN_INFO MYNAM
1214 ": Registered for IOC reset notifications\n"));
1215 }
1216
1217 return pci_register_driver(&mptsas_driver);
1218}
1219
1220static void __exit
1221mptsas_exit(void)
1222{
1223 pci_unregister_driver(&mptsas_driver);
1224 sas_release_transport(mptsas_transport_template);
1225
1226 mpt_reset_deregister(mptsasDoneCtx);
1227 mpt_event_deregister(mptsasDoneCtx);
1228
1229 mpt_deregister(mptsasInternalCtx);
1230 mpt_deregister(mptsasTaskCtx);
1231 mpt_deregister(mptsasDoneCtx);
1232}
1233
1234module_init(mptsas_init);
1235module_exit(mptsas_exit);
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 4a003dc5fde8..5cb07eb224d7 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -62,6 +62,7 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
 
 #include "mptbase.h"
 #include "mptscsih.h"
@@ -93,8 +94,9 @@ typedef struct _BIG_SENSE_BUF {
 
 #define MPT_ICFLAG_BUF_CAP	0x01	/* ReadBuffer Read Capacity format */
 #define MPT_ICFLAG_ECHO		0x02	/* ReadBuffer Echo buffer format */
-#define MPT_ICFLAG_PHYS_DISK	0x04	/* Any SCSI IO but do Phys Disk Format */
-#define MPT_ICFLAG_TAGGED_CMD	0x08	/* Do tagged IO */
+#define MPT_ICFLAG_EBOS		0x04	/* ReadBuffer Echo buffer has EBOS */
+#define MPT_ICFLAG_PHYS_DISK	0x08	/* Any SCSI IO but do Phys Disk Format */
+#define MPT_ICFLAG_TAGGED_CMD	0x10	/* Do tagged IO */
 #define MPT_ICFLAG_DID_RESET	0x20	/* Bus Reset occurred with this command */
 #define MPT_ICFLAG_RESERVED	0x40	/* Reserved has been issued */
 
@@ -159,6 +161,8 @@ int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR
 static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
 static int mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum);
 
+static struct work_struct mptscsih_persistTask;
+
 #ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
 static int mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io);
 static void mptscsih_domainValidation(void *hd);
@@ -167,6 +171,7 @@ static void mptscsih_qas_check(MPT_SCSI_HOST *hd, int id);
 static int mptscsih_doDv(MPT_SCSI_HOST *hd, int channel, int target);
 static void mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage);
 static void mptscsih_fillbuf(char *buffer, int size, int index, int width);
+static void mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id);
 #endif
 
 void mptscsih_remove(struct pci_dev *);
@@ -606,11 +611,24 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 		xfer_cnt = le32_to_cpu(pScsiReply->TransferCount);
 		sc->resid = sc->request_bufflen - xfer_cnt;
 
+		/*
+		 *  if we get a data underrun indication, yet no data was
+		 *  transferred and the SCSI status indicates that the
+		 *  command was never started, change the data underrun
+		 *  to success
+		 */
+		if (status == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
+		    (scsi_status == MPI_SCSI_STATUS_BUSY ||
+		     scsi_status == MPI_SCSI_STATUS_RESERVATION_CONFLICT ||
+		     scsi_status == MPI_SCSI_STATUS_TASK_SET_FULL)) {
+			status = MPI_IOCSTATUS_SUCCESS;
+		}
+
 		dreplyprintk((KERN_NOTICE "Reply ha=%d id=%d lun=%d:\n"
 			"IOCStatus=%04xh SCSIState=%02xh SCSIStatus=%02xh\n"
 			"resid=%d bufflen=%d xfer_cnt=%d\n",
 			ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
 			status, scsi_state, scsi_status, sc->resid,
 			sc->request_bufflen, xfer_cnt));
 
 		if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)
@@ -619,8 +637,11 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 		/*
 		 *  Look for + dump FCP ResponseInfo[]!
 		 */
-		if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID) {
-			printk(KERN_NOTICE "  FCP_ResponseInfo=%08xh\n",
+		if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID &&
+		    pScsiReply->ResponseInfo) {
+			printk(KERN_NOTICE "ha=%d id=%d lun=%d: "
+			    "FCP_ResponseInfo=%08xh\n",
+			    ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
 			    le32_to_cpu(pScsiReply->ResponseInfo));
 		}
 
@@ -661,23 +682,13 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 			break;
 
 		case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:	/* 0x0049 */
-			if ( xfer_cnt >= sc->underflow ) {
-				/* Sufficient data transfer occurred */
+			sc->resid = sc->request_bufflen - xfer_cnt;
+			if((xfer_cnt==0)||(sc->underflow > xfer_cnt))
+				sc->result=DID_SOFT_ERROR << 16;
+			else /* Sufficient data transfer occurred */
 				sc->result = (DID_OK << 16) | scsi_status;
-			} else if ( xfer_cnt == 0 ) {
-				/* A CRC Error causes this condition; retry */
-				sc->result = (DRIVER_SENSE << 24) | (DID_OK << 16) |
-					(CHECK_CONDITION << 1);
-				sc->sense_buffer[0] = 0x70;
-				sc->sense_buffer[2] = NO_SENSE;
-				sc->sense_buffer[12] = 0;
-				sc->sense_buffer[13] = 0;
-			} else {
-				sc->result = DID_SOFT_ERROR << 16;
-			}
-			dreplyprintk((KERN_NOTICE
-			    "RESIDUAL_MISMATCH: result=%x on id=%d\n",
-			    sc->result, sc->device->id));
+			dreplyprintk((KERN_NOTICE
+			    "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->device->id));
 			break;
 
 		case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:		/* 0x0045 */
@@ -692,7 +703,10 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 				;
 			} else {
 				if (xfer_cnt < sc->underflow) {
-					sc->result = DID_SOFT_ERROR << 16;
+					if (scsi_status == SAM_STAT_BUSY)
+						sc->result = SAM_STAT_BUSY;
+					else
+						sc->result = DID_SOFT_ERROR << 16;
 				}
 				if (scsi_state & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) {
 					/* What to do?
@@ -717,8 +731,10 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 
 		case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:	/* 0x0040 */
 		case MPI_IOCSTATUS_SUCCESS:			/* 0x0000 */
-			scsi_status = pScsiReply->SCSIStatus;
-			sc->result = (DID_OK << 16) | scsi_status;
+			if (scsi_status == MPI_SCSI_STATUS_BUSY)
+				sc->result = (DID_BUS_BUSY << 16) | scsi_status;
+			else
+				sc->result = (DID_OK << 16) | scsi_status;
 			if (scsi_state == 0) {
 				;
 			} else if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
@@ -890,12 +906,13 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
 	SCSIIORequest_t	*mf = NULL;
 	int		 ii;
 	int		 max = hd->ioc->req_depth;
+	struct scsi_cmnd *sc;
 
 	dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n",
 	    target, lun, max));
 
 	for (ii=0; ii < max; ii++) {
-		if (hd->ScsiLookup[ii] != NULL) {
+		if ((sc = hd->ScsiLookup[ii]) != NULL) {
 
 			mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii);
 
@@ -910,9 +927,22 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
 			hd->ScsiLookup[ii] = NULL;
 			mptscsih_freeChainBuffers(hd->ioc, ii);
 			mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf);
+			if (sc->use_sg) {
+				pci_unmap_sg(hd->ioc->pcidev,
+				(struct scatterlist *) sc->request_buffer,
+				sc->use_sg,
+				sc->sc_data_direction);
+			} else if (sc->request_bufflen) {
+				pci_unmap_single(hd->ioc->pcidev,
+				sc->SCp.dma_handle,
+				sc->request_bufflen,
+				sc->sc_data_direction);
+			}
+			sc->host_scribble = NULL;
+			sc->result = DID_NO_CONNECT << 16;
+			sc->scsi_done(sc);
 		}
 	}
-
 	return;
 }
 
@@ -967,8 +997,10 @@ mptscsih_remove(struct pci_dev *pdev)
 	unsigned long flags;
 	int sz1;
 
-	if(!host)
+	if(!host) {
+		mpt_detach(pdev);
 		return;
+	}
 
 	scsi_remove_host(host);
 
@@ -1256,8 +1288,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 	MPT_SCSI_HOST		*hd;
 	MPT_FRAME_HDR		*mf;
 	SCSIIORequest_t		*pScsiReq;
-	VirtDevice		*pTarget;
-	int	 target;
+	VirtDevice		*pTarget = SCpnt->device->hostdata;
 	int	 lun;
 	u32	 datalen;
 	u32	 scsictl;
@@ -1267,12 +1298,9 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 	int	 ii;
 
 	hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata;
-	target = SCpnt->device->id;
 	lun = SCpnt->device->lun;
 	SCpnt->scsi_done = done;
 
-	pTarget = hd->Targets[target];
-
 	dmfprintk((MYIOC_s_INFO_FMT "qcmd: SCpnt=%p, done()=%p\n",
 		(hd && hd->ioc) ? hd->ioc->name : "ioc?", SCpnt, done));
 
@@ -1315,7 +1343,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 	/* Default to untagged. Once a target structure has been allocated,
 	 * use the Inquiry data to determine if device supports tagged.
 	 */
-	if ( pTarget
+	if (pTarget
 	    && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)
 	    && (SCpnt->device->tagged_supported)) {
 		scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
@@ -1325,8 +1353,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 
 	/* Use the above information to set up the message frame
 	 */
-	pScsiReq->TargetID = (u8) target;
-	pScsiReq->Bus = (u8) SCpnt->device->channel;
+	pScsiReq->TargetID = (u8) pTarget->target_id;
+	pScsiReq->Bus = pTarget->bus_id;
 	pScsiReq->ChainOffset = 0;
 	pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
 	pScsiReq->CDBLength = SCpnt->cmd_len;
@@ -1378,7 +1406,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 
 #ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
 	if (hd->ioc->bus_type == SCSI) {
-		int dvStatus = hd->ioc->spi_data.dvStatus[target];
+		int dvStatus = hd->ioc->spi_data.dvStatus[pTarget->target_id];
 		int issueCmd = 1;
 
 		if (dvStatus || hd->ioc->spi_data.forceDv) {
@@ -1426,6 +1454,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 	return 0;
 
  fail:
+	hd->ScsiLookup[my_idx] = NULL;
 	mptscsih_freeChainBuffers(hd->ioc, my_idx);
 	mpt_free_msg_frame(hd->ioc, mf);
 	return SCSI_MLQUEUE_HOST_BUSY;
@@ -1713,24 +1742,23 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 	MPT_FRAME_HDR	*mf;
 	u32		 ctx2abort;
 	int		 scpnt_idx;
+	int		 retval;
 
 	/* If we can't locate our host adapter structure, return FAILED status.
 	 */
 	if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL) {
 		SCpnt->result = DID_RESET << 16;
 		SCpnt->scsi_done(SCpnt);
-		dfailprintk((KERN_WARNING MYNAM ": mptscsih_abort: "
+		dfailprintk((KERN_INFO MYNAM ": mptscsih_abort: "
 			   "Can't locate host! (sc=%p)\n",
 			   SCpnt));
 		return FAILED;
 	}
 
 	ioc = hd->ioc;
-	if (hd->resetPending)
+	if (hd->resetPending) {
 		return FAILED;
-
-	printk(KERN_WARNING MYNAM ": %s: >> Attempting task abort! (sc=%p)\n",
-	       hd->ioc->name, SCpnt);
+	}
 
 	if (hd->timeouts < -1)
 		hd->timeouts++;
@@ -1738,16 +1766,20 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 	/* Find this command
 	 */
 	if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) {
 		/* Cmd not found in ScsiLookup.
 		 * Do OS callback.
 		 */
 		SCpnt->result = DID_RESET << 16;
-		dtmprintk((KERN_WARNING MYNAM ": %s: mptscsih_abort: "
+		dtmprintk((KERN_INFO MYNAM ": %s: mptscsih_abort: "
 			   "Command not in the active list! (sc=%p)\n",
 			   hd->ioc->name, SCpnt));
 		return SUCCESS;
 	}
 
+	printk(KERN_WARNING MYNAM ": %s: attempting task abort! (sc=%p)\n",
+	       hd->ioc->name, SCpnt);
+	scsi_print_command(SCpnt);
+
 	/* Most important!  Set TaskMsgContext to SCpnt's MsgContext!
 	 * (the IO to be ABORT'd)
 	 *
@@ -1760,38 +1792,22 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 
 	hd->abortSCpnt = SCpnt;
 
-	if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+	retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
 		SCpnt->device->channel, SCpnt->device->id, SCpnt->device->lun,
-		ctx2abort, 2 /* 2 second timeout */)
-		< 0) {
+		ctx2abort, 2 /* 2 second timeout */);
 
-		/* The TM request failed and the subsequent FW-reload failed!
-		 * Fatal error case.
-		 */
-		printk(MYIOC_s_WARN_FMT "Error issuing abort task! (sc=%p)\n",
-		       hd->ioc->name, SCpnt);
+	printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
+		hd->ioc->name,
+		((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
 
-		/* We must clear our pending flag before clearing our state.
-		 */
+	if (retval == 0)
+		return SUCCESS;
+
+	if(retval != FAILED ) {
 		hd->tmPending = 0;
 		hd->tmState = TM_STATE_NONE;
-
-		/* Unmap the DMA buffers, if any. */
-		if (SCpnt->use_sg) {
-			pci_unmap_sg(ioc->pcidev, (struct scatterlist *) SCpnt->request_buffer,
-			    SCpnt->use_sg, SCpnt->sc_data_direction);
-		} else if (SCpnt->request_bufflen) {
-			pci_unmap_single(ioc->pcidev, SCpnt->SCp.dma_handle,
-			    SCpnt->request_bufflen, SCpnt->sc_data_direction);
-		}
-		hd->ScsiLookup[scpnt_idx] = NULL;
-		SCpnt->result = DID_RESET << 16;
-		SCpnt->scsi_done(SCpnt);	/* Issue the command callback */
-		mptscsih_freeChainBuffers(ioc, scpnt_idx);
-		mpt_free_msg_frame(ioc, mf);
-		return FAILED;
 	}
-	return SUCCESS;
+	return FAILED;
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1797/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1813/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1807,11 +1823,12 @@ int
1807mptscsih_dev_reset(struct scsi_cmnd * SCpnt) 1823mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
1808{ 1824{
1809 MPT_SCSI_HOST *hd; 1825 MPT_SCSI_HOST *hd;
1826 int retval;
1810 1827
1811 /* If we can't locate our host adapter structure, return FAILED status. 1828 /* If we can't locate our host adapter structure, return FAILED status.
1812 */ 1829 */
1813 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){ 1830 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
1814 dtmprintk((KERN_WARNING MYNAM ": mptscsih_dev_reset: " 1831 dtmprintk((KERN_INFO MYNAM ": mptscsih_dev_reset: "
1815 "Can't locate host! (sc=%p)\n", 1832 "Can't locate host! (sc=%p)\n",
1816 SCpnt)); 1833 SCpnt));
1817 return FAILED; 1834 return FAILED;
@@ -1820,24 +1837,26 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
1820 if (hd->resetPending) 1837 if (hd->resetPending)
1821 return FAILED; 1838 return FAILED;
1822 1839
1823 printk(KERN_WARNING MYNAM ": %s: >> Attempting target reset! (sc=%p)\n", 1840 printk(KERN_WARNING MYNAM ": %s: attempting target reset! (sc=%p)\n",
1824 hd->ioc->name, SCpnt); 1841 hd->ioc->name, SCpnt);
1842 scsi_print_command(SCpnt);
1825 1843
1826 if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 1844 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
1827 SCpnt->device->channel, SCpnt->device->id, 1845 SCpnt->device->channel, SCpnt->device->id,
1828 0, 0, 5 /* 5 second timeout */) 1846 0, 0, 5 /* 5 second timeout */);
1829 < 0){ 1847
1830 /* The TM request failed and the subsequent FW-reload failed! 1848 printk (KERN_WARNING MYNAM ": %s: target reset: %s (sc=%p)\n",
1831 * Fatal error case. 1849 hd->ioc->name,
1832 */ 1850 ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
1833 printk(MYIOC_s_WARN_FMT "Error processing TaskMgmt request (sc=%p)\n", 1851
1834 hd->ioc->name, SCpnt); 1852 if (retval == 0)
1853 return SUCCESS;
1854
1855 if(retval != FAILED ) {
1835 hd->tmPending = 0; 1856 hd->tmPending = 0;
1836 hd->tmState = TM_STATE_NONE; 1857 hd->tmState = TM_STATE_NONE;
1837 return FAILED;
1838 } 1858 }
1839 1859 return FAILED;
1840 return SUCCESS;
1841} 1860}
1842 1861
1843/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1862/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1853,41 +1872,39 @@ int
1853mptscsih_bus_reset(struct scsi_cmnd * SCpnt) 1872mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
1854{ 1873{
1855 MPT_SCSI_HOST *hd; 1874 MPT_SCSI_HOST *hd;
1856 spinlock_t *host_lock = SCpnt->device->host->host_lock; 1875 int retval;
1857 1876
1858 /* If we can't locate our host adapter structure, return FAILED status. 1877 /* If we can't locate our host adapter structure, return FAILED status.
1859 */ 1878 */
1860 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){ 1879 if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
1861 dtmprintk((KERN_WARNING MYNAM ": mptscsih_bus_reset: " 1880 dtmprintk((KERN_INFO MYNAM ": mptscsih_bus_reset: "
1862 "Can't locate host! (sc=%p)\n", 1881 "Can't locate host! (sc=%p)\n",
1863 SCpnt ) ); 1882 SCpnt ) );
1864 return FAILED; 1883 return FAILED;
1865 } 1884 }
1866 1885
1867 printk(KERN_WARNING MYNAM ": %s: >> Attempting bus reset! (sc=%p)\n", 1886 printk(KERN_WARNING MYNAM ": %s: attempting bus reset! (sc=%p)\n",
1868 hd->ioc->name, SCpnt); 1887 hd->ioc->name, SCpnt);
1888 scsi_print_command(SCpnt);
1869 1889
1870 if (hd->timeouts < -1) 1890 if (hd->timeouts < -1)
1871 hd->timeouts++; 1891 hd->timeouts++;
1872 1892
1873 /* We are now ready to execute the task management request. */ 1893 retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1874 if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 1894 SCpnt->device->channel, 0, 0, 0, 5 /* 5 second timeout */);
1875 SCpnt->device->channel, 0, 0, 0, 5 /* 5 second timeout */)
1876 < 0){
1877 1895
1878 /* The TM request failed and the subsequent FW-reload failed! 1896 printk (KERN_WARNING MYNAM ": %s: bus reset: %s (sc=%p)\n",
1879 * Fatal error case. 1897 hd->ioc->name,
1880 */ 1898 ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
1881 printk(MYIOC_s_WARN_FMT 1899
1882 "Error processing TaskMgmt request (sc=%p)\n", 1900 if (retval == 0)
1883 hd->ioc->name, SCpnt); 1901 return SUCCESS;
1902
1903 if(retval != FAILED ) {
1884 hd->tmPending = 0; 1904 hd->tmPending = 0;
1885 hd->tmState = TM_STATE_NONE; 1905 hd->tmState = TM_STATE_NONE;
1886 spin_lock_irq(host_lock);
1887 return FAILED;
1888 } 1906 }
1889 1907 return FAILED;
1890 return SUCCESS;
1891} 1908}
1892 1909
1893/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 1910/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
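Note on the reworked error handlers: the abort, device-reset, and bus-reset paths above now share one shape -- capture the mptscsih_TMHandler() return value, log the outcome, return SUCCESS when it is zero, and clear the tmPending/tmState bookkeeping only when the request did not hard-fail. A minimal sketch of that shape follows; do_tm_request() is an illustrative stand-in for the mptscsih_TMHandler() call and its arguments, everything else comes from the driver context shown above.

/* Sketch only -- mirrors the retval-based flow of the handlers above.
 * do_tm_request() is a placeholder for mptscsih_TMHandler(); SUCCESS and
 * FAILED are the standard SCSI error-handling return codes. */
static int example_eh_handler(MPT_SCSI_HOST *hd, struct scsi_cmnd *SCpnt)
{
	int retval;

	retval = do_tm_request(hd, SCpnt);	/* 0 on success, < 0 on fatal error */

	printk(KERN_WARNING MYNAM ": %s: task mgmt: %s (sc=%p)\n",
	    hd->ioc->name, (retval == 0) ? "SUCCESS" : "FAILED", SCpnt);

	if (retval == 0)
		return SUCCESS;

	if (retval != FAILED) {
		/* TM was issued but did not complete cleanly: drop the pending
		 * state so a later recovery attempt can proceed. */
		hd->tmPending = 0;
		hd->tmState = TM_STATE_NONE;
	}
	return FAILED;
}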
@@ -2169,7 +2186,7 @@ mptscsih_slave_alloc(struct scsi_device *device)
2169 vdev->raidVolume = 0; 2186 vdev->raidVolume = 0;
2170 hd->Targets[device->id] = vdev; 2187 hd->Targets[device->id] = vdev;
2171 if (hd->ioc->bus_type == SCSI) { 2188 if (hd->ioc->bus_type == SCSI) {
2172 if (hd->ioc->spi_data.isRaid & (1 << device->id)) { 2189 if (hd->ioc->raid_data.isRaid & (1 << device->id)) {
2173 vdev->raidVolume = 1; 2190 vdev->raidVolume = 1;
2174 ddvtprintk((KERN_INFO 2191 ddvtprintk((KERN_INFO
2175 "RAID Volume @ id %d\n", device->id)); 2192 "RAID Volume @ id %d\n", device->id));
@@ -2180,22 +2197,7 @@ mptscsih_slave_alloc(struct scsi_device *device)
2180 2197
2181 out: 2198 out:
2182 vdev->num_luns++; 2199 vdev->num_luns++;
2183 return 0; 2200 device->hostdata = vdev;
2184}
2185
2186static int
2187mptscsih_is_raid_volume(MPT_SCSI_HOST *hd, uint id)
2188{
2189 int i;
2190
2191 if (!hd->ioc->spi_data.isRaid || !hd->ioc->spi_data.pIocPg3)
2192 return 0;
2193
2194 for (i = 0; i < hd->ioc->spi_data.pIocPg3->NumPhysDisks; i++) {
2195 if (id == hd->ioc->spi_data.pIocPg3->PhysDisk[i].PhysDiskID)
2196 return 1;
2197 }
2198
2199 return 0; 2201 return 0;
2200} 2202}
2201 2203
@@ -2226,7 +2228,7 @@ mptscsih_slave_destroy(struct scsi_device *device)
2226 hd->Targets[target] = NULL; 2228 hd->Targets[target] = NULL;
2227 2229
2228 if (hd->ioc->bus_type == SCSI) { 2230 if (hd->ioc->bus_type == SCSI) {
2229 if (mptscsih_is_raid_volume(hd, target)) { 2231 if (mptscsih_is_phys_disk(hd->ioc, target)) {
2230 hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3; 2232 hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3;
2231 } else { 2233 } else {
2232 hd->ioc->spi_data.dvStatus[target] = 2234 hd->ioc->spi_data.dvStatus[target] =
@@ -2439,6 +2441,7 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2439{ 2441{
2440 MPT_SCSI_HOST *hd; 2442 MPT_SCSI_HOST *hd;
2441 unsigned long flags; 2443 unsigned long flags;
2444 int ii;
2442 2445
2443 dtmprintk((KERN_WARNING MYNAM 2446 dtmprintk((KERN_WARNING MYNAM
2444 ": IOC %s_reset routed to SCSI host driver!\n", 2447 ": IOC %s_reset routed to SCSI host driver!\n",
@@ -2496,11 +2499,8 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2496 2499
2497 /* ScsiLookup initialization 2500 /* ScsiLookup initialization
2498 */ 2501 */
2499 { 2502 for (ii=0; ii < hd->ioc->req_depth; ii++)
2500 int ii; 2503 hd->ScsiLookup[ii] = NULL;
2501 for (ii=0; ii < hd->ioc->req_depth; ii++)
2502 hd->ScsiLookup[ii] = NULL;
2503 }
2504 2504
2505 /* 2. Chain Buffer initialization 2505 /* 2. Chain Buffer initialization
2506 */ 2506 */
@@ -2549,6 +2549,16 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
2549} 2549}
2550 2550
2551/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 2551/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2552/* work queue thread to clear the persistence table */
2553static void
2554mptscsih_sas_persist_clear_table(void * arg)
2555{
2556 MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
2557
2558 mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
2559}
2560
2561/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
2552int 2562int
2553mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) 2563mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2554{ 2564{
@@ -2558,18 +2568,18 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2558 devtprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", 2568 devtprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
2559 ioc->name, event)); 2569 ioc->name, event));
2560 2570
2571 if (ioc->sh == NULL ||
2572 ((hd = (MPT_SCSI_HOST *)ioc->sh->hostdata) == NULL))
2573 return 1;
2574
2561 switch (event) { 2575 switch (event) {
2562 case MPI_EVENT_UNIT_ATTENTION: /* 03 */ 2576 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
2563 /* FIXME! */ 2577 /* FIXME! */
2564 break; 2578 break;
2565 case MPI_EVENT_IOC_BUS_RESET: /* 04 */ 2579 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
2566 case MPI_EVENT_EXT_BUS_RESET: /* 05 */ 2580 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
2567 hd = NULL; 2581 if (hd && (ioc->bus_type == SCSI) && (hd->soft_resets < -1))
2568 if (ioc->sh) { 2582 hd->soft_resets++;
2569 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
2570 if (hd && (ioc->bus_type == SCSI) && (hd->soft_resets < -1))
2571 hd->soft_resets++;
2572 }
2573 break; 2583 break;
2574 case MPI_EVENT_LOGOUT: /* 09 */ 2584 case MPI_EVENT_LOGOUT: /* 09 */
2575 /* FIXME! */ 2585 /* FIXME! */
@@ -2588,69 +2598,24 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
2588 break; 2598 break;
2589 2599
2590 case MPI_EVENT_INTEGRATED_RAID: /* 0B */ 2600 case MPI_EVENT_INTEGRATED_RAID: /* 0B */
2601 {
2602 pMpiEventDataRaid_t pRaidEventData =
2603 (pMpiEventDataRaid_t) pEvReply->Data;
2591#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION 2604#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
2592 /* negoNvram set to 0 if DV enabled and to USE_NVRAM if 2605 /* Domain Validation Needed */
2593 * if DV disabled. Need to check for target mode. 2606 if (ioc->bus_type == SCSI &&
2594 */ 2607 pRaidEventData->ReasonCode ==
2595 hd = NULL; 2608 MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED)
2596 if (ioc->sh) 2609 mptscsih_set_dvflags_raid(hd, pRaidEventData->PhysDiskNum);
2597 hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
2598
2599 if (hd && (ioc->bus_type == SCSI) && (hd->negoNvram == 0)) {
2600 ScsiCfgData *pSpi;
2601 Ioc3PhysDisk_t *pPDisk;
2602 int numPDisk;
2603 u8 reason;
2604 u8 physDiskNum;
2605
2606 reason = (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16;
2607 if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) {
2608 /* New or replaced disk.
2609 * Set DV flag and schedule DV.
2610 */
2611 pSpi = &ioc->spi_data;
2612 physDiskNum = (le32_to_cpu(pEvReply->Data[0]) & 0xFF000000) >> 24;
2613 ddvtprintk(("DV requested for phys disk id %d\n", physDiskNum));
2614 if (pSpi->pIocPg3) {
2615 pPDisk = pSpi->pIocPg3->PhysDisk;
2616 numPDisk =pSpi->pIocPg3->NumPhysDisks;
2617
2618 while (numPDisk) {
2619 if (physDiskNum == pPDisk->PhysDiskNum) {
2620 pSpi->dvStatus[pPDisk->PhysDiskID] = (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
2621 pSpi->forceDv = MPT_SCSICFG_NEED_DV;
2622 ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID));
2623 break;
2624 }
2625 pPDisk++;
2626 numPDisk--;
2627 }
2628
2629 if (numPDisk == 0) {
2630 /* The physical disk that needs DV was not found
2631 * in the stored IOC Page 3. The driver must reload
2632 * this page. DV routine will set the NEED_DV flag for
2633 * all phys disks that have DV_NOT_DONE set.
2634 */
2635 pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
2636 ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n", physDiskNum));
2637 }
2638 }
2639 }
2640 }
2641#endif 2610#endif
2611 break;
2612 }
2642 2613
2643#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY) 2614 /* Persistent table is full. */
2644 printk("Raid Event RF: "); 2615 case MPI_EVENT_PERSISTENT_TABLE_FULL:
2645 { 2616 INIT_WORK(&mptscsih_persistTask,
2646 u32 *m = (u32 *)pEvReply; 2617 mptscsih_sas_persist_clear_table,(void *)ioc);
2647 int ii; 2618 schedule_work(&mptscsih_persistTask);
2648 int n = (int)pEvReply->MsgLength;
2649 for (ii=6; ii < n; ii++)
2650 printk(" %08x", le32_to_cpu(m[ii]));
2651 printk("\n");
2652 }
2653#endif
2654 break; 2619 break;
2655 2620
2656 case MPI_EVENT_NONE: /* 00 */ 2621 case MPI_EVENT_NONE: /* 00 */
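The new MPI_EVENT_PERSISTENT_TABLE_FULL case above defers the actual table clear to process context via a work item, since event processing runs in interrupt context and the clear operation is presumably too slow (or sleeping) to do there. A hedged sketch of that deferral follows, using the 2.6.13-era three-argument INIT_WORK(); the example_* names are illustrative, and only INIT_WORK(), schedule_work(), and mptbase_sas_persist_operation() appear in the patch.

#include <linux/workqueue.h>

static struct work_struct example_persist_work;

/* Runs later from keventd, in process context. */
static void example_persist_clear(void *arg)
{
	MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;

	mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
}

/* Called from the event handler (interrupt context): queue and return. */
static void example_handle_table_full(MPT_ADAPTER *ioc)
{
	INIT_WORK(&example_persist_work, example_persist_clear, (void *)ioc);
	schedule_work(&example_persist_work);
}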
@@ -2687,7 +2652,7 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
2687{ 2652{
2688 int indexed_lun, lun_index; 2653 int indexed_lun, lun_index;
2689 VirtDevice *vdev; 2654 VirtDevice *vdev;
2690 ScsiCfgData *pSpi; 2655 SpiCfgData *pSpi;
2691 char data_56; 2656 char data_56;
2692 2657
2693 dinitprintk((MYIOC_s_INFO_FMT "initTarget bus=%d id=%d lun=%d hd=%p\n", 2658 dinitprintk((MYIOC_s_INFO_FMT "initTarget bus=%d id=%d lun=%d hd=%p\n",
@@ -2794,7 +2759,7 @@ mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *
2794static void 2759static void
2795mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56) 2760mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
2796{ 2761{
2797 ScsiCfgData *pspi_data = &hd->ioc->spi_data; 2762 SpiCfgData *pspi_data = &hd->ioc->spi_data;
2798 int id = (int) target->target_id; 2763 int id = (int) target->target_id;
2799 int nvram; 2764 int nvram;
2800 VirtDevice *vdev; 2765 VirtDevice *vdev;
@@ -2973,11 +2938,13 @@ mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
2973static void 2938static void
2974mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq) 2939mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
2975{ 2940{
2941 MPT_ADAPTER *ioc = hd->ioc;
2976 u8 cmd; 2942 u8 cmd;
2977 ScsiCfgData *pSpi; 2943 SpiCfgData *pSpi;
2978 2944
2979 ddvtprintk((" set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n", 2945 ddvtprintk((MYIOC_s_NOTE_FMT
2980 pReq->TargetID, pReq->LUN[1], hd->negoNvram, pReq->CDB[0])); 2946 " set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n",
2947 hd->ioc->name, pReq->TargetID, pReq->LUN[1], hd->negoNvram, pReq->CDB[0]));
2981 2948
2982 if ((pReq->LUN[1] != 0) || (hd->negoNvram != 0)) 2949 if ((pReq->LUN[1] != 0) || (hd->negoNvram != 0))
2983 return; 2950 return;
@@ -2985,12 +2952,12 @@ mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
2985 cmd = pReq->CDB[0]; 2952 cmd = pReq->CDB[0];
2986 2953
2987 if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE)) { 2954 if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE)) {
2988 pSpi = &hd->ioc->spi_data; 2955 pSpi = &ioc->spi_data;
2989 if ((pSpi->isRaid & (1 << pReq->TargetID)) && pSpi->pIocPg3) { 2956 if ((ioc->raid_data.isRaid & (1 << pReq->TargetID)) && ioc->raid_data.pIocPg3) {
2990 /* Set NEED_DV for all hidden disks 2957 /* Set NEED_DV for all hidden disks
2991 */ 2958 */
2992 Ioc3PhysDisk_t *pPDisk = pSpi->pIocPg3->PhysDisk; 2959 Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
2993 int numPDisk = pSpi->pIocPg3->NumPhysDisks; 2960 int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
2994 2961
2995 while (numPDisk) { 2962 while (numPDisk) {
2996 pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV; 2963 pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV;
@@ -3004,6 +2971,50 @@ mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
3004 } 2971 }
3005} 2972}
3006 2973
2974/* mptscsih_raid_set_dv_flags()
2975 *
2976 * New or replaced disk. Set DV flag and schedule DV.
2977 */
2978static void
2979mptscsih_set_dvflags_raid(MPT_SCSI_HOST *hd, int id)
2980{
2981 MPT_ADAPTER *ioc = hd->ioc;
2982 SpiCfgData *pSpi = &ioc->spi_data;
2983 Ioc3PhysDisk_t *pPDisk;
2984 int numPDisk;
2985
2986 if (hd->negoNvram != 0)
2987 return;
2988
2989 ddvtprintk(("DV requested for phys disk id %d\n", id));
2990 if (ioc->raid_data.pIocPg3) {
2991 pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
2992 numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
2993 while (numPDisk) {
2994 if (id == pPDisk->PhysDiskNum) {
2995 pSpi->dvStatus[pPDisk->PhysDiskID] =
2996 (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
2997 pSpi->forceDv = MPT_SCSICFG_NEED_DV;
2998 ddvtprintk(("NEED_DV set for phys disk id %d\n",
2999 pPDisk->PhysDiskID));
3000 break;
3001 }
3002 pPDisk++;
3003 numPDisk--;
3004 }
3005
3006 if (numPDisk == 0) {
3007 /* The physical disk that needs DV was not found
3008 * in the stored IOC Page 3. The driver must reload
3009 * this page. DV routine will set the NEED_DV flag for
3010 * all phys disks that have DV_NOT_DONE set.
3011 */
3012 pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
3013 ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n",id));
3014 }
3015 }
3016}
3017
3007/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 3018/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
3008/* 3019/*
3009 * If no Target, bus reset on 1st I/O. Set the flag to 3020 * If no Target, bus reset on 1st I/O. Set the flag to
@@ -3091,7 +3102,7 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
3091 MPT_ADAPTER *ioc = hd->ioc; 3102 MPT_ADAPTER *ioc = hd->ioc;
3092 Config_t *pReq; 3103 Config_t *pReq;
3093 SCSIDevicePage1_t *pData; 3104 SCSIDevicePage1_t *pData;
3094 VirtDevice *pTarget; 3105 VirtDevice *pTarget=NULL;
3095 MPT_FRAME_HDR *mf; 3106 MPT_FRAME_HDR *mf;
3096 dma_addr_t dataDma; 3107 dma_addr_t dataDma;
3097 u16 req_idx; 3108 u16 req_idx;
@@ -3190,7 +3201,7 @@ mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
3190#endif 3201#endif
3191 3202
3192 if (flags & MPT_SCSICFG_BLK_NEGO) 3203 if (flags & MPT_SCSICFG_BLK_NEGO)
3193 negoFlags = MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC; 3204 negoFlags |= MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC;
3194 3205
3195 mptscsih_setDevicePage1Flags(width, factor, offset, 3206 mptscsih_setDevicePage1Flags(width, factor, offset,
3196 &requested, &configuration, negoFlags); 3207 &requested, &configuration, negoFlags);
@@ -4011,7 +4022,7 @@ mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum)
4011 4022
4012 /* If target Ptr NULL or if this target is NOT a disk, skip. 4023 /* If target Ptr NULL or if this target is NOT a disk, skip.
4013 */ 4024 */
4014 if ((pTarget) && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)){ 4025 if ((pTarget) && (pTarget->inq_data[0] == TYPE_DISK)){
4015 for (lun=0; lun <= MPT_LAST_LUN; lun++) { 4026 for (lun=0; lun <= MPT_LAST_LUN; lun++) {
4016 /* If LUN present, issue the command 4027 /* If LUN present, issue the command
4017 */ 4028 */
@@ -4106,9 +4117,9 @@ mptscsih_domainValidation(void *arg)
4106 4117
4107 if ((ioc->spi_data.forceDv & MPT_SCSICFG_RELOAD_IOC_PG3) != 0) { 4118 if ((ioc->spi_data.forceDv & MPT_SCSICFG_RELOAD_IOC_PG3) != 0) {
4108 mpt_read_ioc_pg_3(ioc); 4119 mpt_read_ioc_pg_3(ioc);
4109 if (ioc->spi_data.pIocPg3) { 4120 if (ioc->raid_data.pIocPg3) {
4110 Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk; 4121 Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
4111 int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks; 4122 int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
4112 4123
4113 while (numPDisk) { 4124 while (numPDisk) {
4114 if (ioc->spi_data.dvStatus[pPDisk->PhysDiskID] & MPT_SCSICFG_DV_NOT_DONE) 4125 if (ioc->spi_data.dvStatus[pPDisk->PhysDiskID] & MPT_SCSICFG_DV_NOT_DONE)
@@ -4147,7 +4158,7 @@ mptscsih_domainValidation(void *arg)
4147 isPhysDisk = mptscsih_is_phys_disk(ioc, id); 4158 isPhysDisk = mptscsih_is_phys_disk(ioc, id);
4148 if (isPhysDisk) { 4159 if (isPhysDisk) {
4149 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { 4160 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
4150 if (hd->ioc->spi_data.isRaid & (1 << ii)) { 4161 if (hd->ioc->raid_data.isRaid & (1 << ii)) {
4151 hd->ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_DV_PENDING; 4162 hd->ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_DV_PENDING;
4152 } 4163 }
4153 } 4164 }
@@ -4166,7 +4177,7 @@ mptscsih_domainValidation(void *arg)
4166 4177
4167 if (isPhysDisk) { 4178 if (isPhysDisk) {
4168 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) { 4179 for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
4169 if (hd->ioc->spi_data.isRaid & (1 << ii)) { 4180 if (hd->ioc->raid_data.isRaid & (1 << ii)) {
4170 hd->ioc->spi_data.dvStatus[ii] &= ~MPT_SCSICFG_DV_PENDING; 4181 hd->ioc->spi_data.dvStatus[ii] &= ~MPT_SCSICFG_DV_PENDING;
4171 } 4182 }
4172 } 4183 }
@@ -4188,21 +4199,21 @@ mptscsih_domainValidation(void *arg)
4188 4199
4189/* Search IOC page 3 to determine if this is hidden physical disk 4200/* Search IOC page 3 to determine if this is hidden physical disk
4190 */ 4201 */
4191static int 4202/* Search IOC page 3 to determine if this is hidden physical disk
4203 */
4204static int
4192mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id) 4205mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id)
4193{ 4206{
4194 if (ioc->spi_data.pIocPg3) { 4207 int i;
4195 Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk;
4196 int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks;
4197 4208
4198 while (numPDisk) { 4209 if (!ioc->raid_data.isRaid || !ioc->raid_data.pIocPg3)
4199 if (pPDisk->PhysDiskID == id) { 4210 return 0;
4200 return 1; 4211
4201 } 4212 for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
4202 pPDisk++; 4213 if (id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID)
4203 numPDisk--; 4214 return 1;
4204 }
4205 } 4215 }
4216
4206 return 0; 4217 return 0;
4207} 4218}
4208 4219
@@ -4408,7 +4419,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4408 /* Skip this ID? Set cfg.cfghdr.hdr to force config page write 4419 /* Skip this ID? Set cfg.cfghdr.hdr to force config page write
4409 */ 4420 */
4410 { 4421 {
4411 ScsiCfgData *pspi_data = &hd->ioc->spi_data; 4422 SpiCfgData *pspi_data = &hd->ioc->spi_data;
4412 if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) { 4423 if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) {
4413 /* Set the factor from nvram */ 4424 /* Set the factor from nvram */
4414 nfactor = (pspi_data->nvram[id] & MPT_NVRAM_SYNC_MASK) >> 8; 4425 nfactor = (pspi_data->nvram[id] & MPT_NVRAM_SYNC_MASK) >> 8;
@@ -4438,11 +4449,11 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4438 } 4449 }
4439 4450
4440 /* Finish iocmd inititialization - hidden or visible disk? */ 4451 /* Finish iocmd inititialization - hidden or visible disk? */
4441 if (ioc->spi_data.pIocPg3) { 4452 if (ioc->raid_data.pIocPg3) {
4442 /* Search IOC page 3 for matching id 4453 /* Search IOC page 3 for matching id
4443 */ 4454 */
4444 Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk; 4455 Ioc3PhysDisk_t *pPDisk = ioc->raid_data.pIocPg3->PhysDisk;
4445 int numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks; 4456 int numPDisk = ioc->raid_data.pIocPg3->NumPhysDisks;
4446 4457
4447 while (numPDisk) { 4458 while (numPDisk) {
4448 if (pPDisk->PhysDiskID == id) { 4459 if (pPDisk->PhysDiskID == id) {
@@ -4466,7 +4477,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4466 /* RAID Volume ID's may double for a physical device. If RAID but 4477 /* RAID Volume ID's may double for a physical device. If RAID but
4467 * not a physical ID as well, skip DV. 4478 * not a physical ID as well, skip DV.
4468 */ 4479 */
4469 if ((hd->ioc->spi_data.isRaid & (1 << id)) && !(iocmd.flags & MPT_ICFLAG_PHYS_DISK)) 4480 if ((hd->ioc->raid_data.isRaid & (1 << id)) && !(iocmd.flags & MPT_ICFLAG_PHYS_DISK))
4470 goto target_done; 4481 goto target_done;
4471 4482
4472 4483
@@ -4815,6 +4826,8 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4815 notDone = 0; 4826 notDone = 0;
4816 if (iocmd.flags & MPT_ICFLAG_ECHO) { 4827 if (iocmd.flags & MPT_ICFLAG_ECHO) {
4817 bufsize = ((pbuf1[2] & 0x1F) <<8) | pbuf1[3]; 4828 bufsize = ((pbuf1[2] & 0x1F) <<8) | pbuf1[3];
4829 if (pbuf1[0] & 0x01)
4830 iocmd.flags |= MPT_ICFLAG_EBOS;
4818 } else { 4831 } else {
4819 bufsize = pbuf1[1]<<16 | pbuf1[2]<<8 | pbuf1[3]; 4832 bufsize = pbuf1[1]<<16 | pbuf1[2]<<8 | pbuf1[3];
4820 } 4833 }
@@ -4911,6 +4924,9 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4911 } 4924 }
4912 iocmd.flags &= ~MPT_ICFLAG_DID_RESET; 4925 iocmd.flags &= ~MPT_ICFLAG_DID_RESET;
4913 4926
4927 if (iocmd.flags & MPT_ICFLAG_EBOS)
4928 goto skip_Reserve;
4929
4914 repeat = 5; 4930 repeat = 5;
4915 while (repeat && (!(iocmd.flags & MPT_ICFLAG_RESERVED))) { 4931 while (repeat && (!(iocmd.flags & MPT_ICFLAG_RESERVED))) {
4916 iocmd.cmd = RESERVE; 4932 iocmd.cmd = RESERVE;
@@ -4954,6 +4970,7 @@ mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
4954 } 4970 }
4955 } 4971 }
4956 4972
4973skip_Reserve:
4957 mptscsih_fillbuf(pbuf1, sz, patt, 1); 4974 mptscsih_fillbuf(pbuf1, sz, patt, 1);
4958 iocmd.cmd = WRITE_BUFFER; 4975 iocmd.cmd = WRITE_BUFFER;
4959 iocmd.data_dma = buf1_dma; 4976 iocmd.data_dma = buf1_dma;
@@ -5198,11 +5215,12 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
5198 * If not an LVD bus, the adapter minSyncFactor has been 5215 * If not an LVD bus, the adapter minSyncFactor has been
5199 * already throttled back. 5216 * already throttled back.
5200 */ 5217 */
5218 negoFlags = hd->ioc->spi_data.noQas;
5201 if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume) { 5219 if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume) {
5202 width = pTarget->maxWidth; 5220 width = pTarget->maxWidth;
5203 offset = pTarget->maxOffset; 5221 offset = pTarget->maxOffset;
5204 factor = pTarget->minSyncFactor; 5222 factor = pTarget->minSyncFactor;
5205 negoFlags = pTarget->negoFlags; 5223 negoFlags |= pTarget->negoFlags;
5206 } else { 5224 } else {
5207 if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) { 5225 if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
5208 data = hd->ioc->spi_data.nvram[id]; 5226 data = hd->ioc->spi_data.nvram[id];
@@ -5223,7 +5241,6 @@ mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
5223 } 5241 }
5224 5242
5225 /* Set the negotiation flags */ 5243 /* Set the negotiation flags */
5226 negoFlags = hd->ioc->spi_data.noQas;
5227 if (!width) 5244 if (!width)
5228 negoFlags |= MPT_TARGET_NO_NEGO_WIDE; 5245 negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
5229 5246
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 51c0255ac16e..971fda4b8b57 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/message/fusion/mptscsi.h 2 * linux/drivers/message/fusion/mptscsih.h
3 * High performance SCSI / Fibre Channel SCSI Host device driver. 3 * High performance SCSI / Fibre Channel SCSI Host device driver.
4 * For use with PCI chip/adapter(s): 4 * For use with PCI chip/adapter(s):
5 * LSIFC9xx/LSI409xx Fibre Channel 5 * LSIFC9xx/LSI409xx Fibre Channel
@@ -53,8 +53,8 @@
53 * SCSI Public stuff... 53 * SCSI Public stuff...
54 */ 54 */
55 55
56#define MPT_SCSI_CMD_PER_DEV_HIGH 31 56#define MPT_SCSI_CMD_PER_DEV_HIGH 64
57#define MPT_SCSI_CMD_PER_DEV_LOW 7 57#define MPT_SCSI_CMD_PER_DEV_LOW 32
58 58
59#define MPT_SCSI_CMD_PER_LUN 7 59#define MPT_SCSI_CMD_PER_LUN 7
60 60
@@ -77,6 +77,7 @@
77#define MPTSCSIH_MAX_WIDTH 1 77#define MPTSCSIH_MAX_WIDTH 1
78#define MPTSCSIH_MIN_SYNC 0x08 78#define MPTSCSIH_MIN_SYNC 0x08
79#define MPTSCSIH_SAF_TE 0 79#define MPTSCSIH_SAF_TE 0
80#define MPTSCSIH_PT_CLEAR 0
80 81
81 82
82#endif 83#endif
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 587d1274fd74..5c0e307d1d5d 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -199,7 +199,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
199 printk(MYIOC_s_WARN_FMT 199 printk(MYIOC_s_WARN_FMT
200 "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n", 200 "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
201 ioc->name, ioc); 201 ioc->name, ioc);
202 return -ENODEV; 202 return 0;
203 } 203 }
204 204
205 sh = scsi_host_alloc(&mptspi_driver_template, sizeof(MPT_SCSI_HOST)); 205 sh = scsi_host_alloc(&mptspi_driver_template, sizeof(MPT_SCSI_HOST));
diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c
index af32ab4e90cd..10432f665201 100644
--- a/drivers/message/i2o/config-osm.c
+++ b/drivers/message/i2o/config-osm.c
@@ -56,8 +56,11 @@ static int __init i2o_config_init(void)
56 return -EBUSY; 56 return -EBUSY;
57 } 57 }
58#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL 58#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
59 if (i2o_config_old_init()) 59 if (i2o_config_old_init()) {
60 osm_err("old config handler initialization failed\n");
60 i2o_driver_unregister(&i2o_config_driver); 61 i2o_driver_unregister(&i2o_config_driver);
62 return -EBUSY;
63 }
61#endif 64#endif
62 65
63 return 0; 66 return 0;
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index a851d65c7cfe..a260f83bcb02 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -48,8 +48,8 @@ struct ucb1x00_ts {
48 u16 x_res; 48 u16 x_res;
49 u16 y_res; 49 u16 y_res;
50 50
51 int restart:1; 51 unsigned int restart:1;
52 int adcsync:1; 52 unsigned int adcsync:1;
53}; 53};
54 54
55static int adcsync; 55static int adcsync;
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
index 9a087c1fb0b7..24f670b5a4f3 100644
--- a/drivers/mtd/devices/docecc.c
+++ b/drivers/mtd/devices/docecc.c
@@ -40,7 +40,7 @@
40#include <linux/mtd/mtd.h> 40#include <linux/mtd/mtd.h>
41#include <linux/mtd/doc2000.h> 41#include <linux/mtd/doc2000.h>
42 42
43#define DEBUG 0 43#define DEBUG_ECC 0
44/* need to undef it (from asm/termbits.h) */ 44/* need to undef it (from asm/termbits.h) */
45#undef B0 45#undef B0
46 46
@@ -249,7 +249,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
249 lambda[j] ^= Alpha_to[modnn(u + tmp)]; 249 lambda[j] ^= Alpha_to[modnn(u + tmp)];
250 } 250 }
251 } 251 }
252#if DEBUG >= 1 252#if DEBUG_ECC >= 1
253 /* Test code that verifies the erasure locator polynomial just constructed 253 /* Test code that verifies the erasure locator polynomial just constructed
254 Needed only for decoder debugging. */ 254 Needed only for decoder debugging. */
255 255
@@ -276,7 +276,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
276 count = -1; 276 count = -1;
277 goto finish; 277 goto finish;
278 } 278 }
279#if DEBUG >= 2 279#if DEBUG_ECC >= 2
280 printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n"); 280 printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n");
281 for (i = 0; i < count; i++) 281 for (i = 0; i < count; i++)
282 printf("%d ", loc[i]); 282 printf("%d ", loc[i]);
@@ -409,7 +409,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
409 den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])]; 409 den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])];
410 } 410 }
411 if (den == 0) { 411 if (den == 0) {
412#if DEBUG >= 1 412#if DEBUG_ECC >= 1
413 printf("\n ERROR: denominator = 0\n"); 413 printf("\n ERROR: denominator = 0\n");
414#endif 414#endif
415 /* Convert to dual- basis */ 415 /* Convert to dual- basis */
diff --git a/drivers/mtd/maps/bast-flash.c b/drivers/mtd/maps/bast-flash.c
index 0c45464e3f7b..0ba0ff7d43b9 100644
--- a/drivers/mtd/maps/bast-flash.c
+++ b/drivers/mtd/maps/bast-flash.c
@@ -39,7 +39,6 @@
39#include <linux/mtd/partitions.h> 39#include <linux/mtd/partitions.h>
40 40
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/mach-types.h>
43#include <asm/mach/flash.h> 42#include <asm/mach/flash.h>
44 43
45#include <asm/arch/map.h> 44#include <asm/arch/map.h>
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 3e94b616743d..a9f86c7fbd52 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -30,7 +30,6 @@
30 30
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/hardware.h> 32#include <asm/hardware.h>
33#include <asm/mach-types.h>
34#include <asm/mach/flash.h> 33#include <asm/mach/flash.h>
35 34
36#include <linux/reboot.h> 35#include <linux/reboot.h>
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 5afe660aa2c4..3fcc32884074 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -26,7 +26,6 @@
26#include <linux/ioport.h> 26#include <linux/ioport.h>
27#include <linux/device.h> 27#include <linux/device.h>
28#include <asm/io.h> 28#include <asm/io.h>
29#include <asm/mach-types.h>
30#include <asm/mach/flash.h> 29#include <asm/mach/flash.h>
31 30
32#include <linux/reboot.h> 31#include <linux/reboot.h>
@@ -254,6 +253,6 @@ module_init(ixp4xx_flash_init);
254module_exit(ixp4xx_flash_exit); 253module_exit(ixp4xx_flash_exit);
255 254
256MODULE_LICENSE("GPL"); 255MODULE_LICENSE("GPL");
257MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems") 256MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
258MODULE_AUTHOR("Deepak Saxena"); 257MODULE_AUTHOR("Deepak Saxena");
259 258
diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c
index 8cc71409a328..b17bca657daf 100644
--- a/drivers/mtd/maps/omap_nor.c
+++ b/drivers/mtd/maps/omap_nor.c
@@ -42,7 +42,6 @@
42 42
43#include <asm/io.h> 43#include <asm/io.h>
44#include <asm/hardware.h> 44#include <asm/hardware.h>
45#include <asm/mach-types.h>
46#include <asm/mach/flash.h> 45#include <asm/mach/flash.h>
47#include <asm/arch/tc.h> 46#include <asm/arch/tc.h>
48 47
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 52385705da09..8dcaa357b4bb 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -21,7 +21,6 @@
21#include <linux/mtd/partitions.h> 21#include <linux/mtd/partitions.h>
22#include <linux/mtd/concat.h> 22#include <linux/mtd/concat.h>
23 23
24#include <asm/mach-types.h>
25#include <asm/io.h> 24#include <asm/io.h>
26#include <asm/sizes.h> 25#include <asm/sizes.h>
27#include <asm/mach/flash.h> 26#include <asm/mach/flash.h>
diff --git a/drivers/mtd/maps/sharpsl-flash.c b/drivers/mtd/maps/sharpsl-flash.c
index d15da6fd84c1..b7f093fbf9b0 100644
--- a/drivers/mtd/maps/sharpsl-flash.c
+++ b/drivers/mtd/maps/sharpsl-flash.c
@@ -82,7 +82,7 @@ int __init init_sharpsl(void)
82 } else if (machine_is_tosa()) { 82 } else if (machine_is_tosa()) {
83 sharpsl_partitions[0].size=0x006a0000; 83 sharpsl_partitions[0].size=0x006a0000;
84 sharpsl_partitions[0].offset=0x00160000; 84 sharpsl_partitions[0].offset=0x00160000;
85 } else if (machine_is_spitz()) { 85 } else if (machine_is_spitz() || machine_is_akita() || machine_is_borzoi()) {
86 sharpsl_partitions[0].size=0x006b0000; 86 sharpsl_partitions[0].size=0x006b0000;
87 sharpsl_partitions[0].offset=0x00140000; 87 sharpsl_partitions[0].offset=0x00140000;
88 } else { 88 } else {
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 891e3a1b9110..b47ebcb31e0f 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -58,7 +58,6 @@
58#include <linux/mtd/partitions.h> 58#include <linux/mtd/partitions.h>
59 59
60#include <asm/io.h> 60#include <asm/io.h>
61#include <asm/mach-types.h>
62#include <asm/hardware/clock.h> 61#include <asm/hardware/clock.h>
63 62
64#include <asm/arch/regs-nand.h> 63#include <asm/arch/regs-nand.h>
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 9853b87bb756..88b5b5b40b43 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -221,10 +221,16 @@ sharpsl_nand_init(void)
221 sharpsl_partition_info[1].size=25 * 1024 * 1024; 221 sharpsl_partition_info[1].size=25 * 1024 * 1024;
222 } else if (machine_is_husky()) { 222 } else if (machine_is_husky()) {
223 sharpsl_partition_info[1].size=53 * 1024 * 1024; 223 sharpsl_partition_info[1].size=53 * 1024 * 1024;
224 } 224 } else if (machine_is_spitz()) {
225 sharpsl_partition_info[1].size=5 * 1024 * 1024;
226 } else if (machine_is_akita()) {
227 sharpsl_partition_info[1].size=58 * 1024 * 1024;
228 } else if (machine_is_borzoi()) {
229 sharpsl_partition_info[1].size=32 * 1024 * 1024;
230 }
225 } 231 }
226 232
227 if (machine_is_husky()) { 233 if (machine_is_husky() || machine_is_borzoi()) {
228 /* Need to use small eraseblock size for backward compatibility */ 234 /* Need to use small eraseblock size for backward compatibility */
229 sharpsl_mtd->flags |= MTD_NO_VIRTBLOCKS; 235 sharpsl_mtd->flags |= MTD_NO_VIRTBLOCKS;
230 } 236 }
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 34b80de34fae..bc537440ca02 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -353,8 +353,6 @@ struct cp_private {
353 353
354 struct net_device_stats net_stats; 354 struct net_device_stats net_stats;
355 struct cp_extra_stats cp_stats; 355 struct cp_extra_stats cp_stats;
356 struct cp_dma_stats *nic_stats;
357 dma_addr_t nic_stats_dma;
358 356
359 unsigned rx_tail ____cacheline_aligned; 357 unsigned rx_tail ____cacheline_aligned;
360 struct cp_desc *rx_ring; 358 struct cp_desc *rx_ring;
@@ -1143,10 +1141,6 @@ static int cp_alloc_rings (struct cp_private *cp)
1143 cp->rx_ring = mem; 1141 cp->rx_ring = mem;
1144 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; 1142 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1145 1143
1146 mem += (CP_RING_BYTES - CP_STATS_SIZE);
1147 cp->nic_stats = mem;
1148 cp->nic_stats_dma = cp->ring_dma + (CP_RING_BYTES - CP_STATS_SIZE);
1149
1150 return cp_init_rings(cp); 1144 return cp_init_rings(cp);
1151} 1145}
1152 1146
@@ -1187,7 +1181,6 @@ static void cp_free_rings (struct cp_private *cp)
1187 pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma); 1181 pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1188 cp->rx_ring = NULL; 1182 cp->rx_ring = NULL;
1189 cp->tx_ring = NULL; 1183 cp->tx_ring = NULL;
1190 cp->nic_stats = NULL;
1191} 1184}
1192 1185
1193static int cp_open (struct net_device *dev) 1186static int cp_open (struct net_device *dev)
@@ -1516,13 +1509,17 @@ static void cp_get_ethtool_stats (struct net_device *dev,
1516 struct ethtool_stats *estats, u64 *tmp_stats) 1509 struct ethtool_stats *estats, u64 *tmp_stats)
1517{ 1510{
1518 struct cp_private *cp = netdev_priv(dev); 1511 struct cp_private *cp = netdev_priv(dev);
1512 struct cp_dma_stats *nic_stats;
1513 dma_addr_t dma;
1519 int i; 1514 int i;
1520 1515
1521 memset(cp->nic_stats, 0, sizeof(struct cp_dma_stats)); 1516 nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
1517 if (!nic_stats)
1518 return;
1522 1519
1523 /* begin NIC statistics dump */ 1520 /* begin NIC statistics dump */
1524 cpw32(StatsAddr + 4, (cp->nic_stats_dma >> 16) >> 16); 1521 cpw32(StatsAddr + 4, (u64)dma >> 32);
1525 cpw32(StatsAddr, (cp->nic_stats_dma & 0xffffffff) | DumpStats); 1522 cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);
1526 cpr32(StatsAddr); 1523 cpr32(StatsAddr);
1527 1524
1528 for (i = 0; i < 1000; i++) { 1525 for (i = 0; i < 1000; i++) {
@@ -1532,24 +1529,27 @@ static void cp_get_ethtool_stats (struct net_device *dev,
1532 } 1529 }
1533 cpw32(StatsAddr, 0); 1530 cpw32(StatsAddr, 0);
1534 cpw32(StatsAddr + 4, 0); 1531 cpw32(StatsAddr + 4, 0);
1532 cpr32(StatsAddr);
1535 1533
1536 i = 0; 1534 i = 0;
1537 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_ok); 1535 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1538 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok); 1536 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1539 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->tx_err); 1537 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1540 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_err); 1538 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1541 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->rx_fifo); 1539 tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1542 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->frame_align); 1540 tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1543 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_1col); 1541 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1544 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->tx_ok_mcol); 1542 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1545 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_phys); 1543 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1546 tmp_stats[i++] = le64_to_cpu(cp->nic_stats->rx_ok_bcast); 1544 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1547 tmp_stats[i++] = le32_to_cpu(cp->nic_stats->rx_ok_mcast); 1545 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1548 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_abort); 1546 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1549 tmp_stats[i++] = le16_to_cpu(cp->nic_stats->tx_underrun); 1547 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1550 tmp_stats[i++] = cp->cp_stats.rx_frags; 1548 tmp_stats[i++] = cp->cp_stats.rx_frags;
1551 if (i != CP_NUM_STATS) 1549 if (i != CP_NUM_STATS)
1552 BUG(); 1550 BUG();
1551
1552 pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
1553} 1553}
1554 1554
1555static struct ethtool_ops cp_ethtool_ops = { 1555static struct ethtool_ops cp_ethtool_ops = {
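The reworked cp_get_ethtool_stats() above allocates its DMA-coherent statistics buffer per call and frees it before returning, instead of keeping a nic_stats slice of the ring allocation alive for the device's lifetime. A minimal sketch of that allocate/program/free shape, with the register programming elided; cp_example_dump_stats() and its len parameter are illustrative names, the pci_*_consistent calls are the ones used in the patch.

#include <linux/pci.h>

/* Sketch: short-lived consistent buffer for a hardware counter dump. */
static int cp_example_dump_stats(struct pci_dev *pdev, size_t len)
{
	void *buf;
	dma_addr_t dma;

	buf = pci_alloc_consistent(pdev, len, &dma);
	if (!buf)
		return -ENOMEM;

	/* ...write 'dma' into the NIC's stats-address registers, poll until
	 * the dump-in-progress bit clears, then read counters out of 'buf'... */

	pci_free_consistent(pdev, len, buf, dma);
	return 0;
}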
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index 6d76f3a99b17..f87027420081 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -1094,7 +1094,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1094 1094
1095 outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD); 1095 outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
1096 1096
1097 if (inb_p(e8390_base) & E8390_TRANS) 1097 if (inb_p(e8390_base + E8390_CMD) & E8390_TRANS)
1098 { 1098 {
1099 printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n", 1099 printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n",
1100 dev->name); 1100 dev->name);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 54fff9c2e802..2a908c4690a7 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -548,6 +548,14 @@ config SUNGEM
548 Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also 548 Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also
549 <http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>. 549 <http://www.sun.com/products-n-solutions/hardware/docs/pdf/806-3985-10.pdf>.
550 550
551config CASSINI
552 tristate "Sun Cassini support"
553 depends on NET_ETHERNET && PCI
554 select CRC32
555 help
556 Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
557 <http://www.sun.com/products-n-solutions/hardware/docs/pdf/817-4341-10.pdf>
558
551config NET_VENDOR_3COM 559config NET_VENDOR_3COM
552 bool "3COM cards" 560 bool "3COM cards"
553 depends on NET_ETHERNET && (ISA || EISA || MCA || PCI) 561 depends on NET_ETHERNET && (ISA || EISA || MCA || PCI)
@@ -1951,7 +1959,7 @@ config SKGE
1951 ---help--- 1959 ---help---
1952 This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx 1960 This driver support the Marvell Yukon or SysKonnect SK-98xx/SK-95xx
1953 and related Gigabit Ethernet adapters. It is a new smaller driver 1961 and related Gigabit Ethernet adapters. It is a new smaller driver
1954 driver with better performance and more complete ethtool support. 1962 with better performance and more complete ethtool support.
1955 1963
1956 It does not support the link failover and network management 1964 It does not support the link failover and network management
1957 features that "portable" vendor supplied sk98lin driver does. 1965 features that "portable" vendor supplied sk98lin driver does.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 8645c843cf4d..8aeec9f2495b 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_SUNQE) += sunqe.o
28obj-$(CONFIG_SUNBMAC) += sunbmac.o 28obj-$(CONFIG_SUNBMAC) += sunbmac.o
29obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o 29obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o
30obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o 30obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o
31obj-$(CONFIG_CASSINI) += cassini.o
31 32
32obj-$(CONFIG_MACE) += mace.o 33obj-$(CONFIG_MACE) += mace.o
33obj-$(CONFIG_BMAC) += bmac.o 34obj-$(CONFIG_BMAC) += bmac.o
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 52c77cbe8c62..1f0302735416 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -160,7 +160,7 @@ static int __init com90io_probe(struct net_device *dev)
160 return -ENODEV; 160 return -ENODEV;
161 } 161 }
162 if (!request_region(ioaddr, ARCNET_TOTAL_SIZE, "com90io probe")) { 162 if (!request_region(ioaddr, ARCNET_TOTAL_SIZE, "com90io probe")) {
163 BUGMSG(D_INIT_REASONS, "IO check_region %x-%x failed.\n", 163 BUGMSG(D_INIT_REASONS, "IO request_region %x-%x failed.\n",
164 ioaddr, ioaddr + ARCNET_TOTAL_SIZE - 1); 164 ioaddr, ioaddr + ARCNET_TOTAL_SIZE - 1);
165 return -ENXIO; 165 return -ENXIO;
166 } 166 }
@@ -242,7 +242,7 @@ static int __init com90io_found(struct net_device *dev)
242 BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq); 242 BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
243 return -ENODEV; 243 return -ENODEV;
244 } 244 }
245 /* Reserve the I/O region - guaranteed to work by check_region */ 245 /* Reserve the I/O region */
246 if (!request_region(dev->base_addr, ARCNET_TOTAL_SIZE, "arcnet (COM90xx-IO)")) { 246 if (!request_region(dev->base_addr, ARCNET_TOTAL_SIZE, "arcnet (COM90xx-IO)")) {
247 free_irq(dev->irq, dev); 247 free_irq(dev->irq, dev);
248 return -EBUSY; 248 return -EBUSY;
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 9b659e3c8d67..c56d86d371a9 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -15,16 +15,13 @@
15 */ 15 */
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/fcntl.h>
19#include <linux/interrupt.h> 18#include <linux/interrupt.h>
20#include <linux/ioport.h> 19#include <linux/ioport.h>
21#include <linux/in.h>
22#include <linux/slab.h> 20#include <linux/slab.h>
23#include <linux/string.h> 21#include <linux/string.h>
24#include <linux/errno.h> 22#include <linux/errno.h>
25#include <linux/netdevice.h> 23#include <linux/netdevice.h>
26#include <linux/etherdevice.h> 24#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/delay.h> 25#include <linux/delay.h>
29#include <linux/init.h> 26#include <linux/init.h>
30#include <linux/crc32.h> 27#include <linux/crc32.h>
@@ -33,7 +30,6 @@
33#include <asm/system.h> 30#include <asm/system.h>
34#include <asm/irq.h> 31#include <asm/irq.h>
35#include <asm/io.h> 32#include <asm/io.h>
36#include <asm/dma.h>
37 33
38#define TX_BUFFERS 15 34#define TX_BUFFERS 15
39#define RX_BUFFERS 25 35#define RX_BUFFERS 25
@@ -85,7 +81,7 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg)
85 u_short v; 81 u_short v;
86 __asm__( 82 __asm__(
87 "str%?h %1, [%2] @ NAT_RAP\n\t" 83 "str%?h %1, [%2] @ NAT_RAP\n\t"
88 "str%?h %0, [%2, #8] @ NET_IDP\n\t" 84 "ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
89 : "=r" (v) 85 : "=r" (v)
90 : "r" (reg), "r" (ISAIO_BASE + 0x0464)); 86 : "r" (reg), "r" (ISAIO_BASE + 0x0464));
91 return v; 87 return v;
@@ -288,7 +284,7 @@ static void am79c961_timer(unsigned long data)
288 else if (!lnkstat && carrier) 284 else if (!lnkstat && carrier)
289 netif_carrier_off(dev); 285 netif_carrier_off(dev);
290 286
291 mod_timer(&priv->timer, jiffies + 5*HZ); 287 mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500));
292} 288}
293 289
294/* 290/*
@@ -709,13 +705,9 @@ static int __init am79c961_init(void)
709 goto release; 705 goto release;
710 706
711 am79c961_banner(); 707 am79c961_banner();
712 printk(KERN_INFO "%s: ether address ", dev->name);
713 708
714 /* Retrive and print the ethernet address. */ 709 for (i = 0; i < 6; i++)
715 for (i = 0; i < 6; i++) {
716 dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff; 710 dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff;
717 printk (i == 5 ? "%02x\n" : "%02x:", dev->dev_addr[i]);
718 }
719 711
720 spin_lock_init(&priv->chip_lock); 712 spin_lock_init(&priv->chip_lock);
721 init_timer(&priv->timer); 713 init_timer(&priv->timer);
@@ -736,8 +728,14 @@ static int __init am79c961_init(void)
736#endif 728#endif
737 729
738 ret = register_netdev(dev); 730 ret = register_netdev(dev);
739 if (ret == 0) 731 if (ret == 0) {
732 printk(KERN_INFO "%s: ether address ", dev->name);
733
734 for (i = 0; i < 6; i++)
735 printk (i == 5 ? "%02x\n" : "%02x:", dev->dev_addr[i]);
736
740 return 0; 737 return 0;
738 }
741 739
742release: 740release:
743 release_region(dev->base_addr, 0x18); 741 release_region(dev->base_addr, 0x18);
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 8dc657fc8afb..60dba4a1ca5c 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -218,7 +218,7 @@ void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
218 218
219 219
220static inline 220static inline
221volatile unsigned short bmread(struct net_device *dev, unsigned long reg_offset ) 221unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
222{ 222{
223 return in_le16((void __iomem *)dev->base_addr + reg_offset); 223 return in_le16((void __iomem *)dev->base_addr + reg_offset);
224} 224}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 94c9f68dd16b..bf81cd45e4d4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1653,7 +1653,8 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1653 int old_features = bond_dev->features; 1653 int old_features = bond_dev->features;
1654 int res = 0; 1654 int res = 0;
1655 1655
1656 if (slave_dev->do_ioctl == NULL) { 1656 if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
1657 slave_dev->do_ioctl == NULL) {
1657 printk(KERN_WARNING DRV_NAME 1658 printk(KERN_WARNING DRV_NAME
1658 ": Warning : no link monitoring support for %s\n", 1659 ": Warning : no link monitoring support for %s\n",
1659 slave_dev->name); 1660 slave_dev->name);
@@ -2775,7 +2776,7 @@ static u32 bond_glean_dev_ip(struct net_device *dev)
2775 return 0; 2776 return 0;
2776 2777
2777 rcu_read_lock(); 2778 rcu_read_lock();
2778 idev = __in_dev_get(dev); 2779 idev = __in_dev_get_rcu(dev);
2779 if (!idev) 2780 if (!idev)
2780 goto out; 2781 goto out;
2781 2782
@@ -2879,6 +2880,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2879 * This target is not on a VLAN 2880 * This target is not on a VLAN
2880 */ 2881 */
2881 if (rt->u.dst.dev == bond->dev) { 2882 if (rt->u.dst.dev == bond->dev) {
2883 ip_rt_put(rt);
2882 dprintk("basa: rtdev == bond->dev: arp_send\n"); 2884 dprintk("basa: rtdev == bond->dev: arp_send\n");
2883 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2885 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2884 bond->master_ip, 0); 2886 bond->master_ip, 0);
@@ -2898,6 +2900,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2898 } 2900 }
2899 2901
2900 if (vlan_id) { 2902 if (vlan_id) {
2903 ip_rt_put(rt);
2901 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2904 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2902 vlan->vlan_ip, vlan_id); 2905 vlan->vlan_ip, vlan_id);
2903 continue; 2906 continue;
@@ -2909,6 +2912,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2909 bond->dev->name, NIPQUAD(fl.fl4_dst), 2912 bond->dev->name, NIPQUAD(fl.fl4_dst),
2910 rt->u.dst.dev ? rt->u.dst.dev->name : "NULL"); 2913 rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
2911 } 2914 }
2915 ip_rt_put(rt);
2912 } 2916 }
2913} 2917}
2914 2918
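The three ip_rt_put() additions above balance the reference taken by ip_route_output_key() on every way out of the loop body in bond_arp_send_all(), not only the final fall-through, which closes a route reference leak when the ARP target sits on the bond itself or on a VLAN. A hedged sketch of the balanced pattern; the function wrapper and the elided arp_send() call are illustrative, only ip_route_output_key(), ip_rt_put(), and the rt->u.dst.dev test come from the patch.

#include <net/route.h>

static void example_arp_route_refcounts(struct bonding *bond, u32 target)
{
	struct rtable *rt;
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	fl.fl4_dst = target;

	if (ip_route_output_key(&rt, &fl))
		return;				/* no route: nothing to put */

	if (rt->u.dst.dev == bond->dev) {
		ip_rt_put(rt);			/* drop the ref before sending */
		/* ...arp_send() on the active slave... */
		return;
	}

	ip_rt_put(rt);				/* every other exit path puts it too */
}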
@@ -5036,6 +5040,14 @@ static int __init bonding_init(void)
5036 return 0; 5040 return 0;
5037 5041
5038out_err: 5042out_err:
5043 /*
5044 * rtnl_unlock() will run netdev_run_todo(), putting the
5045 * thus-far-registered bonding devices into a state which
 5046 * unregister_netdevice() will accept
5047 */
5048 rtnl_unlock();
5049 rtnl_lock();
5050
5039 /* free and unregister all bonds that were successfully added */ 5051 /* free and unregister all bonds that were successfully added */
5040 bond_free_all(); 5052 bond_free_all();
5041 5053
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
new file mode 100644
index 000000000000..2e617424d3fb
--- /dev/null
+++ b/drivers/net/cassini.c
@@ -0,0 +1,5237 @@
1/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
2 *
3 * Copyright (C) 2004 Sun Microsystems Inc.
4 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation; either version 2 of the
9 * License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
19 * 02111-1307, USA.
20 *
21 * This driver uses the sungem driver (c) David Miller
22 * (davem@redhat.com) as its basis.
23 *
24 * The cassini chip has a number of features that distinguish it from
25 * the gem chip:
26 * 4 transmit descriptor rings that are used for either QoS (VLAN) or
27 * load balancing (non-VLAN mode)
28 * batching of multiple packets
29 * multiple CPU dispatching
30 * page-based RX descriptor engine with separate completion rings
31 * Gigabit support (GMII and PCS interface)
32 * MIF link up/down detection works
33 *
34 * RX is handled by page sized buffers that are attached as fragments to
35 * the skb. here's what's done:
36 * -- driver allocates pages at a time and keeps reference counts
37 * on them.
38 * -- the upper protocol layers assume that the header is in the skb
39 * itself. as a result, cassini will copy a small amount (64 bytes)
40 * to make them happy.
41 * -- driver appends the rest of the data pages as frags to skbuffs
42 * and increments the reference count
43 * -- on page reclamation, the driver swaps the page with a spare page.
44 * if that page is still in use, it frees its reference to that page,
 45 * and allocates a new page for use. otherwise, it just recycles
 46 * the page.
47 *
48 * NOTE: cassini can parse the header. however, it's not worth it
49 * as long as the network stack requires a header copy.
50 *
51 * TX has 4 queues. currently these queues are used in a round-robin
52 * fashion for load balancing. They can also be used for QoS. for that
53 * to work, however, QoS information needs to be exposed down to the driver
 54 * level so that subqueues get targeted to particular transmit rings.
55 * alternatively, the queues can be configured via use of the all-purpose
56 * ioctl.
57 *
58 * RX DATA: the rx completion ring has all the info, but the rx desc
59 * ring has all of the data. RX can conceivably come in under multiple
60 * interrupts, but the INT# assignment needs to be set up properly by
61 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
62 * that. also, the two descriptor rings are designed to distinguish between
63 * encrypted and non-encrypted packets, but we use them for buffering
64 * instead.
65 *
66 * by default, the selective clear mask is set up to process rx packets.
67 */
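The RX scheme described in the comment above hinges on page reference counting: the driver keeps its own reference on every page it posts to the hardware, copies a small header into the skb's linear area, and attaches the remainder as a page fragment whose extra reference belongs to the skb. A simplified, hedged sketch of the frag-attach step; example_attach_rx_frag() is an illustrative helper, while get_page() and the skb_frag_t field names match the 2.6-era structures.

#include <linux/skbuff.h>
#include <linux/mm.h>

/* Sketch: hang 'len' bytes of 'page' (starting at 'off') off an skb as a
 * fragment, taking an extra page reference owned by the skb from then on. */
static void example_attach_rx_frag(struct sk_buff *skb, struct page *page,
				   int off, int len)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags];

	get_page(page);
	frag->page = page;
	frag->page_offset = off;
	frag->size = len;
	skb_shinfo(skb)->nr_frags++;

	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;
}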
68
69#include <linux/config.h>
70#include <linux/version.h>
71
72#include <linux/module.h>
73#include <linux/kernel.h>
74#include <linux/types.h>
75#include <linux/compiler.h>
76#include <linux/slab.h>
77#include <linux/delay.h>
78#include <linux/init.h>
79#include <linux/ioport.h>
80#include <linux/pci.h>
81#include <linux/mm.h>
82#include <linux/highmem.h>
83#include <linux/list.h>
84#include <linux/dma-mapping.h>
85
86#include <linux/netdevice.h>
87#include <linux/etherdevice.h>
88#include <linux/skbuff.h>
89#include <linux/ethtool.h>
90#include <linux/crc32.h>
91#include <linux/random.h>
92#include <linux/mii.h>
93#include <linux/ip.h>
94#include <linux/tcp.h>
95
96#include <net/checksum.h>
97
98#include <asm/atomic.h>
99#include <asm/system.h>
100#include <asm/io.h>
101#include <asm/byteorder.h>
102#include <asm/uaccess.h>
103
104#define cas_page_map(x) kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
105#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
106#define CAS_NCPUS num_online_cpus()
107
108#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
109#define USE_NAPI
110#define cas_skb_release(x) netif_receive_skb(x)
111#else
112#define cas_skb_release(x) netif_rx(x)
113#endif
114
115/* select which firmware to use */
116#define USE_HP_WORKAROUND
117#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
118#define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */
119
120#include "cassini.h"
121
122#define USE_TX_COMPWB /* use completion writeback registers */
123#define USE_CSMA_CD_PROTO /* standard CSMA/CD */
124#define USE_RX_BLANK /* hw interrupt mitigation */
125#undef USE_ENTROPY_DEV /* don't test for entropy device */
126
127/* NOTE: these aren't usable unless PCI interrupts can be assigned.
128 * also, we need to make cp->lock finer-grained.
129 */
130#undef USE_PCI_INTB
131#undef USE_PCI_INTC
132#undef USE_PCI_INTD
133#undef USE_QOS
134
135#undef USE_VPD_DEBUG /* debug vpd information if defined */
136
137/* rx processing options */
138#define USE_PAGE_ORDER /* specify to allocate large rx pages */
139#define RX_DONT_BATCH 0 /* if 1, don't batch flows */
140#define RX_COPY_ALWAYS 0 /* if 0, use frags */
141#define RX_COPY_MIN 64 /* copy a little to make upper layers happy */
142#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */
143
144#define DRV_MODULE_NAME "cassini"
145#define PFX DRV_MODULE_NAME ": "
146#define DRV_MODULE_VERSION "1.4"
147#define DRV_MODULE_RELDATE "1 July 2004"
148
149#define CAS_DEF_MSG_ENABLE \
150 (NETIF_MSG_DRV | \
151 NETIF_MSG_PROBE | \
152 NETIF_MSG_LINK | \
153 NETIF_MSG_TIMER | \
154 NETIF_MSG_IFDOWN | \
155 NETIF_MSG_IFUP | \
156 NETIF_MSG_RX_ERR | \
157 NETIF_MSG_TX_ERR)
158
159/* length of time before we decide the hardware is borked,
160 * and dev->tx_timeout() should be called to fix the problem
161 */
162#define CAS_TX_TIMEOUT (HZ)
163#define CAS_LINK_TIMEOUT (22*HZ/10)
164#define CAS_LINK_FAST_TIMEOUT (1)
165
166/* timeout values for state changing. these specify the number
167 * of 10us delays to be used before giving up.
168 */
169#define STOP_TRIES_PHY 1000
170#define STOP_TRIES 5000
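/* Worked example of the bounds above (illustrative only, assuming the
 * 10us udelay() used by the polling loops below): STOP_TRIES_PHY caps a
 * PHY poll at roughly 1000 * 10us = 10ms, while STOP_TRIES caps the
 * longer state-change polls at about 5000 * 10us = 50ms.
 */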
171
172/* specify a minimum frame size to deal with some fifo issues
173 * max mtu == 2 * page size - ethernet header - 64 - swivel =
174 * 2 * page_size - 0x50
175 */
176#define CAS_MIN_FRAME 97
177#define CAS_1000MB_MIN_FRAME 255
178#define CAS_MIN_MTU 60
179#define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
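/* Example of the MTU bound above (illustrative arithmetic only): with the
 * default 2K (0x800) receive pages, (0x800 << 1) - 0x50 = 4016 bytes,
 * while 8K pages would allow 16304, so the min() clamps jumbo frames to
 * an MTU of 9000.
 */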
180
181#if 1
182/*
183 * Eliminate these and use separate atomic counters for each, to
184 * avoid a race condition.
185 */
186#else
187#define CAS_RESET_MTU 1
188#define CAS_RESET_ALL 2
189#define CAS_RESET_SPARE 3
190#endif
191
192static char version[] __devinitdata =
193 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
194
195MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
196MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
197MODULE_LICENSE("GPL");
198MODULE_PARM(cassini_debug, "i");
199MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
200MODULE_PARM(link_mode, "i");
201MODULE_PARM_DESC(link_mode, "default link mode");
202
203/*
204 * Work around for a PCS bug in which the link goes down due to the chip
205 * being confused and never showing a link status of "up."
206 */
207#define DEFAULT_LINKDOWN_TIMEOUT 5
208/*
209 * Value in seconds, for user input.
210 */
211static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
212MODULE_PARM(linkdown_timeout, "i");
213MODULE_PARM_DESC(linkdown_timeout,
214"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
215
216/*
217 * value in 'ticks' (units used by jiffies). Set when we init the
218 * module because 'HZ' is actually a function call on some flavors of
219 * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
220 */
221static int link_transition_timeout;
222
223
224static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
225static int link_mode;
226
227static u16 link_modes[] __devinitdata = {
228 BMCR_ANENABLE, /* 0 : autoneg */
229 0, /* 1 : 10bt half duplex */
230 BMCR_SPEED100, /* 2 : 100bt half duplex */
231 BMCR_FULLDPLX, /* 3 : 10bt full duplex */
232 BMCR_SPEED100|BMCR_FULLDPLX, /* 4 : 100bt full duplex */
233 CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
234};
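/* Hypothetical module load illustrating the parameters above (values are
 * examples, not recommendations):
 *
 *	modprobe cassini link_mode=4 linkdown_timeout=10 cassini_debug=0x37
 *
 * forces 100bt full duplex, limits the PCS linkdown workaround to one
 * reset every 10 seconds, and enables a subset of the NETIF_MSG_* bits.
 */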
235
236static struct pci_device_id cas_pci_tbl[] __devinitdata = {
237 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
238 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
239 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
240 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
241 { 0, }
242};
243
244MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
245
246static void cas_set_link_modes(struct cas *cp);
247
248static inline void cas_lock_tx(struct cas *cp)
249{
250 int i;
251
252 for (i = 0; i < N_TX_RINGS; i++)
253 spin_lock(&cp->tx_lock[i]);
254}
255
256static inline void cas_lock_all(struct cas *cp)
257{
258 spin_lock_irq(&cp->lock);
259 cas_lock_tx(cp);
260}
261
262/* WTZ: QA was finding deadlock problems with the previous
263 * versions after long test runs with multiple cards per machine.
264 * See if replacing cas_lock_all with safer versions helps. The
265 * symptoms QA is reporting match those we'd expect if interrupts
266 * aren't being properly restored, and we fixed a previous deadlock
267 * with similar symptoms by using save/restore versions in other
268 * places.
269 */
270#define cas_lock_all_save(cp, flags) \
271do { \
272 struct cas *xxxcp = (cp); \
273 spin_lock_irqsave(&xxxcp->lock, flags); \
274 cas_lock_tx(xxxcp); \
275} while (0)
276
277static inline void cas_unlock_tx(struct cas *cp)
278{
279 int i;
280
281 for (i = N_TX_RINGS; i > 0; i--)
282 spin_unlock(&cp->tx_lock[i - 1]);
283}
284
285static inline void cas_unlock_all(struct cas *cp)
286{
287 cas_unlock_tx(cp);
288 spin_unlock_irq(&cp->lock);
289}
290
291#define cas_unlock_all_restore(cp, flags) \
292do { \
293 struct cas *xxxcp = (cp); \
294 cas_unlock_tx(xxxcp); \
295 spin_unlock_irqrestore(&xxxcp->lock, flags); \
296} while (0)
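/* Sketch of how the save/restore variants above pair up (illustrative,
 * not code taken from elsewhere in the driver):
 *
 *	unsigned long flags;
 *
 *	cas_lock_all_save(cp, flags);
 *	... touch state guarded by cp->lock and the per-ring tx locks ...
 *	cas_unlock_all_restore(cp, flags);
 *
 * Unlike cas_lock_all()/cas_unlock_all(), this restores the caller's
 * original interrupt state instead of unconditionally re-enabling
 * interrupts, which is the failure mode the WTZ note above suspects.
 */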
297
298static void cas_disable_irq(struct cas *cp, const int ring)
299{
300 /* Make sure we won't get any more interrupts */
301 if (ring == 0) {
302 writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
303 return;
304 }
305
306 /* disable completion interrupts and selectively mask */
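	/* note: cases 1-3 below exist only when the matching USE_PCI_INT[B-D]
	 * macro is defined and fall through to a single shared write; with
	 * all three #undef'd (as they are in this file), every non-zero ring
	 * takes the default arm instead.
	 */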
307 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
308 switch (ring) {
309#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
310#ifdef USE_PCI_INTB
311 case 1:
312#endif
313#ifdef USE_PCI_INTC
314 case 2:
315#endif
316#ifdef USE_PCI_INTD
317 case 3:
318#endif
319 writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
320 cp->regs + REG_PLUS_INTRN_MASK(ring));
321 break;
322#endif
323 default:
324 writel(INTRN_MASK_CLEAR_ALL, cp->regs +
325 REG_PLUS_INTRN_MASK(ring));
326 break;
327 }
328 }
329}
330
331static inline void cas_mask_intr(struct cas *cp)
332{
333 int i;
334
335 for (i = 0; i < N_RX_COMP_RINGS; i++)
336 cas_disable_irq(cp, i);
337}
338
339static void cas_enable_irq(struct cas *cp, const int ring)
340{
341 if (ring == 0) { /* all but TX_DONE */
342 writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
343 return;
344 }
345
346 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
347 switch (ring) {
348#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
349#ifdef USE_PCI_INTB
350 case 1:
351#endif
352#ifdef USE_PCI_INTC
353 case 2:
354#endif
355#ifdef USE_PCI_INTD
356 case 3:
357#endif
358 writel(INTRN_MASK_RX_EN, cp->regs +
359 REG_PLUS_INTRN_MASK(ring));
360 break;
361#endif
362 default:
363 break;
364 }
365 }
366}
367
368static inline void cas_unmask_intr(struct cas *cp)
369{
370 int i;
371
372 for (i = 0; i < N_RX_COMP_RINGS; i++)
373 cas_enable_irq(cp, i);
374}
375
376static inline void cas_entropy_gather(struct cas *cp)
377{
378#ifdef USE_ENTROPY_DEV
379 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
380 return;
381
382 batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
383 readl(cp->regs + REG_ENTROPY_IV),
384 sizeof(uint64_t)*8);
385#endif
386}
387
388static inline void cas_entropy_reset(struct cas *cp)
389{
390#ifdef USE_ENTROPY_DEV
391 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
392 return;
393
394 writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
395 cp->regs + REG_BIM_LOCAL_DEV_EN);
396 writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
397 writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
398
399 /* if we read back 0x0, we don't have an entropy device */
400 if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
401 cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
402#endif
403}
404
405/* access to the phy. the following assumes that we've initialized the MIF to
406 * be in frame rather than bit-bang mode
407 */
408static u16 cas_phy_read(struct cas *cp, int reg)
409{
410 u32 cmd;
411 int limit = STOP_TRIES_PHY;
412
413 cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
414 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
415 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
416 cmd |= MIF_FRAME_TURN_AROUND_MSB;
417 writel(cmd, cp->regs + REG_MIF_FRAME);
418
419 /* poll for completion */
420 while (limit-- > 0) {
421 udelay(10);
422 cmd = readl(cp->regs + REG_MIF_FRAME);
423 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
424 return (cmd & MIF_FRAME_DATA_MASK);
425 }
426 return 0xFFFF; /* -1 */
427}
428
429static int cas_phy_write(struct cas *cp, int reg, u16 val)
430{
431 int limit = STOP_TRIES_PHY;
432 u32 cmd;
433
434 cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
435 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
436 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
437 cmd |= MIF_FRAME_TURN_AROUND_MSB;
438 cmd |= val & MIF_FRAME_DATA_MASK;
439 writel(cmd, cp->regs + REG_MIF_FRAME);
440
441 /* poll for completion */
442 while (limit-- > 0) {
443 udelay(10);
444 cmd = readl(cp->regs + REG_MIF_FRAME);
445 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
446 return 0;
447 }
448 return -1;
449}
450
451static void cas_phy_powerup(struct cas *cp)
452{
453 u16 ctl = cas_phy_read(cp, MII_BMCR);
454
455 if ((ctl & BMCR_PDOWN) == 0)
456 return;
457 ctl &= ~BMCR_PDOWN;
458 cas_phy_write(cp, MII_BMCR, ctl);
459}
460
461static void cas_phy_powerdown(struct cas *cp)
462{
463 u16 ctl = cas_phy_read(cp, MII_BMCR);
464
465 if (ctl & BMCR_PDOWN)
466 return;
467 ctl |= BMCR_PDOWN;
468 cas_phy_write(cp, MII_BMCR, ctl);
469}
470
471/* cp->lock held. note: the last put_page will free the buffer */
472static int cas_page_free(struct cas *cp, cas_page_t *page)
473{
474 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
475 PCI_DMA_FROMDEVICE);
476 __free_pages(page->buffer, cp->page_order);
477 kfree(page);
478 return 0;
479}
480
481#ifdef RX_COUNT_BUFFERS
482#define RX_USED_ADD(x, y) ((x)->used += (y))
483#define RX_USED_SET(x, y) ((x)->used = (y))
484#else
485#define RX_USED_ADD(x, y)
486#define RX_USED_SET(x, y)
487#endif
488
489/* local page allocation routines for the receive buffers. jumbo pages
490 * require at least 8K contiguous and 8K aligned buffers.
491 */
492static cas_page_t *cas_page_alloc(struct cas *cp, const int flags)
493{
494 cas_page_t *page;
495
496 page = kmalloc(sizeof(cas_page_t), flags);
497 if (!page)
498 return NULL;
499
500 INIT_LIST_HEAD(&page->list);
501 RX_USED_SET(page, 0);
502 page->buffer = alloc_pages(flags, cp->page_order);
503 if (!page->buffer)
504 goto page_err;
505 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
506 cp->page_size, PCI_DMA_FROMDEVICE);
507 return page;
508
509page_err:
510 kfree(page);
511 return NULL;
512}
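/* Sizing note for the allocation above (assuming the usual
 * page_size == PAGE_SIZE << page_order relationship): page_order 1 on a
 * 4K-page system yields the 8K contiguous, 8K aligned buffers the jumbo
 * comment calls for; page_order 0 yields plain 4K buffers.
 */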
513
514/* initialize spare pool of rx buffers, but allocate during the open */
515static void cas_spare_init(struct cas *cp)
516{
517 spin_lock(&cp->rx_inuse_lock);
518 INIT_LIST_HEAD(&cp->rx_inuse_list);
519 spin_unlock(&cp->rx_inuse_lock);
520
521 spin_lock(&cp->rx_spare_lock);
522 INIT_LIST_HEAD(&cp->rx_spare_list);
523 cp->rx_spares_needed = RX_SPARE_COUNT;
524 spin_unlock(&cp->rx_spare_lock);
525}
526
527/* used on close. free all the spare buffers. */
528static void cas_spare_free(struct cas *cp)
529{
530 struct list_head list, *elem, *tmp;
531
532 /* free spare buffers */
533 INIT_LIST_HEAD(&list);
534 spin_lock(&cp->rx_spare_lock);
535 list_splice(&cp->rx_spare_list, &list);
536 INIT_LIST_HEAD(&cp->rx_spare_list);
537 spin_unlock(&cp->rx_spare_lock);
538 list_for_each_safe(elem, tmp, &list) {
539 cas_page_free(cp, list_entry(elem, cas_page_t, list));
540 }
541
542 INIT_LIST_HEAD(&list);
543#if 1
544 /*
545 * Looks like Adrian had protected this with a different
546 * lock than the one used everywhere else to manipulate this list.
547 */
548 spin_lock(&cp->rx_inuse_lock);
549 list_splice(&cp->rx_inuse_list, &list);
550 INIT_LIST_HEAD(&cp->rx_inuse_list);
551 spin_unlock(&cp->rx_inuse_lock);
552#else
553 spin_lock(&cp->rx_spare_lock);
554 list_splice(&cp->rx_inuse_list, &list);
555 INIT_LIST_HEAD(&cp->rx_inuse_list);
556 spin_unlock(&cp->rx_spare_lock);
557#endif
558 list_for_each_safe(elem, tmp, &list) {
559 cas_page_free(cp, list_entry(elem, cas_page_t, list));
560 }
561}
562
563/* replenish spares if needed */
564static void cas_spare_recover(struct cas *cp, const int flags)
565{
566 struct list_head list, *elem, *tmp;
567 int needed, i;
568
569 /* check inuse list. if we don't need any more free buffers,
570 * just free them
571 */
572
573 /* make a local copy of the list */
574 INIT_LIST_HEAD(&list);
575 spin_lock(&cp->rx_inuse_lock);
576 list_splice(&cp->rx_inuse_list, &list);
577 INIT_LIST_HEAD(&cp->rx_inuse_list);
578 spin_unlock(&cp->rx_inuse_lock);
579
580 list_for_each_safe(elem, tmp, &list) {
581 cas_page_t *page = list_entry(elem, cas_page_t, list);
582
583 if (page_count(page->buffer) > 1)
584 continue;
585
586 list_del(elem);
587 spin_lock(&cp->rx_spare_lock);
588 if (cp->rx_spares_needed > 0) {
589 list_add(elem, &cp->rx_spare_list);
590 cp->rx_spares_needed--;
591 spin_unlock(&cp->rx_spare_lock);
592 } else {
593 spin_unlock(&cp->rx_spare_lock);
594 cas_page_free(cp, page);
595 }
596 }
597
598 /* put any inuse buffers back on the list */
599 if (!list_empty(&list)) {
600 spin_lock(&cp->rx_inuse_lock);
601 list_splice(&list, &cp->rx_inuse_list);
602 spin_unlock(&cp->rx_inuse_lock);
603 }
604
605 spin_lock(&cp->rx_spare_lock);
606 needed = cp->rx_spares_needed;
607 spin_unlock(&cp->rx_spare_lock);
608 if (!needed)
609 return;
610
611 /* we still need spares, so try to allocate some */
612 INIT_LIST_HEAD(&list);
613 i = 0;
614 while (i < needed) {
615 cas_page_t *spare = cas_page_alloc(cp, flags);
616 if (!spare)
617 break;
618 list_add(&spare->list, &list);
619 i++;
620 }
621
622 spin_lock(&cp->rx_spare_lock);
623 list_splice(&list, &cp->rx_spare_list);
624 cp->rx_spares_needed -= i;
625 spin_unlock(&cp->rx_spare_lock);
626}
627
628/* pull a page from the list. */
629static cas_page_t *cas_page_dequeue(struct cas *cp)
630{
631 struct list_head *entry;
632 int recover;
633
634 spin_lock(&cp->rx_spare_lock);
635 if (list_empty(&cp->rx_spare_list)) {
636 /* try to do a quick recovery */
637 spin_unlock(&cp->rx_spare_lock);
638 cas_spare_recover(cp, GFP_ATOMIC);
639 spin_lock(&cp->rx_spare_lock);
640 if (list_empty(&cp->rx_spare_list)) {
641 if (netif_msg_rx_err(cp))
642 printk(KERN_ERR "%s: no spare buffers "
643 "available.\n", cp->dev->name);
644 spin_unlock(&cp->rx_spare_lock);
645 return NULL;
646 }
647 }
648
649 entry = cp->rx_spare_list.next;
650 list_del(entry);
651 recover = ++cp->rx_spares_needed;
652 spin_unlock(&cp->rx_spare_lock);
653
654 /* trigger the timer to do the recovery */
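	/* (the mask test below fires once every RX_SPARE_RECOVER_VAL
	 * dequeues; it relies on RX_SPARE_RECOVER_VAL being a power of two)
	 */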
655 if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
656#if 1
657 atomic_inc(&cp->reset_task_pending);
658 atomic_inc(&cp->reset_task_pending_spare);
659 schedule_work(&cp->reset_task);
660#else
661 atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
662 schedule_work(&cp->reset_task);
663#endif
664 }
665 return list_entry(entry, cas_page_t, list);
666}
667
668
669static void cas_mif_poll(struct cas *cp, const int enable)
670{
671 u32 cfg;
672
673 cfg = readl(cp->regs + REG_MIF_CFG);
674 cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
675
676 if (cp->phy_type & CAS_PHY_MII_MDIO1)
677 cfg |= MIF_CFG_PHY_SELECT;
678
679 /* poll and interrupt on link status change. */
680 if (enable) {
681 cfg |= MIF_CFG_POLL_EN;
682 cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
683 cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
684 }
685 writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
686 cp->regs + REG_MIF_MASK);
687 writel(cfg, cp->regs + REG_MIF_CFG);
688}
689
690/* Must be invoked under cp->lock */
691static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
692{
693 u16 ctl;
694#if 1
695 int lcntl;
696 int changed = 0;
697 int oldstate = cp->lstate;
698 int link_was_not_down = !(oldstate == link_down);
699#endif
700 /* Setup link parameters */
701 if (!ep)
702 goto start_aneg;
703 lcntl = cp->link_cntl;
704 if (ep->autoneg == AUTONEG_ENABLE)
705 cp->link_cntl = BMCR_ANENABLE;
706 else {
707 cp->link_cntl = 0;
708 if (ep->speed == SPEED_100)
709 cp->link_cntl |= BMCR_SPEED100;
710 else if (ep->speed == SPEED_1000)
711 cp->link_cntl |= CAS_BMCR_SPEED1000;
712 if (ep->duplex == DUPLEX_FULL)
713 cp->link_cntl |= BMCR_FULLDPLX;
714 }
715#if 1
716 changed = (lcntl != cp->link_cntl);
717#endif
718start_aneg:
719 if (cp->lstate == link_up) {
720 printk(KERN_INFO "%s: PCS link down.\n",
721 cp->dev->name);
722 } else {
723 if (changed) {
724 printk(KERN_INFO "%s: link configuration changed\n",
725 cp->dev->name);
726 }
727 }
728 cp->lstate = link_down;
729 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
730 if (!cp->hw_running)
731 return;
732#if 1
733 /*
734 * WTZ: If the old state was link_up, we turn off the carrier
735 * to replicate everything we do elsewhere on a link-down
736 * event when we were already in a link-up state.
737 */
738 if (oldstate == link_up)
739 netif_carrier_off(cp->dev);
740 if (changed && link_was_not_down) {
741 /*
742 * WTZ: This branch will simply schedule a full reset after
743 * we explicitly changed link modes in an ioctl. See if this
744 * fixes the link-problems we were having for forced mode.
745 */
746 atomic_inc(&cp->reset_task_pending);
747 atomic_inc(&cp->reset_task_pending_all);
748 schedule_work(&cp->reset_task);
749 cp->timer_ticks = 0;
750 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
751 return;
752 }
753#endif
754 if (cp->phy_type & CAS_PHY_SERDES) {
755 u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
756
757 if (cp->link_cntl & BMCR_ANENABLE) {
758 val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
759 cp->lstate = link_aneg;
760 } else {
761 if (cp->link_cntl & BMCR_FULLDPLX)
762 val |= PCS_MII_CTRL_DUPLEX;
763 val &= ~PCS_MII_AUTONEG_EN;
764 cp->lstate = link_force_ok;
765 }
766 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
767 writel(val, cp->regs + REG_PCS_MII_CTRL);
768
769 } else {
770 cas_mif_poll(cp, 0);
771 ctl = cas_phy_read(cp, MII_BMCR);
772 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
773 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
774 ctl |= cp->link_cntl;
775 if (ctl & BMCR_ANENABLE) {
776 ctl |= BMCR_ANRESTART;
777 cp->lstate = link_aneg;
778 } else {
779 cp->lstate = link_force_ok;
780 }
781 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
782 cas_phy_write(cp, MII_BMCR, ctl);
783 cas_mif_poll(cp, 1);
784 }
785
786 cp->timer_ticks = 0;
787 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
788}
789
790/* Must be invoked under cp->lock. */
791static int cas_reset_mii_phy(struct cas *cp)
792{
793 int limit = STOP_TRIES_PHY;
794 u16 val;
795
796 cas_phy_write(cp, MII_BMCR, BMCR_RESET);
797 udelay(100);
798 while (limit--) {
799 val = cas_phy_read(cp, MII_BMCR);
800 if ((val & BMCR_RESET) == 0)
801 break;
802 udelay(10);
803 }
804 return (limit <= 0);
805}
806
807static void cas_saturn_firmware_load(struct cas *cp)
808{
809 cas_saturn_patch_t *patch = cas_saturn_patch;
810
811 cas_phy_powerdown(cp);
812
813 /* expanded memory access mode */
814 cas_phy_write(cp, DP83065_MII_MEM, 0x0);
815
816 /* pointer configuration for new firmware */
817 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
818 cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
819 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
820 cas_phy_write(cp, DP83065_MII_REGD, 0x82);
821 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
822 cas_phy_write(cp, DP83065_MII_REGD, 0x0);
823 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
824 cas_phy_write(cp, DP83065_MII_REGD, 0x39);
825
826 /* download new firmware */
827 cas_phy_write(cp, DP83065_MII_MEM, 0x1);
828 cas_phy_write(cp, DP83065_MII_REGE, patch->addr);
829 while (patch->addr) {
830 cas_phy_write(cp, DP83065_MII_REGD, patch->val);
831 patch++;
832 }
833
834 /* enable firmware */
835 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
836 cas_phy_write(cp, DP83065_MII_REGD, 0x1);
837}
838
839
840/* phy initialization */
841static void cas_phy_init(struct cas *cp)
842{
843 u16 val;
844
845 /* if we're in MII/GMII mode, set up phy */
846 if (CAS_PHY_MII(cp->phy_type)) {
847 writel(PCS_DATAPATH_MODE_MII,
848 cp->regs + REG_PCS_DATAPATH_MODE);
849
850 cas_mif_poll(cp, 0);
851 cas_reset_mii_phy(cp); /* take out of isolate mode */
852
853 if (PHY_LUCENT_B0 == cp->phy_id) {
854 /* workaround link up/down issue with lucent */
855 cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
856 cas_phy_write(cp, MII_BMCR, 0x00f1);
857 cas_phy_write(cp, LUCENT_MII_REG, 0x0);
858
859 } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
860 /* workarounds for broadcom phy */
861 cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
862 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
863 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
864 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
865 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
866 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
867 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
868 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
869 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
870 cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
871 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
872
873 } else if (PHY_BROADCOM_5411 == cp->phy_id) {
874 val = cas_phy_read(cp, BROADCOM_MII_REG4);
875 val = cas_phy_read(cp, BROADCOM_MII_REG4);
876 if (val & 0x0080) {
877 /* link workaround */
878 cas_phy_write(cp, BROADCOM_MII_REG4,
879 val & ~0x0080);
880 }
881
882 } else if (cp->cas_flags & CAS_FLAG_SATURN) {
883 writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
884 SATURN_PCFG_FSI : 0x0,
885 cp->regs + REG_SATURN_PCFG);
886
887 /* load firmware to address 10Mbps auto-negotiation
888 * issue. NOTE: this will need to be changed if the
889 * default firmware gets fixed.
890 */
891 if (PHY_NS_DP83065 == cp->phy_id) {
892 cas_saturn_firmware_load(cp);
893 }
894 cas_phy_powerup(cp);
895 }
896
897 /* advertise capabilities */
898 val = cas_phy_read(cp, MII_BMCR);
899 val &= ~BMCR_ANENABLE;
900 cas_phy_write(cp, MII_BMCR, val);
901 udelay(10);
902
903 cas_phy_write(cp, MII_ADVERTISE,
904 cas_phy_read(cp, MII_ADVERTISE) |
905 (ADVERTISE_10HALF | ADVERTISE_10FULL |
906 ADVERTISE_100HALF | ADVERTISE_100FULL |
907 CAS_ADVERTISE_PAUSE |
908 CAS_ADVERTISE_ASYM_PAUSE));
909
910 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
911 /* make sure that we don't advertise half
912 * duplex to avoid a chip issue
913 */
914 val = cas_phy_read(cp, CAS_MII_1000_CTRL);
915 val &= ~CAS_ADVERTISE_1000HALF;
916 val |= CAS_ADVERTISE_1000FULL;
917 cas_phy_write(cp, CAS_MII_1000_CTRL, val);
918 }
919
920 } else {
921 /* reset pcs for serdes */
922 u32 val;
923 int limit;
924
925 writel(PCS_DATAPATH_MODE_SERDES,
926 cp->regs + REG_PCS_DATAPATH_MODE);
927
928 /* enable serdes pins on saturn */
929 if (cp->cas_flags & CAS_FLAG_SATURN)
930 writel(0, cp->regs + REG_SATURN_PCFG);
931
932 /* Reset PCS unit. */
933 val = readl(cp->regs + REG_PCS_MII_CTRL);
934 val |= PCS_MII_RESET;
935 writel(val, cp->regs + REG_PCS_MII_CTRL);
936
937 limit = STOP_TRIES;
938 while (limit-- > 0) {
939 udelay(10);
940 if ((readl(cp->regs + REG_PCS_MII_CTRL) &
941 PCS_MII_RESET) == 0)
942 break;
943 }
944 if (limit <= 0)
945 printk(KERN_WARNING "%s: PCS reset bit would not "
946 "clear [%08x].\n", cp->dev->name,
947 readl(cp->regs + REG_PCS_STATE_MACHINE));
948
949 /* Make sure PCS is disabled while changing advertisement
950 * configuration.
951 */
952 writel(0x0, cp->regs + REG_PCS_CFG);
953
954 /* Advertise all capabilities except half-duplex. */
955 val = readl(cp->regs + REG_PCS_MII_ADVERT);
956 val &= ~PCS_MII_ADVERT_HD;
957 val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
958 PCS_MII_ADVERT_ASYM_PAUSE);
959 writel(val, cp->regs + REG_PCS_MII_ADVERT);
960
961 /* enable PCS */
962 writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
963
964 /* pcs workaround: enable sync detect */
965 writel(PCS_SERDES_CTRL_SYNCD_EN,
966 cp->regs + REG_PCS_SERDES_CTRL);
967 }
968}
969
970
971static int cas_pcs_link_check(struct cas *cp)
972{
973 u32 stat, state_machine;
974 int retval = 0;
975
976 /* The link status bit latches on zero, so you must
977 * read it twice in such a case to see a transition
978 * to the link being up.
979 */
980 stat = readl(cp->regs + REG_PCS_MII_STATUS);
981 if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
982 stat = readl(cp->regs + REG_PCS_MII_STATUS);
983
984 /* The remote-fault indication is only valid
985 * when autoneg has completed.
986 */
987 if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
988 PCS_MII_STATUS_REMOTE_FAULT)) ==
989 (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
990 if (netif_msg_link(cp))
991 printk(KERN_INFO "%s: PCS RemoteFault\n",
992 cp->dev->name);
993 }
994
995 /* work around link detection issue by querying the PCS state
996 * machine directly.
997 */
998 state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
999 if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
1000 stat &= ~PCS_MII_STATUS_LINK_STATUS;
1001 } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
1002 stat |= PCS_MII_STATUS_LINK_STATUS;
1003 }
1004
1005 if (stat & PCS_MII_STATUS_LINK_STATUS) {
1006 if (cp->lstate != link_up) {
1007 if (cp->opened) {
1008 cp->lstate = link_up;
1009 cp->link_transition = LINK_TRANSITION_LINK_UP;
1010
1011 cas_set_link_modes(cp);
1012 netif_carrier_on(cp->dev);
1013 }
1014 }
1015 } else if (cp->lstate == link_up) {
1016 cp->lstate = link_down;
1017 if (link_transition_timeout != 0 &&
1018 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1019 !cp->link_transition_jiffies_valid) {
1020 /*
1021 * force a reset, as a workaround for the
1022 * link-failure problem. May want to move this to a
1023 * point a bit earlier in the sequence. If we had
1024 * generated a reset a short time ago, we'll wait for
1025 * the link timer to check the status until a
1026 * timer expires (link_transition_jiffies_valid is
1027 * true when the timer is running.) Instead of using
1028 * a system timer, we just do a check whenever the
1029 * link timer is running - this clears the flag after
1030 * a suitable delay.
1031 */
1032 retval = 1;
1033 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1034 cp->link_transition_jiffies = jiffies;
1035 cp->link_transition_jiffies_valid = 1;
1036 } else {
1037 cp->link_transition = LINK_TRANSITION_ON_FAILURE;
1038 }
1039 netif_carrier_off(cp->dev);
1040 if (cp->opened && netif_msg_link(cp)) {
1041 printk(KERN_INFO "%s: PCS link down.\n",
1042 cp->dev->name);
1043 }
1044
1045 /* Cassini only: if you force a mode, there can be
1046 * sync problems on link down. to fix that, the following
1047 * things need to be checked:
1048 * 1) read serialink state register
1049 * 2) read pcs status register to verify link down.
1050 * 3) if link down and serial link == 0x03, then you need
1051 * to globally reset the chip.
1052 */
1053 if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
1054 /* should check to see if we're in a forced mode */
1055 stat = readl(cp->regs + REG_PCS_SERDES_STATE);
1056 if (stat == 0x03)
1057 return 1;
1058 }
1059 } else if (cp->lstate == link_down) {
1060 if (link_transition_timeout != 0 &&
1061 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1062 !cp->link_transition_jiffies_valid) {
1063 /* force a reset, as a workaround for the
1064 * link-failure problem. May want to move
1065 * this to a point a bit earlier in the
1066 * sequence.
1067 */
1068 retval = 1;
1069 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1070 cp->link_transition_jiffies = jiffies;
1071 cp->link_transition_jiffies_valid = 1;
1072 } else {
1073 cp->link_transition = LINK_TRANSITION_STILL_FAILED;
1074 }
1075 }
1076
1077 return retval;
1078}
1079
1080static int cas_pcs_interrupt(struct net_device *dev,
1081 struct cas *cp, u32 status)
1082{
1083 u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1084
1085 if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
1086 return 0;
1087 return cas_pcs_link_check(cp);
1088}
1089
1090static int cas_txmac_interrupt(struct net_device *dev,
1091 struct cas *cp, u32 status)
1092{
1093 u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
1094
1095 if (!txmac_stat)
1096 return 0;
1097
1098 if (netif_msg_intr(cp))
1099 printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
1100 cp->dev->name, txmac_stat);
1101
1102	/* Defer timer expiration is quite normal;
1103 * don't even log the event.
1104 */
1105 if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
1106 !(txmac_stat & ~MAC_TX_DEFER_TIMER))
1107 return 0;
1108
1109 spin_lock(&cp->stat_lock[0]);
1110 if (txmac_stat & MAC_TX_UNDERRUN) {
1111 printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
1112 dev->name);
1113 cp->net_stats[0].tx_fifo_errors++;
1114 }
1115
1116 if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
1117 printk(KERN_ERR "%s: TX MAC max packet size error.\n",
1118 dev->name);
1119 cp->net_stats[0].tx_errors++;
1120 }
1121
1122 /* The rest are all cases of one of the 16-bit TX
1123 * counters expiring.
1124 */
1125 if (txmac_stat & MAC_TX_COLL_NORMAL)
1126 cp->net_stats[0].collisions += 0x10000;
1127
1128 if (txmac_stat & MAC_TX_COLL_EXCESS) {
1129 cp->net_stats[0].tx_aborted_errors += 0x10000;
1130 cp->net_stats[0].collisions += 0x10000;
1131 }
1132
1133 if (txmac_stat & MAC_TX_COLL_LATE) {
1134 cp->net_stats[0].tx_aborted_errors += 0x10000;
1135 cp->net_stats[0].collisions += 0x10000;
1136 }
1137 spin_unlock(&cp->stat_lock[0]);
1138
1139 /* We do not keep track of MAC_TX_COLL_FIRST and
1140 * MAC_TX_PEAK_ATTEMPTS events.
1141 */
1142 return 0;
1143}
1144
1145static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
1146{
1147 cas_hp_inst_t *inst;
1148 u32 val;
1149 int i;
1150
1151 i = 0;
1152 while ((inst = firmware) && inst->note) {
1153 writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
1154
1155 val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
1156 val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
1157 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
1158
1159 val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
1160 val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
1161 val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
1162 val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
1163 val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
1164 val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
1165 val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
1166 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
1167
1168 val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
1169 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
1170 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
1171 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
1172 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
1173 ++firmware;
1174 ++i;
1175 }
1176}
1177
1178static void cas_init_rx_dma(struct cas *cp)
1179{
1180 u64 desc_dma = cp->block_dvma;
1181 u32 val;
1182 int i, size;
1183
1184 /* rx free descriptors */
1185 val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
1186 val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
1187 val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
1188 if ((N_RX_DESC_RINGS > 1) &&
1189 (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */
1190 val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
1191 writel(val, cp->regs + REG_RX_CFG);
1192
1193 val = (unsigned long) cp->init_rxds[0] -
1194 (unsigned long) cp->init_block;
1195 writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1196 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1197 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1198
1199 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1200 /* rx desc 2 is for IPSEC packets. however,
1201	 * we don't use it for that purpose.
1202 */
1203 val = (unsigned long) cp->init_rxds[1] -
1204 (unsigned long) cp->init_block;
1205 writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1206 writel((desc_dma + val) & 0xffffffff, cp->regs +
1207 REG_PLUS_RX_DB1_LOW);
1208 writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1209 REG_PLUS_RX_KICK1);
1210 }
1211
1212 /* rx completion registers */
1213 val = (unsigned long) cp->init_rxcs[0] -
1214 (unsigned long) cp->init_block;
1215 writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1216 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1217
1218 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1219 /* rx comp 2-4 */
1220 for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
1221 val = (unsigned long) cp->init_rxcs[i] -
1222 (unsigned long) cp->init_block;
1223 writel((desc_dma + val) >> 32, cp->regs +
1224 REG_PLUS_RX_CBN_HI(i));
1225 writel((desc_dma + val) & 0xffffffff, cp->regs +
1226 REG_PLUS_RX_CBN_LOW(i));
1227 }
1228 }
1229
1230 /* read selective clear regs to prevent spurious interrupts
1231 * on reset because complete == kick.
1232	 * the selective clear mask is set up to prevent interrupts on resets.
1233 */
1234 readl(cp->regs + REG_INTR_STATUS_ALIAS);
1235 writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1236 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1237 for (i = 1; i < N_RX_COMP_RINGS; i++)
1238 readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
1239
1240 /* 2 is different from 3 and 4 */
1241 if (N_RX_COMP_RINGS > 1)
1242 writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
1243 cp->regs + REG_PLUS_ALIASN_CLEAR(1));
1244
1245 for (i = 2; i < N_RX_COMP_RINGS; i++)
1246 writel(INTR_RX_DONE_ALT,
1247 cp->regs + REG_PLUS_ALIASN_CLEAR(i));
1248 }
1249
1250 /* set up pause thresholds */
1251 val = CAS_BASE(RX_PAUSE_THRESH_OFF,
1252 cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1253 val |= CAS_BASE(RX_PAUSE_THRESH_ON,
1254 cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1255 writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1256
1257 /* zero out dma reassembly buffers */
1258 for (i = 0; i < 64; i++) {
1259 writel(i, cp->regs + REG_RX_TABLE_ADDR);
1260 writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1261 writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1262 writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1263 }
1264
1265 /* make sure address register is 0 for normal operation */
1266 writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1267 writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1268
1269 /* interrupt mitigation */
1270#ifdef USE_RX_BLANK
1271 val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
1272 val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
1273 writel(val, cp->regs + REG_RX_BLANK);
1274#else
1275 writel(0x0, cp->regs + REG_RX_BLANK);
1276#endif
1277
1278 /* interrupt generation as a function of low water marks for
1279 * free desc and completion entries. these are used to trigger
1280 * housekeeping for rx descs. we don't use the free interrupt
1281 * as it's not very useful
1282 */
1283 /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
1284 val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
1285 writel(val, cp->regs + REG_RX_AE_THRESH);
1286 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1287 val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
1288 writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1289 }
1290
1291 /* Random early detect registers. useful for congestion avoidance.
1292 * this should be tunable.
1293 */
1294 writel(0x0, cp->regs + REG_RX_RED);
1295
1296 /* receive page sizes. default == 2K (0x800) */
1297 val = 0;
1298 if (cp->page_size == 0x1000)
1299 val = 0x1;
1300 else if (cp->page_size == 0x2000)
1301 val = 0x2;
1302 else if (cp->page_size == 0x4000)
1303 val = 0x3;
1304
1305 /* round mtu + offset. constrain to page size. */
1306 size = cp->dev->mtu + 64;
1307 if (size > cp->page_size)
1308 size = cp->page_size;
1309
1310 if (size <= 0x400)
1311 i = 0x0;
1312 else if (size <= 0x800)
1313 i = 0x1;
1314 else if (size <= 0x1000)
1315 i = 0x2;
1316 else
1317 i = 0x3;
1318
1319 cp->mtu_stride = 1 << (i + 10);
1320 val = CAS_BASE(RX_PAGE_SIZE, val);
1321 val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
1322 val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1323 val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
1324 writel(val, cp->regs + REG_RX_PAGE_SIZE);
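	/* worked example of the stride math above (numbers illustrative):
	 * a 1500-byte MTU gives size = 1564, which lands in the <= 0x800
	 * bucket, so i = 1 and mtu_stride = 1 << 11 = 2048, the per-packet
	 * stride used when accounting for rx page usage.
	 */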
1325
1326 /* enable the header parser if desired */
1327 if (CAS_HP_FIRMWARE == cas_prog_null)
1328 return;
1329
1330 val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1331 val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1332 val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1333 writel(val, cp->regs + REG_HP_CFG);
1334}
1335
1336static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1337{
1338 memset(rxc, 0, sizeof(*rxc));
1339 rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1340}
1341
1342/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
1343 * flipping is protected by the fact that the chip will not
1344 * hand back the same page index while it's being processed.
1345 */
1346static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1347{
1348 cas_page_t *page = cp->rx_pages[1][index];
1349 cas_page_t *new;
1350
1351 if (page_count(page->buffer) == 1)
1352 return page;
1353
1354 new = cas_page_dequeue(cp);
1355 if (new) {
1356 spin_lock(&cp->rx_inuse_lock);
1357 list_add(&page->list, &cp->rx_inuse_list);
1358 spin_unlock(&cp->rx_inuse_lock);
1359 }
1360 return new;
1361}
1362
1363/* this needs to be changed if we actually use the ENC RX DESC ring */
1364static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1365 const int index)
1366{
1367 cas_page_t **page0 = cp->rx_pages[0];
1368 cas_page_t **page1 = cp->rx_pages[1];
1369
1370 /* swap if buffer is in use */
1371 if (page_count(page0[index]->buffer) > 1) {
1372 cas_page_t *new = cas_page_spare(cp, index);
1373 if (new) {
1374 page1[index] = page0[index];
1375 page0[index] = new;
1376 }
1377 }
1378 RX_USED_SET(page0[index], 0);
1379 return page0[index];
1380}
1381
1382static void cas_clean_rxds(struct cas *cp)
1383{
1384 /* only clean ring 0 as ring 1 is used for spare buffers */
1385 struct cas_rx_desc *rxd = cp->init_rxds[0];
1386 int i, size;
1387
1388 /* release all rx flows */
1389 for (i = 0; i < N_RX_FLOWS; i++) {
1390 struct sk_buff *skb;
1391 while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1392 cas_skb_release(skb);
1393 }
1394 }
1395
1396 /* initialize descriptors */
1397 size = RX_DESC_RINGN_SIZE(0);
1398 for (i = 0; i < size; i++) {
1399 cas_page_t *page = cas_page_swap(cp, 0, i);
1400 rxd[i].buffer = cpu_to_le64(page->dma_addr);
1401 rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1402 CAS_BASE(RX_INDEX_RING, 0));
1403 }
1404
1405 cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
1406 cp->rx_last[0] = 0;
1407 cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1408}
1409
1410static void cas_clean_rxcs(struct cas *cp)
1411{
1412 int i, j;
1413
1414 /* take ownership of rx comp descriptors */
1415 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1416 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1417 for (i = 0; i < N_RX_COMP_RINGS; i++) {
1418 struct cas_rx_comp *rxc = cp->init_rxcs[i];
1419 for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1420 cas_rxc_init(rxc + j);
1421 }
1422 }
1423}
1424
1425#if 0
1426/* When we get an RX fifo overflow, the RX unit is probably hung
1427 * so we do the following.
1428 *
1429 * If any part of the reset goes wrong, we return 1 and that causes the
1430 * whole chip to be reset.
1431 */
1432static int cas_rxmac_reset(struct cas *cp)
1433{
1434 struct net_device *dev = cp->dev;
1435 int limit;
1436 u32 val;
1437
1438 /* First, reset MAC RX. */
1439 writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1440 for (limit = 0; limit < STOP_TRIES; limit++) {
1441 if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1442 break;
1443 udelay(10);
1444 }
1445 if (limit == STOP_TRIES) {
1446 printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
1447 "chip.\n", dev->name);
1448 return 1;
1449 }
1450
1451 /* Second, disable RX DMA. */
1452 writel(0, cp->regs + REG_RX_CFG);
1453 for (limit = 0; limit < STOP_TRIES; limit++) {
1454 if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1455 break;
1456 udelay(10);
1457 }
1458 if (limit == STOP_TRIES) {
1459 printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
1460 "chip.\n", dev->name);
1461 return 1;
1462 }
1463
1464 mdelay(5);
1465
1466 /* Execute RX reset command. */
1467 writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1468 for (limit = 0; limit < STOP_TRIES; limit++) {
1469 if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1470 break;
1471 udelay(10);
1472 }
1473 if (limit == STOP_TRIES) {
1474 printk(KERN_ERR "%s: RX reset command will not execute, "
1475 "resetting whole chip.\n", dev->name);
1476 return 1;
1477 }
1478
1479 /* reset driver rx state */
1480 cas_clean_rxds(cp);
1481 cas_clean_rxcs(cp);
1482
1483 /* Now, reprogram the rest of RX unit. */
1484 cas_init_rx_dma(cp);
1485
1486 /* re-enable */
1487 val = readl(cp->regs + REG_RX_CFG);
1488 writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1489 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1490 val = readl(cp->regs + REG_MAC_RX_CFG);
1491 writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1492 return 0;
1493}
1494#endif
1495
1496static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1497 u32 status)
1498{
1499 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1500
1501 if (!stat)
1502 return 0;
1503
1504 if (netif_msg_intr(cp))
1505 printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
1506 cp->dev->name, stat);
1507
1508 /* these are all rollovers */
1509 spin_lock(&cp->stat_lock[0]);
1510 if (stat & MAC_RX_ALIGN_ERR)
1511 cp->net_stats[0].rx_frame_errors += 0x10000;
1512
1513 if (stat & MAC_RX_CRC_ERR)
1514 cp->net_stats[0].rx_crc_errors += 0x10000;
1515
1516 if (stat & MAC_RX_LEN_ERR)
1517 cp->net_stats[0].rx_length_errors += 0x10000;
1518
1519 if (stat & MAC_RX_OVERFLOW) {
1520 cp->net_stats[0].rx_over_errors++;
1521 cp->net_stats[0].rx_fifo_errors++;
1522 }
1523
1524 /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
1525 * events.
1526 */
1527 spin_unlock(&cp->stat_lock[0]);
1528 return 0;
1529}
1530
1531static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1532 u32 status)
1533{
1534 u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1535
1536 if (!stat)
1537 return 0;
1538
1539 if (netif_msg_intr(cp))
1540 printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
1541 cp->dev->name, stat);
1542
1543 /* This interrupt is just for pause frame and pause
1544 * tracking. It is useful for diagnostics and debug
1545 * but probably by default we will mask these events.
1546 */
1547 if (stat & MAC_CTRL_PAUSE_STATE)
1548 cp->pause_entered++;
1549
1550 if (stat & MAC_CTRL_PAUSE_RECEIVED)
1551 cp->pause_last_time_recvd = (stat >> 16);
1552
1553 return 0;
1554}
1555
1556
1557/* Must be invoked under cp->lock. */
1558static inline int cas_mdio_link_not_up(struct cas *cp)
1559{
1560 u16 val;
1561
1562 switch (cp->lstate) {
1563 case link_force_ret:
1564 if (netif_msg_link(cp))
1565 printk(KERN_INFO "%s: Autoneg failed again, keeping"
1566 " forced mode\n", cp->dev->name);
1567 cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1568 cp->timer_ticks = 5;
1569 cp->lstate = link_force_ok;
1570 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1571 break;
1572
1573 case link_aneg:
1574 val = cas_phy_read(cp, MII_BMCR);
1575
1576 /* Try forced modes. we try things in the following order:
1577 * 1000 full -> 100 full/half -> 10 half
1578 */
1579 val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1580 val |= BMCR_FULLDPLX;
1581 val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1582 CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1583 cas_phy_write(cp, MII_BMCR, val);
1584 cp->timer_ticks = 5;
1585 cp->lstate = link_force_try;
1586 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1587 break;
1588
1589 case link_force_try:
1590 /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
1591 val = cas_phy_read(cp, MII_BMCR);
1592 cp->timer_ticks = 5;
1593 if (val & CAS_BMCR_SPEED1000) { /* gigabit */
1594 val &= ~CAS_BMCR_SPEED1000;
1595 val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1596 cas_phy_write(cp, MII_BMCR, val);
1597 break;
1598 }
1599
1600 if (val & BMCR_SPEED100) {
1601 if (val & BMCR_FULLDPLX) /* fd failed */
1602 val &= ~BMCR_FULLDPLX;
1603 else { /* 100Mbps failed */
1604 val &= ~BMCR_SPEED100;
1605 }
1606 cas_phy_write(cp, MII_BMCR, val);
1607 break;
1608 }
1609 default:
1610 break;
1611 }
1612 return 0;
1613}
1614
1615
1616/* must be invoked with cp->lock held */
1617static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1618{
1619 int restart;
1620
1621 if (bmsr & BMSR_LSTATUS) {
1622 /* Ok, here we got a link. If we had it due to a forced
1623 * fallback, and we were configured for autoneg, we
1624 * retry a short autoneg pass. If you know your hub is
1625 * broken, use ethtool ;)
1626 */
1627 if ((cp->lstate == link_force_try) &&
1628 (cp->link_cntl & BMCR_ANENABLE)) {
1629 cp->lstate = link_force_ret;
1630 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1631 cas_mif_poll(cp, 0);
1632 cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1633 cp->timer_ticks = 5;
1634 if (cp->opened && netif_msg_link(cp))
1635 printk(KERN_INFO "%s: Got link after fallback, retrying"
1636 " autoneg once...\n", cp->dev->name);
1637 cas_phy_write(cp, MII_BMCR,
1638 cp->link_fcntl | BMCR_ANENABLE |
1639 BMCR_ANRESTART);
1640 cas_mif_poll(cp, 1);
1641
1642 } else if (cp->lstate != link_up) {
1643 cp->lstate = link_up;
1644 cp->link_transition = LINK_TRANSITION_LINK_UP;
1645
1646 if (cp->opened) {
1647 cas_set_link_modes(cp);
1648 netif_carrier_on(cp->dev);
1649 }
1650 }
1651 return 0;
1652 }
1653
1654 /* link not up. if the link was previously up, we restart the
1655 * whole process
1656 */
1657 restart = 0;
1658 if (cp->lstate == link_up) {
1659 cp->lstate = link_down;
1660 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1661
1662 netif_carrier_off(cp->dev);
1663 if (cp->opened && netif_msg_link(cp))
1664 printk(KERN_INFO "%s: Link down\n",
1665 cp->dev->name);
1666 restart = 1;
1667
1668 } else if (++cp->timer_ticks > 10)
1669 cas_mdio_link_not_up(cp);
1670
1671 return restart;
1672}
1673
1674static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1675 u32 status)
1676{
1677 u32 stat = readl(cp->regs + REG_MIF_STATUS);
1678 u16 bmsr;
1679
1680 /* check for a link change */
1681 if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1682 return 0;
1683
1684 bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1685 return cas_mii_link_check(cp, bmsr);
1686}
1687
1688static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1689 u32 status)
1690{
1691 u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1692
1693 if (!stat)
1694 return 0;
1695
1696 printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
1697 readl(cp->regs + REG_BIM_DIAG));
1698
1699 /* cassini+ has this reserved */
1700 if ((stat & PCI_ERR_BADACK) &&
1701 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1702 printk("<No ACK64# during ABS64 cycle> ");
1703
1704 if (stat & PCI_ERR_DTRTO)
1705 printk("<Delayed transaction timeout> ");
1706 if (stat & PCI_ERR_OTHER)
1707 printk("<other> ");
1708 if (stat & PCI_ERR_BIM_DMA_WRITE)
1709 printk("<BIM DMA 0 write req> ");
1710 if (stat & PCI_ERR_BIM_DMA_READ)
1711 printk("<BIM DMA 0 read req> ");
1712 printk("\n");
1713
1714 if (stat & PCI_ERR_OTHER) {
1715 u16 cfg;
1716
1717 /* Interrogate PCI config space for the
1718 * true cause.
1719 */
1720 pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
1721 printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
1722 dev->name, cfg);
1723 if (cfg & PCI_STATUS_PARITY)
1724 printk(KERN_ERR "%s: PCI parity error detected.\n",
1725 dev->name);
1726 if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
1727 printk(KERN_ERR "%s: PCI target abort.\n",
1728 dev->name);
1729 if (cfg & PCI_STATUS_REC_TARGET_ABORT)
1730 printk(KERN_ERR "%s: PCI master acks target abort.\n",
1731 dev->name);
1732 if (cfg & PCI_STATUS_REC_MASTER_ABORT)
1733 printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
1734 if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
1735 printk(KERN_ERR "%s: PCI system error SERR#.\n",
1736 dev->name);
1737 if (cfg & PCI_STATUS_DETECTED_PARITY)
1738 printk(KERN_ERR "%s: PCI parity error.\n",
1739 dev->name);
1740
1741 /* Write the error bits back to clear them. */
1742 cfg &= (PCI_STATUS_PARITY |
1743 PCI_STATUS_SIG_TARGET_ABORT |
1744 PCI_STATUS_REC_TARGET_ABORT |
1745 PCI_STATUS_REC_MASTER_ABORT |
1746 PCI_STATUS_SIG_SYSTEM_ERROR |
1747 PCI_STATUS_DETECTED_PARITY);
1748 pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
1749 }
1750
1751 /* For all PCI errors, we should reset the chip. */
1752 return 1;
1753}
1754
1755/* All non-normal interrupt conditions get serviced here.
1756 * Returns non-zero if we should just exit the interrupt
1757 * handler right now (i.e. if we reset the card, which invalidates
1758 * all of the other original irq status bits).
1759 */
1760static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1761 u32 status)
1762{
1763 if (status & INTR_RX_TAG_ERROR) {
1764 /* corrupt RX tag framing */
1765 if (netif_msg_rx_err(cp))
1766 printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
1767 cp->dev->name);
1768 spin_lock(&cp->stat_lock[0]);
1769 cp->net_stats[0].rx_errors++;
1770 spin_unlock(&cp->stat_lock[0]);
1771 goto do_reset;
1772 }
1773
1774 if (status & INTR_RX_LEN_MISMATCH) {
1775 /* length mismatch. */
1776 if (netif_msg_rx_err(cp))
1777 printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
1778 cp->dev->name);
1779 spin_lock(&cp->stat_lock[0]);
1780 cp->net_stats[0].rx_errors++;
1781 spin_unlock(&cp->stat_lock[0]);
1782 goto do_reset;
1783 }
1784
1785 if (status & INTR_PCS_STATUS) {
1786 if (cas_pcs_interrupt(dev, cp, status))
1787 goto do_reset;
1788 }
1789
1790 if (status & INTR_TX_MAC_STATUS) {
1791 if (cas_txmac_interrupt(dev, cp, status))
1792 goto do_reset;
1793 }
1794
1795 if (status & INTR_RX_MAC_STATUS) {
1796 if (cas_rxmac_interrupt(dev, cp, status))
1797 goto do_reset;
1798 }
1799
1800 if (status & INTR_MAC_CTRL_STATUS) {
1801 if (cas_mac_interrupt(dev, cp, status))
1802 goto do_reset;
1803 }
1804
1805 if (status & INTR_MIF_STATUS) {
1806 if (cas_mif_interrupt(dev, cp, status))
1807 goto do_reset;
1808 }
1809
1810 if (status & INTR_PCI_ERROR_STATUS) {
1811 if (cas_pci_interrupt(dev, cp, status))
1812 goto do_reset;
1813 }
1814 return 0;
1815
1816do_reset:
1817#if 1
1818 atomic_inc(&cp->reset_task_pending);
1819 atomic_inc(&cp->reset_task_pending_all);
1820 printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n",
1821 dev->name, status);
1822 schedule_work(&cp->reset_task);
1823#else
1824 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1825 printk(KERN_ERR "reset called in cas_abnormal_irq\n");
1826 schedule_work(&cp->reset_task);
1827#endif
1828 return 1;
1829}
1830
1831/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
1832 * determining whether to do a netif_stop/wakeup
1833 */
1834#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1835#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
1836static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1837 const int len)
1838{
1839 unsigned long off = addr + len;
1840
1841 if (CAS_TABORT(cp) == 1)
1842 return 0;
1843 if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1844 return 0;
1845 return TX_TARGET_ABORT_LEN;
1846}
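/* Example of the workaround above (illustrative): on a chip with
 * CAS_FLAG_TARGET_ABORT set, a buffer whose end falls within
 * TX_TARGET_ABORT_LEN bytes of the next page boundary returns
 * TX_TARGET_ABORT_LEN, and that tail is handled via the tx_tiny_use
 * bookkeeping seen in cas_tx_ringN() below; buffers ending further from
 * a boundary return 0 and need no special handling.
 */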
1847
1848static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1849{
1850 struct cas_tx_desc *txds;
1851 struct sk_buff **skbs;
1852 struct net_device *dev = cp->dev;
1853 int entry, count;
1854
1855 spin_lock(&cp->tx_lock[ring]);
1856 txds = cp->init_txds[ring];
1857 skbs = cp->tx_skbs[ring];
1858 entry = cp->tx_old[ring];
1859
1860 count = TX_BUFF_COUNT(ring, entry, limit);
1861 while (entry != limit) {
1862 struct sk_buff *skb = skbs[entry];
1863 dma_addr_t daddr;
1864 u32 dlen;
1865 int frag;
1866
1867 if (!skb) {
1868 /* this should never occur */
1869 entry = TX_DESC_NEXT(ring, entry);
1870 continue;
1871 }
1872
1873 /* however, we might get only a partial skb release. */
1874 count -= skb_shinfo(skb)->nr_frags +
1875			cp->tx_tiny_use[ring][entry].nbufs + 1;
1876 if (count < 0)
1877 break;
1878
1879 if (netif_msg_tx_done(cp))
1880 printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n",
1881 cp->dev->name, ring, entry);
1882
1883 skbs[entry] = NULL;
1884 cp->tx_tiny_use[ring][entry].nbufs = 0;
1885
1886 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1887 struct cas_tx_desc *txd = txds + entry;
1888
1889 daddr = le64_to_cpu(txd->buffer);
1890 dlen = CAS_VAL(TX_DESC_BUFLEN,
1891 le64_to_cpu(txd->control));
1892 pci_unmap_page(cp->pdev, daddr, dlen,
1893 PCI_DMA_TODEVICE);
1894 entry = TX_DESC_NEXT(ring, entry);
1895
1896 /* tiny buffer may follow */
1897 if (cp->tx_tiny_use[ring][entry].used) {
1898 cp->tx_tiny_use[ring][entry].used = 0;
1899 entry = TX_DESC_NEXT(ring, entry);
1900 }
1901 }
1902
1903 spin_lock(&cp->stat_lock[ring]);
1904 cp->net_stats[ring].tx_packets++;
1905 cp->net_stats[ring].tx_bytes += skb->len;
1906 spin_unlock(&cp->stat_lock[ring]);
1907 dev_kfree_skb_irq(skb);
1908 }
1909 cp->tx_old[ring] = entry;
1910
1911 /* this is wrong for multiple tx rings. the net device needs
1912 * multiple queues for this to do the right thing. we wait
1913 * for 2*packets to be available when using tiny buffers
1914 */
1915 if (netif_queue_stopped(dev) &&
1916 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1917 netif_wake_queue(dev);
1918 spin_unlock(&cp->tx_lock[ring]);
1919}
1920
1921static void cas_tx(struct net_device *dev, struct cas *cp,
1922 u32 status)
1923{
1924 int limit, ring;
1925#ifdef USE_TX_COMPWB
1926 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1927#endif
1928 if (netif_msg_intr(cp))
1929		printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
1930			cp->dev->name, status, (unsigned long long)compwb);
1931 /* process all the rings */
1932 for (ring = 0; ring < N_TX_RINGS; ring++) {
1933#ifdef USE_TX_COMPWB
1934 /* use the completion writeback registers */
1935 limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1936 CAS_VAL(TX_COMPWB_LSB, compwb);
1937 compwb = TX_COMPWB_NEXT(compwb);
1938#else
1939 limit = readl(cp->regs + REG_TX_COMPN(ring));
1940#endif
1941 if (cp->tx_old[ring] != limit)
1942 cas_tx_ringN(cp, ring, limit);
1943 }
1944}
1945
1946
1947static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1948 int entry, const u64 *words,
1949 struct sk_buff **skbref)
1950{
1951 int dlen, hlen, len, i, alloclen;
1952 int off, swivel = RX_SWIVEL_OFF_VAL;
1953 struct cas_page *page;
1954 struct sk_buff *skb;
1955 void *addr, *crcaddr;
1956 char *p;
1957
1958 hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1959 dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1960 len = hlen + dlen;
1961
1962 if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1963 alloclen = len;
1964 else
1965 alloclen = max(hlen, RX_COPY_MIN);
1966
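	/* illustrative sizing (assuming the defaults above): with
	 * RX_COPY_ALWAYS == 0, a 1500-byte packet carrying a 64-byte parsed
	 * header allocates only max(hlen, RX_COPY_MIN) = 64 bytes of skb
	 * data here; the rest stays in the rx page and is attached below as
	 * page frags.
	 */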
1967 skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
1968 if (skb == NULL)
1969 return -1;
1970
1971 *skbref = skb;
1972 skb->dev = cp->dev;
1973 skb_reserve(skb, swivel);
1974
1975 p = skb->data;
1976 addr = crcaddr = NULL;
1977 if (hlen) { /* always copy header pages */
1978 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
1979 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1980 off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
1981 swivel;
1982
1983 i = hlen;
1984 if (!dlen) /* attach FCS */
1985 i += cp->crc_size;
1986 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
1987 PCI_DMA_FROMDEVICE);
1988 addr = cas_page_map(page->buffer);
1989 memcpy(p, addr + off, i);
1990 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
1991 PCI_DMA_FROMDEVICE);
1992 cas_page_unmap(addr);
1993 RX_USED_ADD(page, 0x100);
1994 p += hlen;
1995 swivel = 0;
1996 }
1997
1998
1999 if (alloclen < (hlen + dlen)) {
2000 skb_frag_t *frag = skb_shinfo(skb)->frags;
2001
2002 /* normal or jumbo packets. we use frags */
2003 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2004 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2005 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2006
2007 hlen = min(cp->page_size - off, dlen);
2008 if (hlen < 0) {
2009 if (netif_msg_rx_err(cp)) {
2010 printk(KERN_DEBUG "%s: rx page overflow: "
2011 "%d\n", cp->dev->name, hlen);
2012 }
2013 dev_kfree_skb_irq(skb);
2014 return -1;
2015 }
2016 i = hlen;
2017 if (i == dlen) /* attach FCS */
2018 i += cp->crc_size;
2019 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2020 PCI_DMA_FROMDEVICE);
2021
2022 /* make sure we always copy a header */
2023 swivel = 0;
2024 if (p == (char *) skb->data) { /* not split */
2025 addr = cas_page_map(page->buffer);
2026 memcpy(p, addr + off, RX_COPY_MIN);
2027 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2028 PCI_DMA_FROMDEVICE);
2029 cas_page_unmap(addr);
2030 off += RX_COPY_MIN;
2031 swivel = RX_COPY_MIN;
2032 RX_USED_ADD(page, cp->mtu_stride);
2033 } else {
2034 RX_USED_ADD(page, hlen);
2035 }
2036 skb_put(skb, alloclen);
2037
2038 skb_shinfo(skb)->nr_frags++;
2039 skb->data_len += hlen - swivel;
2040 skb->len += hlen - swivel;
2041
2042 get_page(page->buffer);
2043 frag->page = page->buffer;
2044 frag->page_offset = off;
2045 frag->size = hlen - swivel;
2046
2047 /* any more data? */
2048 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2049 hlen = dlen;
2050 off = 0;
2051
2052 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2053 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2054 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2055 hlen + cp->crc_size,
2056 PCI_DMA_FROMDEVICE);
2057 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2058 hlen + cp->crc_size,
2059 PCI_DMA_FROMDEVICE);
2060
2061 skb_shinfo(skb)->nr_frags++;
2062 skb->data_len += hlen;
2063 skb->len += hlen;
2064 frag++;
2065
2066 get_page(page->buffer);
2067 frag->page = page->buffer;
2068 frag->page_offset = 0;
2069 frag->size = hlen;
2070 RX_USED_ADD(page, hlen + cp->crc_size);
2071 }
2072
2073 if (cp->crc_size) {
2074 addr = cas_page_map(page->buffer);
2075 crcaddr = addr + off + hlen;
2076 }
2077
2078 } else {
2079 /* copying packet */
2080 if (!dlen)
2081 goto end_copy_pkt;
2082
2083 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2084 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2085 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2086 hlen = min(cp->page_size - off, dlen);
2087 if (hlen < 0) {
2088 if (netif_msg_rx_err(cp)) {
2089 printk(KERN_DEBUG "%s: rx page overflow: "
2090 "%d\n", cp->dev->name, hlen);
2091 }
2092 dev_kfree_skb_irq(skb);
2093 return -1;
2094 }
2095 i = hlen;
2096 if (i == dlen) /* attach FCS */
2097 i += cp->crc_size;
2098 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
2099 PCI_DMA_FROMDEVICE);
2100 addr = cas_page_map(page->buffer);
2101 memcpy(p, addr + off, i);
2102 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
2103 PCI_DMA_FROMDEVICE);
2104 cas_page_unmap(addr);
2105 if (p == (char *) skb->data) /* not split */
2106 RX_USED_ADD(page, cp->mtu_stride);
2107 else
2108 RX_USED_ADD(page, i);
2109
2110 /* any more data? */
2111 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2112 p += hlen;
2113 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2114 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2115 pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
2116 dlen + cp->crc_size,
2117 PCI_DMA_FROMDEVICE);
2118 addr = cas_page_map(page->buffer);
2119 memcpy(p, addr, dlen + cp->crc_size);
2120 pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
2121 dlen + cp->crc_size,
2122 PCI_DMA_FROMDEVICE);
2123 cas_page_unmap(addr);
2124 RX_USED_ADD(page, dlen + cp->crc_size);
2125 }
2126end_copy_pkt:
2127 if (cp->crc_size) {
2128 addr = NULL;
2129 crcaddr = skb->data + alloclen;
2130 }
2131 skb_put(skb, alloclen);
2132 }
2133
2134 i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]);
2135 if (cp->crc_size) {
2136 /* checksum includes FCS. strip it out. */
2137 i = csum_fold(csum_partial(crcaddr, cp->crc_size, i));
2138 if (addr)
2139 cas_page_unmap(addr);
2140 }
2141 skb->csum = ntohs(i ^ 0xffff);
2142 skb->ip_summed = CHECKSUM_HW;
2143 skb->protocol = eth_type_trans(skb, cp->dev);
2144 return len;
2145}
2146
2147
2148/* we can handle up to 64 rx flows at a time. we do the same thing
2149 * as nonreassm except that we batch up the buffers.
2150 * NOTE: we currently just treat each flow as a bunch of packets that
2151 * we pass up. a better way would be to coalesce the packets
2152 * into a jumbo packet. to do that, we need to do the following:
2153 * 1) the first packet will have a clean split between header and
2154 * data. save both.
2155 * 2) each time the next flow packet comes in, extend the
2156 * data length and merge the checksums.
2157 * 3) on flow release, fix up the header.
2158 * 4) make sure the higher layer doesn't care.
2159 * because packets get coalesced, we shouldn't run into fragment count
2160 * issues.
2161 */
2162static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2163 struct sk_buff *skb)
2164{
2165 int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2166 struct sk_buff_head *flow = &cp->rx_flows[flowid];
2167
2168 /* this is protected at a higher layer, so no need to
2169 * do any additional locking here. stick the buffer
2170 * at the end.
2171 */
2172 __skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow);
2173 if (words[0] & RX_COMP1_RELEASE_FLOW) {
2174 while ((skb = __skb_dequeue(flow))) {
2175 cas_skb_release(skb);
2176 }
2177 }
2178}
2179
2180/* put an rx descriptor back on the ring. if a buffer is still in use by
2181 * a higher layer, a replacement has to be swapped in.
2182 */
2183static void cas_post_page(struct cas *cp, const int ring, const int index)
2184{
2185 cas_page_t *new;
2186 int entry;
2187
2188 entry = cp->rx_old[ring];
2189
2190 new = cas_page_swap(cp, ring, index);
2191 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2192 cp->init_rxds[ring][entry].index =
2193 cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2194 CAS_BASE(RX_INDEX_RING, ring));
2195
2196 entry = RX_DESC_ENTRY(ring, entry + 1);
2197 cp->rx_old[ring] = entry;
2198
2199 if (entry % 4)
2200 return;
2201
2202 if (ring == 0)
2203 writel(entry, cp->regs + REG_RX_KICK);
2204 else if ((N_RX_DESC_RINGS > 1) &&
2205 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2206 writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2207}
2208
2209
2210/* only when things are bad */
2211static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2212{
2213 unsigned int entry, last, count, released;
2214 int cluster;
2215 cas_page_t **page = cp->rx_pages[ring];
2216
2217 entry = cp->rx_old[ring];
2218
2219 if (netif_msg_intr(cp))
2220 printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n",
2221 cp->dev->name, ring, entry);
2222
2223 cluster = -1;
2224 count = entry & 0x3;
2225 last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
2226 released = 0;
2227 while (entry != last) {
2228 /* make a new buffer if it's still in use */
2229 if (page_count(page[entry]->buffer) > 1) {
2230 cas_page_t *new = cas_page_dequeue(cp);
2231 if (!new) {
2232 /* let the timer know that we need to
2233 * do this again
2234 */
2235 cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2236 if (!timer_pending(&cp->link_timer))
2237 mod_timer(&cp->link_timer, jiffies +
2238 CAS_LINK_FAST_TIMEOUT);
2239 cp->rx_old[ring] = entry;
2240 cp->rx_last[ring] = num ? num - released : 0;
2241 return -ENOMEM;
2242 }
2243 spin_lock(&cp->rx_inuse_lock);
2244 list_add(&page[entry]->list, &cp->rx_inuse_list);
2245 spin_unlock(&cp->rx_inuse_lock);
2246 cp->init_rxds[ring][entry].buffer =
2247 cpu_to_le64(new->dma_addr);
2248 page[entry] = new;
2249
2250 }
2251
2252 if (++count == 4) {
2253 cluster = entry;
2254 count = 0;
2255 }
2256 released++;
2257 entry = RX_DESC_ENTRY(ring, entry + 1);
2258 }
2259 cp->rx_old[ring] = entry;
2260
2261 if (cluster < 0)
2262 return 0;
2263
2264 if (ring == 0)
2265 writel(cluster, cp->regs + REG_RX_KICK);
2266 else if ((N_RX_DESC_RINGS > 1) &&
2267 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2268 writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2269 return 0;
2270}
2271
2272
2273/* process a completion ring. packets are set up in three basic ways:
2274 * small packets: should be copied header + data in single buffer.
2275 * large packets: header and data in a single buffer.
2276 * split packets: header in a separate buffer from data.
2277 * data may be in multiple pages. data may be > 256
2278 * bytes but in a single page.
2279 *
2280 * NOTE: RX page posting is done in this routine as well. while there's
2281 * the capability of using multiple RX completion rings, it isn't
2282 * really worthwhile due to the fact that the page posting will
2283 * force serialization on the single descriptor ring.
2284 */
2285static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2286{
2287 struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2288 int entry, drops;
2289 int npackets = 0;
2290
2291 if (netif_msg_intr(cp))
2292 printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
2293 cp->dev->name, ring,
2294 readl(cp->regs + REG_RX_COMP_HEAD),
2295 cp->rx_new[ring]);
2296
2297 entry = cp->rx_new[ring];
2298 drops = 0;
2299 while (1) {
2300 struct cas_rx_comp *rxc = rxcs + entry;
2301 struct sk_buff *skb;
2302 int type, len;
2303 u64 words[4];
2304 int i, dring;
2305
2306 words[0] = le64_to_cpu(rxc->word1);
2307 words[1] = le64_to_cpu(rxc->word2);
2308 words[2] = le64_to_cpu(rxc->word3);
2309 words[3] = le64_to_cpu(rxc->word4);
2310
2311 /* don't touch if still owned by hw */
2312 type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2313 if (type == 0)
2314 break;
2315
2316 /* hw hasn't cleared the zero bit yet */
2317 if (words[3] & RX_COMP4_ZERO) {
2318 break;
2319 }
2320
2321 /* get info on the packet */
2322 if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2323 spin_lock(&cp->stat_lock[ring]);
2324 cp->net_stats[ring].rx_errors++;
2325 if (words[3] & RX_COMP4_LEN_MISMATCH)
2326 cp->net_stats[ring].rx_length_errors++;
2327 if (words[3] & RX_COMP4_BAD)
2328 cp->net_stats[ring].rx_crc_errors++;
2329 spin_unlock(&cp->stat_lock[ring]);
2330
2331 /* We'll just return it to Cassini. */
2332 drop_it:
2333 spin_lock(&cp->stat_lock[ring]);
2334 ++cp->net_stats[ring].rx_dropped;
2335 spin_unlock(&cp->stat_lock[ring]);
2336 goto next;
2337 }
2338
2339 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2340 if (len < 0) {
2341 ++drops;
2342 goto drop_it;
2343 }
2344
2345 /* see if it's a flow re-assembly or not. the driver
2346 * itself handles release back up.
2347 */
2348 if (RX_DONT_BATCH || (type == 0x2)) {
2349 /* non-reassm: these always get released */
2350 cas_skb_release(skb);
2351 } else {
2352 cas_rx_flow_pkt(cp, words, skb);
2353 }
2354
2355 spin_lock(&cp->stat_lock[ring]);
2356 cp->net_stats[ring].rx_packets++;
2357 cp->net_stats[ring].rx_bytes += len;
2358 spin_unlock(&cp->stat_lock[ring]);
2359 cp->dev->last_rx = jiffies;
2360
2361 next:
2362 npackets++;
2363
2364 /* should it be released? */
2365 if (words[0] & RX_COMP1_RELEASE_HDR) {
2366 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2367 dring = CAS_VAL(RX_INDEX_RING, i);
2368 i = CAS_VAL(RX_INDEX_NUM, i);
2369 cas_post_page(cp, dring, i);
2370 }
2371
2372 if (words[0] & RX_COMP1_RELEASE_DATA) {
2373 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2374 dring = CAS_VAL(RX_INDEX_RING, i);
2375 i = CAS_VAL(RX_INDEX_NUM, i);
2376 cas_post_page(cp, dring, i);
2377 }
2378
2379 if (words[0] & RX_COMP1_RELEASE_NEXT) {
2380 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2381 dring = CAS_VAL(RX_INDEX_RING, i);
2382 i = CAS_VAL(RX_INDEX_NUM, i);
2383 cas_post_page(cp, dring, i);
2384 }
2385
2386 /* skip to the next entry */
2387 entry = RX_COMP_ENTRY(ring, entry + 1 +
2388 CAS_VAL(RX_COMP1_SKIP, words[0]));
2389#ifdef USE_NAPI
2390 if (budget && (npackets >= budget))
2391 break;
2392#endif
2393 }
2394 cp->rx_new[ring] = entry;
2395
2396 if (drops)
2397 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
2398 cp->dev->name);
2399 return npackets;
2400}
2401
2402
2403/* put completion entries back on the ring */
2404static void cas_post_rxcs_ringN(struct net_device *dev,
2405 struct cas *cp, int ring)
2406{
2407 struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2408 int last, entry;
2409
2410 last = cp->rx_cur[ring];
2411 entry = cp->rx_new[ring];
2412 if (netif_msg_intr(cp))
2413 printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
2414 dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
2415 entry);
2416
2417 /* zero and re-mark descriptors */
2418 while (last != entry) {
2419 cas_rxc_init(rxc + last);
2420 last = RX_COMP_ENTRY(ring, last + 1);
2421 }
2422 cp->rx_cur[ring] = last;
2423
2424 if (ring == 0)
2425 writel(last, cp->regs + REG_RX_COMP_TAIL);
2426 else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2427 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2428}
2429
2430
2431
2432/* cassini can use all four PCI interrupts for the completion ring.
2433 * rings 3 and 4 are identical
2434 */
2435#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2436static inline void cas_handle_irqN(struct net_device *dev,
2437 struct cas *cp, const u32 status,
2438 const int ring)
2439{
2440 if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2441 cas_post_rxcs_ringN(dev, cp, ring);
2442}
2443
2444static irqreturn_t cas_interruptN(int irq, void *dev_id, struct pt_regs *regs)
2445{
2446 struct net_device *dev = dev_id;
2447 struct cas *cp = netdev_priv(dev);
2448 unsigned long flags;
2449	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2450	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2451
2452	/* check for shared irq */
2453	if (status == 0)
2454		return IRQ_NONE;
2455
2457 spin_lock_irqsave(&cp->lock, flags);
2458 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2459#ifdef USE_NAPI
2460 cas_mask_intr(cp);
2461 netif_rx_schedule(dev);
2462#else
2463 cas_rx_ringN(cp, ring, 0);
2464#endif
2465 status &= ~INTR_RX_DONE_ALT;
2466 }
2467
2468 if (status)
2469 cas_handle_irqN(dev, cp, status, ring);
2470 spin_unlock_irqrestore(&cp->lock, flags);
2471 return IRQ_HANDLED;
2472}
2473#endif
2474
2475#ifdef USE_PCI_INTB
2476/* everything but rx packets */
2477static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2478{
2479 if (status & INTR_RX_BUF_UNAVAIL_1) {
2480 /* Frame arrived, no free RX buffers available.
2481 * NOTE: we can get this on a link transition. */
2482 cas_post_rxds_ringN(cp, 1, 0);
2483 spin_lock(&cp->stat_lock[1]);
2484 cp->net_stats[1].rx_dropped++;
2485 spin_unlock(&cp->stat_lock[1]);
2486 }
2487
2488 if (status & INTR_RX_BUF_AE_1)
2489 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2490 RX_AE_FREEN_VAL(1));
2491
2492 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2493 cas_post_rxcs_ringN(cp, 1);
2494}
2495
2496/* ring 2 handles a few more events than 3 and 4 */
2497static irqreturn_t cas_interrupt1(int irq, void *dev_id, struct pt_regs *regs)
2498{
2499 struct net_device *dev = dev_id;
2500 struct cas *cp = netdev_priv(dev);
2501 unsigned long flags;
2502 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2503
2504 /* check for shared interrupt */
2505 if (status == 0)
2506 return IRQ_NONE;
2507
2508 spin_lock_irqsave(&cp->lock, flags);
2509 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2510#ifdef USE_NAPI
2511 cas_mask_intr(cp);
2512 netif_rx_schedule(dev);
2513#else
2514 cas_rx_ringN(cp, 1, 0);
2515#endif
2516 status &= ~INTR_RX_DONE_ALT;
2517 }
2518 if (status)
2519 cas_handle_irq1(cp, status);
2520 spin_unlock_irqrestore(&cp->lock, flags);
2521 return IRQ_HANDLED;
2522}
2523#endif
2524
2525static inline void cas_handle_irq(struct net_device *dev,
2526 struct cas *cp, const u32 status)
2527{
2528 /* housekeeping interrupts */
2529 if (status & INTR_ERROR_MASK)
2530 cas_abnormal_irq(dev, cp, status);
2531
2532 if (status & INTR_RX_BUF_UNAVAIL) {
2533 /* Frame arrived, no free RX buffers available.
2534 * NOTE: we can get this on a link transition.
2535 */
2536 cas_post_rxds_ringN(cp, 0, 0);
2537 spin_lock(&cp->stat_lock[0]);
2538 cp->net_stats[0].rx_dropped++;
2539 spin_unlock(&cp->stat_lock[0]);
2540 } else if (status & INTR_RX_BUF_AE) {
2541 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2542 RX_AE_FREEN_VAL(0));
2543 }
2544
2545 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2546 cas_post_rxcs_ringN(dev, cp, 0);
2547}
2548
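/* main interrupt handler (ring 0): tx completions, ring 0 rx (inline or
 * deferred to NAPI), and any remaining housekeeping status bits.
 */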
2549static irqreturn_t cas_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2550{
2551 struct net_device *dev = dev_id;
2552 struct cas *cp = netdev_priv(dev);
2553 unsigned long flags;
2554 u32 status = readl(cp->regs + REG_INTR_STATUS);
2555
2556 if (status == 0)
2557 return IRQ_NONE;
2558
2559 spin_lock_irqsave(&cp->lock, flags);
2560 if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2561 cas_tx(dev, cp, status);
2562 status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2563 }
2564
2565 if (status & INTR_RX_DONE) {
2566#ifdef USE_NAPI
2567 cas_mask_intr(cp);
2568 netif_rx_schedule(dev);
2569#else
2570 cas_rx_ringN(cp, 0, 0);
2571#endif
2572 status &= ~INTR_RX_DONE;
2573 }
2574
2575 if (status)
2576 cas_handle_irq(dev, cp, status);
2577 spin_unlock_irqrestore(&cp->lock, flags);
2578 return IRQ_HANDLED;
2579}
2580
2581
2582#ifdef USE_NAPI
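/* NAPI poll: reap tx completions, spread the rx budget evenly across the
 * completion rings, then ack whatever housekeeping status is left.
 */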
2583static int cas_poll(struct net_device *dev, int *budget)
2584{
2585 struct cas *cp = netdev_priv(dev);
2586 int i, enable_intr, todo, credits;
2587 u32 status = readl(cp->regs + REG_INTR_STATUS);
2588 unsigned long flags;
2589
2590 spin_lock_irqsave(&cp->lock, flags);
2591 cas_tx(dev, cp, status);
2592 spin_unlock_irqrestore(&cp->lock, flags);
2593
2594 /* NAPI rx packets. we spread the credits across all of the
2595 * rxc rings
2596 */
2597 todo = min(*budget, dev->quota);
2598
2599 /* to make sure we're fair with the work we loop through each
2600 * ring N_RX_COMP_RING times with a request of
2601 * todo / N_RX_COMP_RINGS
2602 */
2603 enable_intr = 1;
2604 credits = 0;
2605 for (i = 0; i < N_RX_COMP_RINGS; i++) {
2606 int j;
2607 for (j = 0; j < N_RX_COMP_RINGS; j++) {
2608 credits += cas_rx_ringN(cp, j, todo / N_RX_COMP_RINGS);
2609 if (credits >= todo) {
2610 enable_intr = 0;
2611 goto rx_comp;
2612 }
2613 }
2614 }
2615
2616rx_comp:
2617 *budget -= credits;
2618 dev->quota -= credits;
2619
2620 /* final rx completion */
2621 spin_lock_irqsave(&cp->lock, flags);
2622 if (status)
2623 cas_handle_irq(dev, cp, status);
2624
2625#ifdef USE_PCI_INTB
2626 if (N_RX_COMP_RINGS > 1) {
2627 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2628 if (status)
2629			cas_handle_irq1(cp, status);
2630 }
2631#endif
2632
2633#ifdef USE_PCI_INTC
2634 if (N_RX_COMP_RINGS > 2) {
2635 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2636 if (status)
2637 cas_handle_irqN(dev, cp, status, 2);
2638 }
2639#endif
2640
2641#ifdef USE_PCI_INTD
2642 if (N_RX_COMP_RINGS > 3) {
2643 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2644 if (status)
2645 cas_handle_irqN(dev, cp, status, 3);
2646 }
2647#endif
2648 spin_unlock_irqrestore(&cp->lock, flags);
2649 if (enable_intr) {
2650 netif_rx_complete(dev);
2651 cas_unmask_intr(cp);
2652 return 0;
2653 }
2654 return 1;
2655}
2656#endif
2657
2658#ifdef CONFIG_NET_POLL_CONTROLLER
2659static void cas_netpoll(struct net_device *dev)
2660{
2661 struct cas *cp = netdev_priv(dev);
2662
2663 cas_disable_irq(cp, 0);
2664 cas_interrupt(cp->pdev->irq, dev, NULL);
2665 cas_enable_irq(cp, 0);
2666
2667#ifdef USE_PCI_INTB
2668 if (N_RX_COMP_RINGS > 1) {
2669 /* cas_interrupt1(); */
2670 }
2671#endif
2672#ifdef USE_PCI_INTC
2673 if (N_RX_COMP_RINGS > 2) {
2674 /* cas_interruptN(); */
2675 }
2676#endif
2677#ifdef USE_PCI_INTD
2678 if (N_RX_COMP_RINGS > 3) {
2679 /* cas_interruptN(); */
2680 }
2681#endif
2682}
2683#endif
2684
2685static void cas_tx_timeout(struct net_device *dev)
2686{
2687 struct cas *cp = netdev_priv(dev);
2688
2689 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
2690 if (!cp->hw_running) {
2691		printk(KERN_ERR "%s: hw not running!\n", dev->name);
2692 return;
2693 }
2694
2695 printk(KERN_ERR "%s: MIF_STATE[%08x]\n",
2696 dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE));
2697
2698 printk(KERN_ERR "%s: MAC_STATE[%08x]\n",
2699 dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE));
2700
2701 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] "
2702 "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2703 dev->name,
2704 readl(cp->regs + REG_TX_CFG),
2705 readl(cp->regs + REG_MAC_TX_STATUS),
2706 readl(cp->regs + REG_MAC_TX_CFG),
2707 readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2708 readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2709 readl(cp->regs + REG_TX_FIFO_READ_PTR),
2710 readl(cp->regs + REG_TX_SM_1),
2711 readl(cp->regs + REG_TX_SM_2));
2712
2713 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
2714 dev->name,
2715 readl(cp->regs + REG_RX_CFG),
2716 readl(cp->regs + REG_MAC_RX_STATUS),
2717 readl(cp->regs + REG_MAC_RX_CFG));
2718
2719 printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n",
2720 dev->name,
2721 readl(cp->regs + REG_HP_STATE_MACHINE),
2722 readl(cp->regs + REG_HP_STATUS0),
2723 readl(cp->regs + REG_HP_STATUS1),
2724 readl(cp->regs + REG_HP_STATUS2));
2725
2726#if 1
2727 atomic_inc(&cp->reset_task_pending);
2728 atomic_inc(&cp->reset_task_pending_all);
2729 schedule_work(&cp->reset_task);
2730#else
2731 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2732 schedule_work(&cp->reset_task);
2733#endif
2734}
2735
2736static inline int cas_intme(int ring, int entry)
2737{
2738 /* Algorithm: IRQ every 1/2 of descriptors. */
2739 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2740 return 1;
2741 return 0;
2742}
2743
2744
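/* fill in one tx descriptor: buffer length, an interrupt request every
 * half ring (cas_intme), and EOF on the last fragment of a packet.
 */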
2745static void cas_write_txd(struct cas *cp, int ring, int entry,
2746 dma_addr_t mapping, int len, u64 ctrl, int last)
2747{
2748 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2749
2750 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2751 if (cas_intme(ring, entry))
2752 ctrl |= TX_DESC_INTME;
2753 if (last)
2754 ctrl |= TX_DESC_EOF;
2755 txd->control = cpu_to_le64(ctrl);
2756 txd->buffer = cpu_to_le64(mapping);
2757}
2758
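/* tiny buffers are small pre-mapped bounce buffers.  when cas_calc_tabort
 * reports that the tail of a buffer can't be DMA'd directly, that tail is
 * copied into a tiny buffer and sent from there (see cas_xmit_tx_ringN).
 */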
2759static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2760 const int entry)
2761{
2762 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2763}
2764
2765static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2766 const int entry, const int tentry)
2767{
2768 cp->tx_tiny_use[ring][tentry].nbufs++;
2769 cp->tx_tiny_use[ring][entry].used = 1;
2770 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2771}
2772
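/* queue one skb on a tx ring: map the linear part and each fragment,
 * bouncing problem tails through tiny buffers as needed.
 */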
2773static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2774 struct sk_buff *skb)
2775{
2776 struct net_device *dev = cp->dev;
2777 int entry, nr_frags, frag, tabort, tentry;
2778 dma_addr_t mapping;
2779 unsigned long flags;
2780 u64 ctrl;
2781 u32 len;
2782
2783 spin_lock_irqsave(&cp->tx_lock[ring], flags);
2784
2785 /* This is a hard error, log it. */
2786 if (TX_BUFFS_AVAIL(cp, ring) <=
2787 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2788 netif_stop_queue(dev);
2789 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2790 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
2791 "queue awake!\n", dev->name);
2792 return 1;
2793 }
2794
2795 ctrl = 0;
2796 if (skb->ip_summed == CHECKSUM_HW) {
2797 u64 csum_start_off, csum_stuff_off;
2798
2799 csum_start_off = (u64) (skb->h.raw - skb->data);
2800 csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
2801
2802 ctrl = TX_DESC_CSUM_EN |
2803 CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2804 CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2805 }
2806
2807 entry = cp->tx_new[ring];
2808 cp->tx_skbs[ring][entry] = skb;
2809
2810 nr_frags = skb_shinfo(skb)->nr_frags;
2811 len = skb_headlen(skb);
2812 mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
2813 offset_in_page(skb->data), len,
2814 PCI_DMA_TODEVICE);
2815
2816 tentry = entry;
2817 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2818 if (unlikely(tabort)) {
2819 /* NOTE: len is always > tabort */
2820 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2821 ctrl | TX_DESC_SOF, 0);
2822 entry = TX_DESC_NEXT(ring, entry);
2823
2824 memcpy(tx_tiny_buf(cp, ring, entry), skb->data +
2825 len - tabort, tabort);
2826 mapping = tx_tiny_map(cp, ring, entry, tentry);
2827 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2828 (nr_frags == 0));
2829 } else {
2830 cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2831 TX_DESC_SOF, (nr_frags == 0));
2832 }
2833 entry = TX_DESC_NEXT(ring, entry);
2834
2835 for (frag = 0; frag < nr_frags; frag++) {
2836 skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2837
2838 len = fragp->size;
2839 mapping = pci_map_page(cp->pdev, fragp->page,
2840 fragp->page_offset, len,
2841 PCI_DMA_TODEVICE);
2842
2843 tabort = cas_calc_tabort(cp, fragp->page_offset, len);
2844 if (unlikely(tabort)) {
2845 void *addr;
2846
2847 /* NOTE: len is always > tabort */
2848 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2849 ctrl, 0);
2850 entry = TX_DESC_NEXT(ring, entry);
2851
2852 addr = cas_page_map(fragp->page);
2853 memcpy(tx_tiny_buf(cp, ring, entry),
2854 addr + fragp->page_offset + len - tabort,
2855 tabort);
2856 cas_page_unmap(addr);
2857 mapping = tx_tiny_map(cp, ring, entry, tentry);
2858 len = tabort;
2859 }
2860
2861 cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2862 (frag + 1 == nr_frags));
2863 entry = TX_DESC_NEXT(ring, entry);
2864 }
2865
2866 cp->tx_new[ring] = entry;
2867 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2868 netif_stop_queue(dev);
2869
2870 if (netif_msg_tx_queued(cp))
2871 printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
2872 "avail %d\n",
2873 dev->name, ring, entry, skb->len,
2874 TX_BUFFS_AVAIL(cp, ring));
2875 writel(entry, cp->regs + REG_TX_KICKN(ring));
2876 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2877 return 0;
2878}
2879
2880static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2881{
2882 struct cas *cp = netdev_priv(dev);
2883
2884 /* this is only used as a load-balancing hint, so it doesn't
2885 * need to be SMP safe
2886 */
2887 static int ring;
2888
2889 skb = skb_padto(skb, cp->min_frame_size);
2890 if (!skb)
2891 return 0;
2892
2893 /* XXX: we need some higher-level QoS hooks to steer packets to
2894 * individual queues.
2895 */
2896 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2897 return 1;
2898 dev->trans_start = jiffies;
2899 return 0;
2900}
2901
2902static void cas_init_tx_dma(struct cas *cp)
2903{
2904 u64 desc_dma = cp->block_dvma;
2905 unsigned long off;
2906 u32 val;
2907 int i;
2908
2909 /* set up tx completion writeback registers. must be 8-byte aligned */
2910#ifdef USE_TX_COMPWB
2911 off = offsetof(struct cas_init_block, tx_compwb);
2912 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2913 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2914#endif
2915
2916 /* enable completion writebacks, enable paced mode,
2917 * disable read pipe, and disable pre-interrupt compwbs
2918 */
2919 val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2920 TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2921 TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2922 TX_CFG_INTR_COMPWB_DIS;
2923
2924 /* write out tx ring info and tx desc bases */
2925 for (i = 0; i < MAX_TX_RINGS; i++) {
2926 off = (unsigned long) cp->init_txds[i] -
2927 (unsigned long) cp->init_block;
2928
2929 val |= CAS_TX_RINGN_BASE(i);
2930 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2931 writel((desc_dma + off) & 0xffffffff, cp->regs +
2932 REG_TX_DBN_LOW(i));
2933 /* don't zero out the kick register here as the system
2934 * will wedge
2935 */
2936 }
2937 writel(val, cp->regs + REG_TX_CFG);
2938
2939 /* program max burst sizes. these numbers should be different
2940 * if doing QoS.
2941 */
2942#ifdef USE_QOS
2943 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2944 writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2945 writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2946 writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2947#else
2948 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2949 writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2950 writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2951 writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2952#endif
2953}
2954
2955/* Must be invoked under cp->lock. */
2956static inline void cas_init_dma(struct cas *cp)
2957{
2958 cas_init_tx_dma(cp);
2959 cas_init_rx_dma(cp);
2960}
2961
2962/* Must be invoked under cp->lock. */
2963static u32 cas_setup_multicast(struct cas *cp)
2964{
2965 u32 rxcfg = 0;
2966 int i;
2967
2968 if (cp->dev->flags & IFF_PROMISC) {
2969 rxcfg |= MAC_RX_CFG_PROMISC_EN;
2970
2971 } else if (cp->dev->flags & IFF_ALLMULTI) {
2972 for (i=0; i < 16; i++)
2973 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2974 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2975
2976 } else {
2977 u16 hash_table[16];
2978 u32 crc;
2979 struct dev_mc_list *dmi = cp->dev->mc_list;
2980 int i;
2981
2982 /* use the alternate mac address registers for the
2983 * first 15 multicast addresses
2984 */
2985 for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) {
2986 if (!dmi) {
2987 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0));
2988 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1));
2989 writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
2990 continue;
2991 }
2992 writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
2993 cp->regs + REG_MAC_ADDRN(i*3 + 0));
2994 writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
2995 cp->regs + REG_MAC_ADDRN(i*3 + 1));
2996 writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
2997 cp->regs + REG_MAC_ADDRN(i*3 + 2));
2998 dmi = dmi->next;
2999 }
3000
3001 /* use hw hash table for the next series of
3002 * multicast addresses
3003 */
3004 memset(hash_table, 0, sizeof(hash_table));
3005 while (dmi) {
3006 crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
3007 crc >>= 24;
3008 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
3009 dmi = dmi->next;
3010 }
3011 for (i=0; i < 16; i++)
3012 writel(hash_table[i], cp->regs +
3013 REG_MAC_HASH_TABLEN(i));
3014 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
3015 }
3016
3017 return rxcfg;
3018}
3019
3020/* must be invoked under cp->stat_lock[N_TX_RINGS] */
3021static void cas_clear_mac_err(struct cas *cp)
3022{
3023 writel(0, cp->regs + REG_MAC_COLL_NORMAL);
3024 writel(0, cp->regs + REG_MAC_COLL_FIRST);
3025 writel(0, cp->regs + REG_MAC_COLL_EXCESS);
3026 writel(0, cp->regs + REG_MAC_COLL_LATE);
3027 writel(0, cp->regs + REG_MAC_TIMER_DEFER);
3028 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
3029 writel(0, cp->regs + REG_MAC_RECV_FRAME);
3030 writel(0, cp->regs + REG_MAC_LEN_ERR);
3031 writel(0, cp->regs + REG_MAC_ALIGN_ERR);
3032 writel(0, cp->regs + REG_MAC_FCS_ERR);
3033 writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
3034}
3035
3036
3037static void cas_mac_reset(struct cas *cp)
3038{
3039 int i;
3040
3041 /* do both TX and RX reset */
3042 writel(0x1, cp->regs + REG_MAC_TX_RESET);
3043 writel(0x1, cp->regs + REG_MAC_RX_RESET);
3044
3045 /* wait for TX */
3046 i = STOP_TRIES;
3047 while (i-- > 0) {
3048 if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
3049 break;
3050 udelay(10);
3051 }
3052
3053 /* wait for RX */
3054 i = STOP_TRIES;
3055 while (i-- > 0) {
3056 if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3057 break;
3058 udelay(10);
3059 }
3060
3061 if (readl(cp->regs + REG_MAC_TX_RESET) |
3062 readl(cp->regs + REG_MAC_RX_RESET))
3063 printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n",
3064 cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET),
3065 readl(cp->regs + REG_MAC_RX_RESET),
3066 readl(cp->regs + REG_MAC_STATE_MACHINE));
3067}
3068
3069
3070/* Must be invoked under cp->lock. */
3071static void cas_init_mac(struct cas *cp)
3072{
3073 unsigned char *e = &cp->dev->dev_addr[0];
3074 int i;
3075#ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE
3076 u32 rxcfg;
3077#endif
3078 cas_mac_reset(cp);
3079
3080 /* setup core arbitration weight register */
3081 writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3082
3083 /* XXX Use pci_dma_burst_advice() */
3084#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3085 /* set the infinite burst register for chips that don't have
3086 * pci issues.
3087 */
3088 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3089 writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3090#endif
3091
3092 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3093
3094 writel(0x00, cp->regs + REG_MAC_IPG0);
3095 writel(0x08, cp->regs + REG_MAC_IPG1);
3096 writel(0x04, cp->regs + REG_MAC_IPG2);
3097
3098 /* change later for 802.3z */
3099 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3100
3101 /* min frame + FCS */
3102 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3103
3104 /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
3105 * specify the maximum frame size to prevent RX tag errors on
3106 * oversized frames.
3107 */
3108 writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3109 CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3110 (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3111 cp->regs + REG_MAC_FRAMESIZE_MAX);
3112
3113 /* NOTE: crc_size is used as a surrogate for half-duplex.
3114 * workaround saturn half-duplex issue by increasing preamble
3115 * size to 65 bytes.
3116 */
3117 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3118 writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3119 else
3120 writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3121 writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3122 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3123 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3124
3125 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3126
3127 writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3128 writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3129 writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3130 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3131 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3132
3133 /* setup mac address in perfect filter array */
3134 for (i = 0; i < 45; i++)
3135 writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3136
3137 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3138 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3139 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3140
3141 writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3142 writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3143 writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3144
3145#ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE
3146 cp->mac_rx_cfg = cas_setup_multicast(cp);
3147#else
3148 /* WTZ: Do what Adrian did in cas_set_multicast. Doing
3149 * a writel does not seem to be necessary because Cassini
3150 * seems to preserve the configuration when we do the reset.
3151 * If the chip is in trouble, though, it is not clear if we
3152 * can really count on this behavior. cas_set_multicast uses
3153 * spin_lock_irqsave, but we are called only in cas_init_hw and
3154 * cas_init_hw is protected by cas_lock_all, which calls
3155 * spin_lock_irq (so it doesn't need to save the flags, and
3156 * we should be OK for the writel, as that is the only
3157 * difference).
3158 */
3159 cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp);
3160 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
3161#endif
3162 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3163 cas_clear_mac_err(cp);
3164 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3165
3166 /* Setup MAC interrupts. We want to get all of the interesting
3167 * counter expiration events, but we do not want to hear about
3168 * normal rx/tx as the DMA engine tells us that.
3169 */
3170 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3171 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3172
3173 /* Don't enable even the PAUSE interrupts for now, we
3174 * make no use of those events other than to record them.
3175 */
3176 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3177}
3178
3179/* Must be invoked under cp->lock. */
3180static void cas_init_pause_thresholds(struct cas *cp)
3181{
3182 /* Calculate pause thresholds. Setting the OFF threshold to the
3183 * full RX fifo size effectively disables PAUSE generation
3184 */
3185 if (cp->rx_fifo_size <= (2 * 1024)) {
3186 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3187 } else {
3188 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
3189 if (max_frame * 3 > cp->rx_fifo_size) {
3190 cp->rx_pause_off = 7104;
3191 cp->rx_pause_on = 960;
3192 } else {
3193 int off = (cp->rx_fifo_size - (max_frame * 2));
3194 int on = off - max_frame;
3195 cp->rx_pause_off = off;
3196 cp->rx_pause_on = on;
3197 }
3198 }
3199}
3200
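/* compare a NUL-terminated string against bytes in the expansion rom.
 * returns 1 on a full match (including the terminating NUL).
 */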
3201static int cas_vpd_match(const void __iomem *p, const char *str)
3202{
3203 int len = strlen(str) + 1;
3204 int i;
3205
3206 for (i = 0; i < len; i++) {
3207 if (readb(p + i) != str[i])
3208 return 0;
3209 }
3210 return 1;
3211}
3212
3213
3214/* get the mac address by reading the vpd information in the rom.
3215 * also get the phy type and determine if there's an entropy generator.
3216 * NOTE: this is a bit convoluted for the following reasons:
3217 * 1) vpd info has order-dependent mac addresses for multinic cards
3218 * 2) the only way to determine the nic order is to use the slot
3219 * number.
3220 * 3) fiber cards don't have bridges, so their slot numbers don't
3221 * mean anything.
3222 * 4) we don't actually know we have a fiber card until after
3223 * the mac addresses are parsed.
3224 */
3225static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3226 const int offset)
3227{
3228 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3229 void __iomem *base, *kstart;
3230 int i, len;
3231 int found = 0;
3232#define VPD_FOUND_MAC 0x01
3233#define VPD_FOUND_PHY 0x02
3234
3235 int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
3236 int mac_off = 0;
3237
3238 /* give us access to the PROM */
3239 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3240 cp->regs + REG_BIM_LOCAL_DEV_EN);
3241
3242 /* check for an expansion rom */
3243 if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3244 goto use_random_mac_addr;
3245
3246 /* search for beginning of vpd */
3247 base = NULL;
3248 for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3249 /* check for PCIR */
3250 if ((readb(p + i + 0) == 0x50) &&
3251 (readb(p + i + 1) == 0x43) &&
3252 (readb(p + i + 2) == 0x49) &&
3253 (readb(p + i + 3) == 0x52)) {
3254 base = p + (readb(p + i + 8) |
3255 (readb(p + i + 9) << 8));
3256 break;
3257 }
3258 }
3259
3260 if (!base || (readb(base) != 0x82))
3261 goto use_random_mac_addr;
3262
3263 i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
3264 while (i < EXPANSION_ROM_SIZE) {
3265 if (readb(base + i) != 0x90) /* no vpd found */
3266 goto use_random_mac_addr;
3267
3268 /* found a vpd field */
3269 len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3270
3271 /* extract keywords */
3272 kstart = base + i + 3;
3273 p = kstart;
3274 while ((p - kstart) < len) {
3275 int klen = readb(p + 2);
3276 int j;
3277 char type;
3278
3279 p += 3;
3280
3281 /* look for the following things:
3282 * -- correct length == 29
3283 * 3 (type) + 2 (size) +
3284 * 18 (strlen("local-mac-address") + 1) +
3285 * 6 (mac addr)
3286 * -- VPD Instance 'I'
3287 * -- VPD Type Bytes 'B'
3288 * -- VPD data length == 6
3289 * -- property string == local-mac-address
3290 *
3291 * -- correct length == 24
3292 * 3 (type) + 2 (size) +
3293 * 12 (strlen("entropy-dev") + 1) +
3294 * 7 (strlen("vms110") + 1)
3295 * -- VPD Instance 'I'
3296			 * -- VPD Type String 'S'
3297 * -- VPD data length == 7
3298 * -- property string == entropy-dev
3299 *
3300 * -- correct length == 18
3301 * 3 (type) + 2 (size) +
3302 * 9 (strlen("phy-type") + 1) +
3303 * 4 (strlen("pcs") + 1)
3304 * -- VPD Instance 'I'
3305 * -- VPD Type String 'S'
3306 * -- VPD data length == 4
3307 * -- property string == phy-type
3308 *
3309 * -- correct length == 23
3310 * 3 (type) + 2 (size) +
3311 * 14 (strlen("phy-interface") + 1) +
3312 * 4 (strlen("pcs") + 1)
3313 * -- VPD Instance 'I'
3314 * -- VPD Type String 'S'
3315 * -- VPD data length == 4
3316 * -- property string == phy-interface
3317 */
3318 if (readb(p) != 'I')
3319 goto next;
3320
3321 /* finally, check string and length */
3322 type = readb(p + 3);
3323 if (type == 'B') {
3324 if ((klen == 29) && readb(p + 4) == 6 &&
3325 cas_vpd_match(p + 5,
3326 "local-mac-address")) {
3327 if (mac_off++ > offset)
3328 goto next;
3329
3330 /* set mac address */
3331 for (j = 0; j < 6; j++)
3332 dev_addr[j] =
3333 readb(p + 23 + j);
3334 goto found_mac;
3335 }
3336 }
3337
3338 if (type != 'S')
3339 goto next;
3340
3341#ifdef USE_ENTROPY_DEV
3342 if ((klen == 24) &&
3343 cas_vpd_match(p + 5, "entropy-dev") &&
3344 cas_vpd_match(p + 17, "vms110")) {
3345 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3346 goto next;
3347 }
3348#endif
3349
3350 if (found & VPD_FOUND_PHY)
3351 goto next;
3352
3353 if ((klen == 18) && readb(p + 4) == 4 &&
3354 cas_vpd_match(p + 5, "phy-type")) {
3355 if (cas_vpd_match(p + 14, "pcs")) {
3356 phy_type = CAS_PHY_SERDES;
3357 goto found_phy;
3358 }
3359 }
3360
3361 if ((klen == 23) && readb(p + 4) == 4 &&
3362 cas_vpd_match(p + 5, "phy-interface")) {
3363 if (cas_vpd_match(p + 19, "pcs")) {
3364 phy_type = CAS_PHY_SERDES;
3365 goto found_phy;
3366 }
3367 }
3368found_mac:
3369 found |= VPD_FOUND_MAC;
3370 goto next;
3371
3372found_phy:
3373 found |= VPD_FOUND_PHY;
3374
3375next:
3376 p += klen;
3377 }
3378 i += len + 3;
3379 }
3380
3381use_random_mac_addr:
3382 if (found & VPD_FOUND_MAC)
3383 goto done;
3384
3385 /* Sun MAC prefix then 3 random bytes. */
3386	printk(KERN_INFO PFX "MAC address not found in ROM VPD\n");
3387 dev_addr[0] = 0x08;
3388 dev_addr[1] = 0x00;
3389 dev_addr[2] = 0x20;
3390 get_random_bytes(dev_addr + 3, 3);
3391
3392done:
3393 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3394 return phy_type;
3395}
3396
3397/* check pci invariants */
3398static void cas_check_pci_invariants(struct cas *cp)
3399{
3400 struct pci_dev *pdev = cp->pdev;
3401 u8 rev;
3402
3403 cp->cas_flags = 0;
3404 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
3405 if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3406 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3407 if (rev >= CAS_ID_REVPLUS)
3408 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3409 if (rev < CAS_ID_REVPLUS02u)
3410 cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3411
3412 /* Original Cassini supports HW CSUM, but it's not
3413 * enabled by default as it can trigger TX hangs.
3414 */
3415 if (rev < CAS_ID_REV2)
3416 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3417 } else {
3418 /* Only sun has original cassini chips. */
3419 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3420
3421 /* We use a flag because the same phy might be externally
3422 * connected.
3423 */
3424 if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3425 (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3426 cp->cas_flags |= CAS_FLAG_SATURN;
3427 }
3428}
3429
3430
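/* probe-time checks: pick an rx page order, read the fifo sizes, and
 * figure out the phy (SERDES from VPD, otherwise scan the MII bus).
 */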
3431static int cas_check_invariants(struct cas *cp)
3432{
3433 struct pci_dev *pdev = cp->pdev;
3434 u32 cfg;
3435 int i;
3436
3437 /* get page size for rx buffers. */
3438 cp->page_order = 0;
3439#ifdef USE_PAGE_ORDER
3440 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3441 /* see if we can allocate larger pages */
3442 struct page *page = alloc_pages(GFP_ATOMIC,
3443 CAS_JUMBO_PAGE_SHIFT -
3444 PAGE_SHIFT);
3445 if (page) {
3446 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3447 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3448 } else {
3449			printk(KERN_INFO PFX "MTU limited to %d bytes\n", CAS_MAX_MTU);
3450 }
3451 }
3452#endif
3453 cp->page_size = (PAGE_SIZE << cp->page_order);
3454
3455 /* Fetch the FIFO configurations. */
3456 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3457 cp->rx_fifo_size = RX_FIFO_SIZE;
3458
3459 /* finish phy determination. MDIO1 takes precedence over MDIO0 if
3460 * they're both connected.
3461 */
3462 cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
3463 PCI_SLOT(pdev->devfn));
3464 if (cp->phy_type & CAS_PHY_SERDES) {
3465 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3466 return 0; /* no more checking needed */
3467 }
3468
3469 /* MII */
3470 cfg = readl(cp->regs + REG_MIF_CFG);
3471 if (cfg & MIF_CFG_MDIO_1) {
3472 cp->phy_type = CAS_PHY_MII_MDIO1;
3473 } else if (cfg & MIF_CFG_MDIO_0) {
3474 cp->phy_type = CAS_PHY_MII_MDIO0;
3475 }
3476
3477 cas_mif_poll(cp, 0);
3478 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3479
3480 for (i = 0; i < 32; i++) {
3481 u32 phy_id;
3482 int j;
3483
3484 for (j = 0; j < 3; j++) {
3485 cp->phy_addr = i;
3486 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3487 phy_id |= cas_phy_read(cp, MII_PHYSID2);
3488 if (phy_id && (phy_id != 0xFFFFFFFF)) {
3489 cp->phy_id = phy_id;
3490 goto done;
3491 }
3492 }
3493 }
3494 printk(KERN_ERR PFX "MII phy did not respond [%08x]\n",
3495 readl(cp->regs + REG_MIF_STATE_MACHINE));
3496 return -1;
3497
3498done:
3499 /* see if we can do gigabit */
3500 cfg = cas_phy_read(cp, MII_BMSR);
3501 if ((cfg & CAS_BMSR_1000_EXTEND) &&
3502 cas_phy_read(cp, CAS_MII_1000_EXTEND))
3503 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3504 return 0;
3505}
3506
3507/* Must be invoked under cp->lock. */
3508static inline void cas_start_dma(struct cas *cp)
3509{
3510 int i;
3511 u32 val;
3512 int txfailed = 0;
3513
3514 /* enable dma */
3515 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3516 writel(val, cp->regs + REG_TX_CFG);
3517 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3518 writel(val, cp->regs + REG_RX_CFG);
3519
3520 /* enable the mac */
3521 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3522 writel(val, cp->regs + REG_MAC_TX_CFG);
3523 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3524 writel(val, cp->regs + REG_MAC_RX_CFG);
3525
3526 i = STOP_TRIES;
3527 while (i-- > 0) {
3528 val = readl(cp->regs + REG_MAC_TX_CFG);
3529 if ((val & MAC_TX_CFG_EN))
3530 break;
3531 udelay(10);
3532 }
3533 if (i < 0) txfailed = 1;
3534 i = STOP_TRIES;
3535 while (i-- > 0) {
3536 val = readl(cp->regs + REG_MAC_RX_CFG);
3537 if ((val & MAC_RX_CFG_EN)) {
3538 if (txfailed) {
3539 printk(KERN_ERR
3540 "%s: enabling mac failed [tx:%08x:%08x].\n",
3541 cp->dev->name,
3542 readl(cp->regs + REG_MIF_STATE_MACHINE),
3543 readl(cp->regs + REG_MAC_STATE_MACHINE));
3544 }
3545 goto enable_rx_done;
3546 }
3547 udelay(10);
3548 }
3549 printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n",
3550 cp->dev->name,
3551 (txfailed? "tx,rx":"rx"),
3552 readl(cp->regs + REG_MIF_STATE_MACHINE),
3553 readl(cp->regs + REG_MAC_STATE_MACHINE));
3554
3555enable_rx_done:
3556 cas_unmask_intr(cp); /* enable interrupts */
3557 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3558 writel(0, cp->regs + REG_RX_COMP_TAIL);
3559
3560 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3561 if (N_RX_DESC_RINGS > 1)
3562 writel(RX_DESC_RINGN_SIZE(1) - 4,
3563 cp->regs + REG_PLUS_RX_KICK1);
3564
3565 for (i = 1; i < N_RX_COMP_RINGS; i++)
3566 writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
3567 }
3568}
3569
3570/* Must be invoked under cp->lock. */
3571static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3572 int *pause)
3573{
3574 u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3575 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
3576 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3577 if (val & PCS_MII_LPA_ASYM_PAUSE)
3578 *pause |= 0x10;
3579 *spd = 1000;
3580}
3581
3582/* Must be invoked under cp->lock. */
3583static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3584 int *pause)
3585{
3586 u32 val;
3587
3588 *fd = 0;
3589 *spd = 10;
3590 *pause = 0;
3591
3592 /* use GMII registers */
3593 val = cas_phy_read(cp, MII_LPA);
3594 if (val & CAS_LPA_PAUSE)
3595 *pause = 0x01;
3596
3597 if (val & CAS_LPA_ASYM_PAUSE)
3598 *pause |= 0x10;
3599
3600 if (val & LPA_DUPLEX)
3601 *fd = 1;
3602 if (val & LPA_100)
3603 *spd = 100;
3604
3605 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3606 val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3607 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3608 *spd = 1000;
3609 if (val & CAS_LPA_1000FULL)
3610 *fd = 1;
3611 }
3612}
3613
3614/* A link-up condition has occurred, initialize and enable the
3615 * rest of the chip.
3616 *
3617 * Must be invoked under cp->lock.
3618 */
3619static void cas_set_link_modes(struct cas *cp)
3620{
3621 u32 val;
3622 int full_duplex, speed, pause;
3623
3624 full_duplex = 0;
3625 speed = 10;
3626 pause = 0;
3627
3628 if (CAS_PHY_MII(cp->phy_type)) {
3629 cas_mif_poll(cp, 0);
3630 val = cas_phy_read(cp, MII_BMCR);
3631 if (val & BMCR_ANENABLE) {
3632 cas_read_mii_link_mode(cp, &full_duplex, &speed,
3633 &pause);
3634 } else {
3635 if (val & BMCR_FULLDPLX)
3636 full_duplex = 1;
3637
3638 if (val & BMCR_SPEED100)
3639 speed = 100;
3640 else if (val & CAS_BMCR_SPEED1000)
3641 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3642 1000 : 100;
3643 }
3644 cas_mif_poll(cp, 1);
3645
3646 } else {
3647 val = readl(cp->regs + REG_PCS_MII_CTRL);
3648 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3649 if ((val & PCS_MII_AUTONEG_EN) == 0) {
3650 if (val & PCS_MII_CTRL_DUPLEX)
3651 full_duplex = 1;
3652 }
3653 }
3654
3655 if (netif_msg_link(cp))
3656 printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n",
3657 cp->dev->name, speed, (full_duplex ? "full" : "half"));
3658
3659 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3660 if (CAS_PHY_MII(cp->phy_type)) {
3661 val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3662 if (!full_duplex)
3663 val |= MAC_XIF_DISABLE_ECHO;
3664 }
3665 if (full_duplex)
3666 val |= MAC_XIF_FDPLX_LED;
3667 if (speed == 1000)
3668 val |= MAC_XIF_GMII_MODE;
3669 writel(val, cp->regs + REG_MAC_XIF_CFG);
3670
3671 /* deal with carrier and collision detect. */
3672 val = MAC_TX_CFG_IPG_EN;
3673 if (full_duplex) {
3674 val |= MAC_TX_CFG_IGNORE_CARRIER;
3675 val |= MAC_TX_CFG_IGNORE_COLL;
3676 } else {
3677#ifndef USE_CSMA_CD_PROTO
3678 val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3679 val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3680#endif
3681 }
3682 /* val now set up for REG_MAC_TX_CFG */
3683
3684 /* If gigabit and half-duplex, enable carrier extension
3685 * mode. increase slot time to 512 bytes as well.
3686 * else, disable it and make sure slot time is 64 bytes.
3687 * also activate checksum bug workaround
3688 */
3689 if ((speed == 1000) && !full_duplex) {
3690 writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3691 cp->regs + REG_MAC_TX_CFG);
3692
3693 val = readl(cp->regs + REG_MAC_RX_CFG);
3694 val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
3695 writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3696 cp->regs + REG_MAC_RX_CFG);
3697
3698 writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3699
3700 cp->crc_size = 4;
3701 /* minimum size gigabit frame at half duplex */
3702 cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3703
3704 } else {
3705 writel(val, cp->regs + REG_MAC_TX_CFG);
3706
3707 /* checksum bug workaround. don't strip FCS when in
3708 * half-duplex mode
3709 */
3710 val = readl(cp->regs + REG_MAC_RX_CFG);
3711 if (full_duplex) {
3712 val |= MAC_RX_CFG_STRIP_FCS;
3713 cp->crc_size = 0;
3714 cp->min_frame_size = CAS_MIN_MTU;
3715 } else {
3716 val &= ~MAC_RX_CFG_STRIP_FCS;
3717 cp->crc_size = 4;
3718 cp->min_frame_size = CAS_MIN_FRAME;
3719 }
3720 writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3721 cp->regs + REG_MAC_RX_CFG);
3722 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3723 }
3724
3725 if (netif_msg_link(cp)) {
3726 if (pause & 0x01) {
3727 printk(KERN_INFO "%s: Pause is enabled "
3728 "(rxfifo: %d off: %d on: %d)\n",
3729 cp->dev->name,
3730 cp->rx_fifo_size,
3731 cp->rx_pause_off,
3732 cp->rx_pause_on);
3733 } else if (pause & 0x10) {
3734 printk(KERN_INFO "%s: TX pause enabled\n",
3735 cp->dev->name);
3736 } else {
3737 printk(KERN_INFO "%s: Pause is disabled\n",
3738 cp->dev->name);
3739 }
3740 }
3741
3742 val = readl(cp->regs + REG_MAC_CTRL_CFG);
3743 val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3744 if (pause) { /* symmetric or asymmetric pause */
3745 val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3746 if (pause & 0x01) { /* symmetric pause */
3747 val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3748 }
3749 }
3750 writel(val, cp->regs + REG_MAC_CTRL_CFG);
3751 cas_start_dma(cp);
3752}
3753
3754/* Must be invoked under cp->lock. */
3755static void cas_init_hw(struct cas *cp, int restart_link)
3756{
3757 if (restart_link)
3758 cas_phy_init(cp);
3759
3760 cas_init_pause_thresholds(cp);
3761 cas_init_mac(cp);
3762 cas_init_dma(cp);
3763
3764 if (restart_link) {
3765 /* Default aneg parameters */
3766 cp->timer_ticks = 0;
3767 cas_begin_auto_negotiation(cp, NULL);
3768 } else if (cp->lstate == link_up) {
3769 cas_set_link_modes(cp);
3770 netif_carrier_on(cp->dev);
3771 }
3772}
3773
3774/* Must be invoked under cp->lock. on earlier cassini boards,
3775 * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
3776 * let it settle out, and then restore pci state.
3777 */
3778static void cas_hard_reset(struct cas *cp)
3779{
3780 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3781 udelay(20);
3782 pci_restore_state(cp->pdev);
3783}
3784
3785
3786static void cas_global_reset(struct cas *cp, int blkflag)
3787{
3788 int limit;
3789
3790 /* issue a global reset. don't use RSTOUT. */
3791 if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3792 /* For PCS, when the blkflag is set, we should set the
3793		 * SW_RESET_BLOCK_PCS_SLINK bit to prevent the results of
3794 * the last autonegotiation from being cleared. We'll
3795 * need some special handling if the chip is set into a
3796 * loopback mode.
3797 */
3798 writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3799 cp->regs + REG_SW_RESET);
3800 } else {
3801 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3802 }
3803
3804 /* need to wait at least 3ms before polling register */
3805 mdelay(3);
3806
3807 limit = STOP_TRIES;
3808 while (limit-- > 0) {
3809 u32 val = readl(cp->regs + REG_SW_RESET);
3810 if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3811 goto done;
3812 udelay(10);
3813 }
3814 printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name);
3815
3816done:
3817 /* enable various BIM interrupts */
3818 writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3819 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3820
3821 /* clear out pci error status mask for handled errors.
3822 * we don't deal with DMA counter overflows as they happen
3823 * all the time.
3824 */
3825 writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3826 PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3827 PCI_ERR_BIM_DMA_READ), cp->regs +
3828 REG_PCI_ERR_STATUS_MASK);
3829
3830 /* set up for MII by default to address mac rx reset timeout
3831 * issue
3832 */
3833 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3834}
3835
3836static void cas_reset(struct cas *cp, int blkflag)
3837{
3838 u32 val;
3839
3840 cas_mask_intr(cp);
3841 cas_global_reset(cp, blkflag);
3842 cas_mac_reset(cp);
3843 cas_entropy_reset(cp);
3844
3845 /* disable dma engines. */
3846 val = readl(cp->regs + REG_TX_CFG);
3847 val &= ~TX_CFG_DMA_EN;
3848 writel(val, cp->regs + REG_TX_CFG);
3849
3850 val = readl(cp->regs + REG_RX_CFG);
3851 val &= ~RX_CFG_DMA_EN;
3852 writel(val, cp->regs + REG_RX_CFG);
3853
3854 /* program header parser */
3855 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3856 (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
3857 cas_load_firmware(cp, CAS_HP_FIRMWARE);
3858 } else {
3859 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3860 }
3861
3862 /* clear out error registers */
3863 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3864 cas_clear_mac_err(cp);
3865 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3866}
3867
3868/* Shut down the chip, must be called with pm_sem held. */
3869static void cas_shutdown(struct cas *cp)
3870{
3871 unsigned long flags;
3872
3873 /* Make us not-running to avoid timers respawning */
3874 cp->hw_running = 0;
3875
3876 del_timer_sync(&cp->link_timer);
3877
3878 /* Stop the reset task */
3879#if 0
3880 while (atomic_read(&cp->reset_task_pending_mtu) ||
3881 atomic_read(&cp->reset_task_pending_spare) ||
3882 atomic_read(&cp->reset_task_pending_all))
3883 schedule();
3884
3885#else
3886 while (atomic_read(&cp->reset_task_pending))
3887 schedule();
3888#endif
3889 /* Actually stop the chip */
3890 cas_lock_all_save(cp, flags);
3891 cas_reset(cp, 0);
3892 if (cp->cas_flags & CAS_FLAG_SATURN)
3893 cas_phy_powerdown(cp);
3894 cas_unlock_all_restore(cp, flags);
3895}
3896
3897static int cas_change_mtu(struct net_device *dev, int new_mtu)
3898{
3899 struct cas *cp = netdev_priv(dev);
3900
3901 if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
3902 return -EINVAL;
3903
3904 dev->mtu = new_mtu;
3905 if (!netif_running(dev) || !netif_device_present(dev))
3906 return 0;
3907
3908 /* let the reset task handle it */
3909#if 1
3910 atomic_inc(&cp->reset_task_pending);
3911 if ((cp->phy_type & CAS_PHY_SERDES)) {
3912 atomic_inc(&cp->reset_task_pending_all);
3913 } else {
3914 atomic_inc(&cp->reset_task_pending_mtu);
3915 }
3916 schedule_work(&cp->reset_task);
3917#else
3918 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3919 CAS_RESET_ALL : CAS_RESET_MTU);
3920 printk(KERN_ERR "reset called in cas_change_mtu\n");
3921 schedule_work(&cp->reset_task);
3922#endif
3923
3924 flush_scheduled_work();
3925 return 0;
3926}
3927
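/* unmap and free any skbs still queued on a tx ring, skipping over the
 * tiny bounce buffers that may follow a fragment's descriptor.
 */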
3928static void cas_clean_txd(struct cas *cp, int ring)
3929{
3930 struct cas_tx_desc *txd = cp->init_txds[ring];
3931 struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3932 u64 daddr, dlen;
3933 int i, size;
3934
3935 size = TX_DESC_RINGN_SIZE(ring);
3936 for (i = 0; i < size; i++) {
3937 int frag;
3938
3939 if (skbs[i] == NULL)
3940 continue;
3941
3942 skb = skbs[i];
3943 skbs[i] = NULL;
3944
3945 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
3946 int ent = i & (size - 1);
3947
3948 /* first buffer is never a tiny buffer and so
3949 * needs to be unmapped.
3950 */
3951 daddr = le64_to_cpu(txd[ent].buffer);
3952 dlen = CAS_VAL(TX_DESC_BUFLEN,
3953 le64_to_cpu(txd[ent].control));
3954 pci_unmap_page(cp->pdev, daddr, dlen,
3955 PCI_DMA_TODEVICE);
3956
3957 if (frag != skb_shinfo(skb)->nr_frags) {
3958 i++;
3959
3960				/* next buffer might be a tiny buffer.
3961 * skip past it.
3962 */
3963 ent = i & (size - 1);
3964 if (cp->tx_tiny_use[ring][ent].used)
3965 i++;
3966 }
3967 }
3968 dev_kfree_skb_any(skb);
3969 }
3970
3971 /* zero out tiny buf usage */
3972 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3973}
3974
3975/* freed on close */
3976static inline void cas_free_rx_desc(struct cas *cp, int ring)
3977{
3978 cas_page_t **page = cp->rx_pages[ring];
3979 int i, size;
3980
3981 size = RX_DESC_RINGN_SIZE(ring);
3982 for (i = 0; i < size; i++) {
3983 if (page[i]) {
3984 cas_page_free(cp, page[i]);
3985 page[i] = NULL;
3986 }
3987 }
3988}
3989
3990static void cas_free_rxds(struct cas *cp)
3991{
3992 int i;
3993
3994 for (i = 0; i < N_RX_DESC_RINGS; i++)
3995 cas_free_rx_desc(cp, i);
3996}
3997
3998/* Must be invoked under cp->lock. */
3999static void cas_clean_rings(struct cas *cp)
4000{
4001 int i;
4002
4003 /* need to clean all tx rings */
4004 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
4005 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
4006 for (i = 0; i < N_TX_RINGS; i++)
4007 cas_clean_txd(cp, i);
4008
4009 /* zero out init block */
4010 memset(cp->init_block, 0, sizeof(struct cas_init_block));
4011 cas_clean_rxds(cp);
4012 cas_clean_rxcs(cp);
4013}
4014
4015/* allocated on open */
4016static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
4017{
4018 cas_page_t **page = cp->rx_pages[ring];
4019 int size, i = 0;
4020
4021 size = RX_DESC_RINGN_SIZE(ring);
4022 for (i = 0; i < size; i++) {
4023 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
4024 return -1;
4025 }
4026 return 0;
4027}
4028
4029static int cas_alloc_rxds(struct cas *cp)
4030{
4031 int i;
4032
4033 for (i = 0; i < N_RX_DESC_RINGS; i++) {
4034 if (cas_alloc_rx_desc(cp, i) < 0) {
4035 cas_free_rxds(cp);
4036 return -1;
4037 }
4038 }
4039 return 0;
4040}
4041
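/* NOTE on the bookkeeping used below (as inferred from this file):
 * reset_task_pending counts how many times the reset work has been
 * scheduled, while reset_task_pending_all/_spare/_mtu record the reason.
 * The task snapshots the per-reason counters, performs the corresponding
 * work, and then subtracts only the snapshotted amounts, so increments
 * that race in while it runs are simply handled by the next invocation.
 */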
4042static void cas_reset_task(void *data)
4043{
4044 struct cas *cp = (struct cas *) data;
4045#if 0
4046 int pending = atomic_read(&cp->reset_task_pending);
4047#else
4048 int pending_all = atomic_read(&cp->reset_task_pending_all);
4049 int pending_spare = atomic_read(&cp->reset_task_pending_spare);
4050 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
4051
4052 if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
4053 /* We can have more tasks scheduled than actually
4054 * needed.
4055 */
4056 atomic_dec(&cp->reset_task_pending);
4057 return;
4058 }
4059#endif
4060 /* The link went down, we reset the ring, but keep
4061 * DMA stopped. Use this function for reset
4062 * on error as well.
4063 */
4064 if (cp->hw_running) {
4065 unsigned long flags;
4066
4067 /* Make sure we don't get interrupts or tx packets */
4068 netif_device_detach(cp->dev);
4069 cas_lock_all_save(cp, flags);
4070
4071 if (cp->opened) {
4072			/* We call cas_spare_recover when we call cas_open,
4073			 * but we do not initialize the lists that
4074			 * cas_spare_recover uses until cas_open is called.
4075 */
4076 cas_spare_recover(cp, GFP_ATOMIC);
4077 }
4078#if 1
4079 /* test => only pending_spare set */
4080 if (!pending_all && !pending_mtu)
4081 goto done;
4082#else
4083 if (pending == CAS_RESET_SPARE)
4084 goto done;
4085#endif
4086 /* when pending == CAS_RESET_ALL, the following
4087 * call to cas_init_hw will restart auto negotiation.
4088 * Setting the second argument of cas_reset to
4089 * !(pending == CAS_RESET_ALL) will set this argument
4090 * to 1 (avoiding reinitializing the PHY for the normal
4091 * PCS case) when auto negotiation is not restarted.
4092 */
4093#if 1
4094 cas_reset(cp, !(pending_all > 0));
4095 if (cp->opened)
4096 cas_clean_rings(cp);
4097 cas_init_hw(cp, (pending_all > 0));
4098#else
4099 cas_reset(cp, !(pending == CAS_RESET_ALL));
4100 if (cp->opened)
4101 cas_clean_rings(cp);
4102 cas_init_hw(cp, pending == CAS_RESET_ALL);
4103#endif
4104
4105done:
4106 cas_unlock_all_restore(cp, flags);
4107 netif_device_attach(cp->dev);
4108 }
4109#if 1
4110 atomic_sub(pending_all, &cp->reset_task_pending_all);
4111 atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4112 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4113 atomic_dec(&cp->reset_task_pending);
4114#else
4115 atomic_set(&cp->reset_task_pending, 0);
4116#endif
4117}
4118
4119static void cas_link_timer(unsigned long data)
4120{
4121 struct cas *cp = (struct cas *) data;
4122 int mask, pending = 0, reset = 0;
4123 unsigned long flags;
4124
4125 if (link_transition_timeout != 0 &&
4126 cp->link_transition_jiffies_valid &&
4127 ((jiffies - cp->link_transition_jiffies) >
4128 (link_transition_timeout))) {
4129 /* One-second counter so link-down workaround doesn't
4130 * cause resets to occur so fast as to fool the switch
4131 * into thinking the link is down.
4132 */
4133 cp->link_transition_jiffies_valid = 0;
4134 }
4135
4136 if (!cp->hw_running)
4137 return;
4138
4139 spin_lock_irqsave(&cp->lock, flags);
4140 cas_lock_tx(cp);
4141 cas_entropy_gather(cp);
4142
4143 /* If the link task is still pending, we just
4144 * reschedule the link timer
4145 */
4146#if 1
4147 if (atomic_read(&cp->reset_task_pending_all) ||
4148 atomic_read(&cp->reset_task_pending_spare) ||
4149 atomic_read(&cp->reset_task_pending_mtu))
4150 goto done;
4151#else
4152 if (atomic_read(&cp->reset_task_pending))
4153 goto done;
4154#endif
4155
4156 /* check for rx cleaning */
4157 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4158 int i, rmask;
4159
4160 for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4161 rmask = CAS_FLAG_RXD_POST(i);
4162 if ((mask & rmask) == 0)
4163 continue;
4164
4165 /* post_rxds will do a mod_timer */
4166 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4167 pending = 1;
4168 continue;
4169 }
4170 cp->cas_flags &= ~rmask;
4171 }
4172 }
4173
4174 if (CAS_PHY_MII(cp->phy_type)) {
4175 u16 bmsr;
4176 cas_mif_poll(cp, 0);
4177 bmsr = cas_phy_read(cp, MII_BMSR);
4178 /* WTZ: Solaris driver reads this twice, but that
4179 * may be due to the PCS case and the use of a
4180 * common implementation. Read it twice here to be
4181 * safe.
4182 */
4183 bmsr = cas_phy_read(cp, MII_BMSR);
4184 cas_mif_poll(cp, 1);
4185 readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
4186 reset = cas_mii_link_check(cp, bmsr);
4187 } else {
4188 reset = cas_pcs_link_check(cp);
4189 }
4190
4191 if (reset)
4192 goto done;
4193
4194 /* check for tx state machine confusion */
4195 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4196 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4197 u32 wptr, rptr;
4198 int tlm = CAS_VAL(MAC_SM_TLM, val);
4199
4200 if (((tlm == 0x5) || (tlm == 0x3)) &&
4201 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4202 if (netif_msg_tx_err(cp))
4203 printk(KERN_DEBUG "%s: tx err: "
4204 "MAC_STATE[%08x]\n",
4205 cp->dev->name, val);
4206 reset = 1;
4207 goto done;
4208 }
4209
4210 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4211 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4212 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4213 if ((val == 0) && (wptr != rptr)) {
4214 if (netif_msg_tx_err(cp))
4215 printk(KERN_DEBUG "%s: tx err: "
4216 "TX_FIFO[%08x:%08x:%08x]\n",
4217 cp->dev->name, val, wptr, rptr);
4218 reset = 1;
4219 }
4220
4221 if (reset)
4222 cas_hard_reset(cp);
4223 }
4224
4225done:
4226 if (reset) {
4227#if 1
4228 atomic_inc(&cp->reset_task_pending);
4229 atomic_inc(&cp->reset_task_pending_all);
4230 schedule_work(&cp->reset_task);
4231#else
4232 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4233 printk(KERN_ERR "reset called in cas_link_timer\n");
4234 schedule_work(&cp->reset_task);
4235#endif
4236 }
4237
4238 if (!pending)
4239 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4240 cas_unlock_tx(cp);
4241 spin_unlock_irqrestore(&cp->lock, flags);
4242}
4243
4244/* tiny buffers are used to avoid target abort issues with
4245 * older Cassini chips.
4246 */
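/* The "tiny" buffers are small per-ring blocks of DMA-consistent memory
 * (tx_tiny_bufs/tx_tiny_dvma) allocated once in cas_open() and released in
 * cas_close().  Judging from cas_clean_txd() above, very small transmit
 * fragments are staged through these blocks rather than mapped directly,
 * and tx_tiny_use[] marks the descriptor slots they occupy so that cleanup
 * can skip over them.
 */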
4247static void cas_tx_tiny_free(struct cas *cp)
4248{
4249 struct pci_dev *pdev = cp->pdev;
4250 int i;
4251
4252 for (i = 0; i < N_TX_RINGS; i++) {
4253 if (!cp->tx_tiny_bufs[i])
4254 continue;
4255
4256 pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
4257 cp->tx_tiny_bufs[i],
4258 cp->tx_tiny_dvma[i]);
4259 cp->tx_tiny_bufs[i] = NULL;
4260 }
4261}
4262
4263static int cas_tx_tiny_alloc(struct cas *cp)
4264{
4265 struct pci_dev *pdev = cp->pdev;
4266 int i;
4267
4268 for (i = 0; i < N_TX_RINGS; i++) {
4269 cp->tx_tiny_bufs[i] =
4270 pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
4271 &cp->tx_tiny_dvma[i]);
4272 if (!cp->tx_tiny_bufs[i]) {
4273 cas_tx_tiny_free(cp);
4274 return -1;
4275 }
4276 }
4277 return 0;
4278}
4279
4280
4281static int cas_open(struct net_device *dev)
4282{
4283 struct cas *cp = netdev_priv(dev);
4284 int hw_was_up, err;
4285 unsigned long flags;
4286
4287 down(&cp->pm_sem);
4288
4289 hw_was_up = cp->hw_running;
4290
4291 /* The power-management semaphore protects the hw_running
4292 * etc. state so it is safe to do this bit without cp->lock
4293 */
4294 if (!cp->hw_running) {
4295 /* Reset the chip */
4296 cas_lock_all_save(cp, flags);
4297 /* We set the second arg to cas_reset to zero
4298 * because cas_init_hw below will have its second
4299 * argument set to non-zero, which will force
4300 * autonegotiation to start.
4301 */
4302 cas_reset(cp, 0);
4303 cp->hw_running = 1;
4304 cas_unlock_all_restore(cp, flags);
4305 }
4306
4307 if (cas_tx_tiny_alloc(cp) < 0)
4308 return -ENOMEM;
4309
4310 /* alloc rx descriptors */
4311 err = -ENOMEM;
4312 if (cas_alloc_rxds(cp) < 0)
4313 goto err_tx_tiny;
4314
4315 /* allocate spares */
4316 cas_spare_init(cp);
4317 cas_spare_recover(cp, GFP_KERNEL);
4318
4319 /* We can now request the interrupt as we know it's masked
4320 * on the controller. cassini+ has up to 4 interrupts
4321 * that can be used, but you need to do explicit pci interrupt
4322 * mapping to expose them
4323 */
4324 if (request_irq(cp->pdev->irq, cas_interrupt,
4325 SA_SHIRQ, dev->name, (void *) dev)) {
4326 printk(KERN_ERR "%s: failed to request irq !\n",
4327 cp->dev->name);
4328 err = -EAGAIN;
4329 goto err_spare;
4330 }
4331
4332 /* init hw */
4333 cas_lock_all_save(cp, flags);
4334 cas_clean_rings(cp);
4335 cas_init_hw(cp, !hw_was_up);
4336 cp->opened = 1;
4337 cas_unlock_all_restore(cp, flags);
4338
4339 netif_start_queue(dev);
4340 up(&cp->pm_sem);
4341 return 0;
4342
4343err_spare:
4344 cas_spare_free(cp);
4345 cas_free_rxds(cp);
4346err_tx_tiny:
4347 cas_tx_tiny_free(cp);
4348 up(&cp->pm_sem);
4349 return err;
4350}
4351
4352static int cas_close(struct net_device *dev)
4353{
4354 unsigned long flags;
4355 struct cas *cp = netdev_priv(dev);
4356
4357 /* Make sure we don't get distracted by suspend/resume */
4358 down(&cp->pm_sem);
4359
4360 netif_stop_queue(dev);
4361
4362 /* Stop traffic, mark us closed */
4363 cas_lock_all_save(cp, flags);
4364 cp->opened = 0;
4365 cas_reset(cp, 0);
4366 cas_phy_init(cp);
4367 cas_begin_auto_negotiation(cp, NULL);
4368 cas_clean_rings(cp);
4369 cas_unlock_all_restore(cp, flags);
4370
4371 free_irq(cp->pdev->irq, (void *) dev);
4372 cas_spare_free(cp);
4373 cas_free_rxds(cp);
4374 cas_tx_tiny_free(cp);
4375 up(&cp->pm_sem);
4376 return 0;
4377}
4378
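/* Keep this table in the same order as the assignments in
 * cas_get_ethtool_stats() below; ethtool pairs the strings with the
 * reported values strictly by index.
 */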
4379static struct {
4380 const char name[ETH_GSTRING_LEN];
4381} ethtool_cassini_statnames[] = {
4382 {"collisions"},
4383 {"rx_bytes"},
4384 {"rx_crc_errors"},
4385 {"rx_dropped"},
4386 {"rx_errors"},
4387 {"rx_fifo_errors"},
4388 {"rx_frame_errors"},
4389 {"rx_length_errors"},
4390 {"rx_over_errors"},
4391 {"rx_packets"},
4392 {"tx_aborted_errors"},
4393 {"tx_bytes"},
4394 {"tx_dropped"},
4395 {"tx_errors"},
4396 {"tx_fifo_errors"},
4397 {"tx_packets"}
4398};
4399#define CAS_NUM_STAT_KEYS (sizeof(ethtool_cassini_statnames)/ETH_GSTRING_LEN)
4400
4401static struct {
4402 const int offsets; /* neg. values for 2nd arg to cas_read_phy */
4403} ethtool_register_table[] = {
4404 {-MII_BMSR},
4405 {-MII_BMCR},
4406 {REG_CAWR},
4407 {REG_INF_BURST},
4408 {REG_BIM_CFG},
4409 {REG_RX_CFG},
4410 {REG_HP_CFG},
4411 {REG_MAC_TX_CFG},
4412 {REG_MAC_RX_CFG},
4413 {REG_MAC_CTRL_CFG},
4414 {REG_MAC_XIF_CFG},
4415 {REG_MIF_CFG},
4416 {REG_PCS_CFG},
4417 {REG_SATURN_PCFG},
4418 {REG_PCS_MII_STATUS},
4419 {REG_PCS_STATE_MACHINE},
4420 {REG_MAC_COLL_EXCESS},
4421 {REG_MAC_COLL_LATE}
4422};
4423#define CAS_REG_LEN (sizeof(ethtool_register_table)/sizeof(int))
4424#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
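/* Each table entry holds a single int, so CAS_REG_LEN is simply the number
 * of entries; CAS_MAX_REGS is the size in bytes of the corresponding dump
 * (one u32 per entry) and caps the length reported by cas_get_regs_len()
 * and cas_get_drvinfo().
 */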
4425
4426static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4427{
4428 u8 *p;
4429 int i;
4430 unsigned long flags;
4431
4432 spin_lock_irqsave(&cp->lock, flags);
4433 for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
4434 u16 hval;
4435 u32 val;
4436 if (ethtool_register_table[i].offsets < 0) {
4437 hval = cas_phy_read(cp,
4438 -ethtool_register_table[i].offsets);
4439 val = hval;
4440 } else {
4441			val = readl(cp->regs + ethtool_register_table[i].offsets);
4442 }
4443 memcpy(p, (u8 *)&val, sizeof(u32));
4444 }
4445 spin_unlock_irqrestore(&cp->lock, flags);
4446}
4447
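/* Per-ring counters live in net_stats[0..N_TX_RINGS-1], and
 * net_stats[N_TX_RINGS] is the aggregate that gets handed back to the
 * stack.  Each call folds the hardware MAC error counters and the per-ring
 * counters into the aggregate slot, then zeroes the per-ring entries.
 */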
4448static struct net_device_stats *cas_get_stats(struct net_device *dev)
4449{
4450 struct cas *cp = netdev_priv(dev);
4451 struct net_device_stats *stats = cp->net_stats;
4452 unsigned long flags;
4453 int i;
4454 unsigned long tmp;
4455
4456	/* we collate all of the stats into net_stats[N_TX_RINGS] */
4457 if (!cp->hw_running)
4458 return stats + N_TX_RINGS;
4459
4460 /* collect outstanding stats */
4461 /* WTZ: the Cassini spec gives these as 16 bit counters but
4462 * stored in 32-bit words. Added a mask of 0xffff to be safe,
4463 * in case the chip somehow puts any garbage in the other bits.
4464	 * Also, counter usage didn't seem to match what Adrian did
4465 * in the parts of the code that set these quantities. Made
4466 * that consistent.
4467 */
4468 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4469 stats[N_TX_RINGS].rx_crc_errors +=
4470 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4471 stats[N_TX_RINGS].rx_frame_errors +=
4472 readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
4473 stats[N_TX_RINGS].rx_length_errors +=
4474 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4475#if 1
4476 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4477 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4478 stats[N_TX_RINGS].tx_aborted_errors += tmp;
4479 stats[N_TX_RINGS].collisions +=
4480 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4481#else
4482 stats[N_TX_RINGS].tx_aborted_errors +=
4483 readl(cp->regs + REG_MAC_COLL_EXCESS);
4484 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4485 readl(cp->regs + REG_MAC_COLL_LATE);
4486#endif
4487 cas_clear_mac_err(cp);
4488
4489 /* saved bits that are unique to ring 0 */
4490 spin_lock(&cp->stat_lock[0]);
4491 stats[N_TX_RINGS].collisions += stats[0].collisions;
4492 stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
4493 stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
4494 stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
4495 stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4496 stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
4497 spin_unlock(&cp->stat_lock[0]);
4498
4499 for (i = 0; i < N_TX_RINGS; i++) {
4500 spin_lock(&cp->stat_lock[i]);
4501 stats[N_TX_RINGS].rx_length_errors +=
4502 stats[i].rx_length_errors;
4503 stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4504 stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
4505 stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
4506 stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
4507 stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
4508 stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
4509 stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
4510 stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
4511 stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
4512 memset(stats + i, 0, sizeof(struct net_device_stats));
4513 spin_unlock(&cp->stat_lock[i]);
4514 }
4515 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4516 return stats + N_TX_RINGS;
4517}
4518
4519
4520static void cas_set_multicast(struct net_device *dev)
4521{
4522 struct cas *cp = netdev_priv(dev);
4523 u32 rxcfg, rxcfg_new;
4524 unsigned long flags;
4525 int limit = STOP_TRIES;
4526
4527 if (!cp->hw_running)
4528 return;
4529
4530 spin_lock_irqsave(&cp->lock, flags);
4531 rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4532
4533 /* disable RX MAC and wait for completion */
4534 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4535 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4536 if (!limit--)
4537 break;
4538 udelay(10);
4539 }
4540
4541 /* disable hash filter and wait for completion */
4542 limit = STOP_TRIES;
4543 rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4544 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4545 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4546 if (!limit--)
4547 break;
4548 udelay(10);
4549 }
4550
4551 /* program hash filters */
4552 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4553 rxcfg |= rxcfg_new;
4554 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4555 spin_unlock_irqrestore(&cp->lock, flags);
4556}
4557
4558static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4559{
4560 struct cas *cp = netdev_priv(dev);
4561 strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
4562 strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
4563 info->fw_version[0] = '\0';
4564 strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
4565 info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
4566 cp->casreg_len : CAS_MAX_REGS;
4567 info->n_stats = CAS_NUM_STAT_KEYS;
4568}
4569
4570static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4571{
4572 struct cas *cp = netdev_priv(dev);
4573 u16 bmcr;
4574 int full_duplex, speed, pause;
4575 unsigned long flags;
4576 enum link_state linkstate = link_up;
4577
4578 cmd->advertising = 0;
4579 cmd->supported = SUPPORTED_Autoneg;
4580 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4581 cmd->supported |= SUPPORTED_1000baseT_Full;
4582 cmd->advertising |= ADVERTISED_1000baseT_Full;
4583 }
4584
4585 /* Record PHY settings if HW is on. */
4586 spin_lock_irqsave(&cp->lock, flags);
4587 bmcr = 0;
4588 linkstate = cp->lstate;
4589 if (CAS_PHY_MII(cp->phy_type)) {
4590 cmd->port = PORT_MII;
4591 cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
4592 XCVR_INTERNAL : XCVR_EXTERNAL;
4593 cmd->phy_address = cp->phy_addr;
4594 cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
4595 ADVERTISED_10baseT_Half |
4596 ADVERTISED_10baseT_Full |
4597 ADVERTISED_100baseT_Half |
4598 ADVERTISED_100baseT_Full;
4599
4600 cmd->supported |=
4601 (SUPPORTED_10baseT_Half |
4602 SUPPORTED_10baseT_Full |
4603 SUPPORTED_100baseT_Half |
4604 SUPPORTED_100baseT_Full |
4605 SUPPORTED_TP | SUPPORTED_MII);
4606
4607 if (cp->hw_running) {
4608 cas_mif_poll(cp, 0);
4609 bmcr = cas_phy_read(cp, MII_BMCR);
4610 cas_read_mii_link_mode(cp, &full_duplex,
4611 &speed, &pause);
4612 cas_mif_poll(cp, 1);
4613 }
4614
4615 } else {
4616 cmd->port = PORT_FIBRE;
4617 cmd->transceiver = XCVR_INTERNAL;
4618 cmd->phy_address = 0;
4619 cmd->supported |= SUPPORTED_FIBRE;
4620 cmd->advertising |= ADVERTISED_FIBRE;
4621
4622 if (cp->hw_running) {
4623 /* pcs uses the same bits as mii */
4624 bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4625 cas_read_pcs_link_mode(cp, &full_duplex,
4626 &speed, &pause);
4627 }
4628 }
4629 spin_unlock_irqrestore(&cp->lock, flags);
4630
4631 if (bmcr & BMCR_ANENABLE) {
4632 cmd->advertising |= ADVERTISED_Autoneg;
4633 cmd->autoneg = AUTONEG_ENABLE;
4634 cmd->speed = ((speed == 10) ?
4635 SPEED_10 :
4636 ((speed == 1000) ?
4637 SPEED_1000 : SPEED_100));
4638 cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4639 } else {
4640 cmd->autoneg = AUTONEG_DISABLE;
4641 cmd->speed =
4642 (bmcr & CAS_BMCR_SPEED1000) ?
4643 SPEED_1000 :
4644 ((bmcr & BMCR_SPEED100) ? SPEED_100:
4645 SPEED_10);
4646 cmd->duplex =
4647 (bmcr & BMCR_FULLDPLX) ?
4648 DUPLEX_FULL : DUPLEX_HALF;
4649 }
4650 if (linkstate != link_up) {
4651 /* Force these to "unknown" if the link is not up and
4652		 * autonegotiation is enabled. We can set the link
4653 * speed to 0, but not cmd->duplex,
4654 * because its legal values are 0 and 1. Ethtool will
4655 * print the value reported in parentheses after the
4656 * word "Unknown" for unrecognized values.
4657 *
4658 * If in forced mode, we report the speed and duplex
4659 * settings that we configured.
4660 */
4661 if (cp->link_cntl & BMCR_ANENABLE) {
4662 cmd->speed = 0;
4663 cmd->duplex = 0xff;
4664 } else {
4665 cmd->speed = SPEED_10;
4666 if (cp->link_cntl & BMCR_SPEED100) {
4667 cmd->speed = SPEED_100;
4668 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4669 cmd->speed = SPEED_1000;
4670 }
4671 cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
4672 DUPLEX_FULL : DUPLEX_HALF;
4673 }
4674 }
4675 return 0;
4676}
4677
4678static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4679{
4680 struct cas *cp = netdev_priv(dev);
4681 unsigned long flags;
4682
4683 /* Verify the settings we care about. */
4684 if (cmd->autoneg != AUTONEG_ENABLE &&
4685 cmd->autoneg != AUTONEG_DISABLE)
4686 return -EINVAL;
4687
4688 if (cmd->autoneg == AUTONEG_DISABLE &&
4689 ((cmd->speed != SPEED_1000 &&
4690 cmd->speed != SPEED_100 &&
4691 cmd->speed != SPEED_10) ||
4692 (cmd->duplex != DUPLEX_HALF &&
4693 cmd->duplex != DUPLEX_FULL)))
4694 return -EINVAL;
4695
4696 /* Apply settings and restart link process. */
4697 spin_lock_irqsave(&cp->lock, flags);
4698 cas_begin_auto_negotiation(cp, cmd);
4699 spin_unlock_irqrestore(&cp->lock, flags);
4700 return 0;
4701}
4702
4703static int cas_nway_reset(struct net_device *dev)
4704{
4705 struct cas *cp = netdev_priv(dev);
4706 unsigned long flags;
4707
4708 if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4709 return -EINVAL;
4710
4711 /* Restart link process. */
4712 spin_lock_irqsave(&cp->lock, flags);
4713 cas_begin_auto_negotiation(cp, NULL);
4714 spin_unlock_irqrestore(&cp->lock, flags);
4715
4716 return 0;
4717}
4718
4719static u32 cas_get_link(struct net_device *dev)
4720{
4721 struct cas *cp = netdev_priv(dev);
4722 return cp->lstate == link_up;
4723}
4724
4725static u32 cas_get_msglevel(struct net_device *dev)
4726{
4727 struct cas *cp = netdev_priv(dev);
4728 return cp->msg_enable;
4729}
4730
4731static void cas_set_msglevel(struct net_device *dev, u32 value)
4732{
4733 struct cas *cp = netdev_priv(dev);
4734 cp->msg_enable = value;
4735}
4736
4737static int cas_get_regs_len(struct net_device *dev)
4738{
4739 struct cas *cp = netdev_priv(dev);
4740 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
4741}
4742
4743static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4744 void *p)
4745{
4746 struct cas *cp = netdev_priv(dev);
4747 regs->version = 0;
4748 /* cas_read_regs handles locks (cp->lock). */
4749 cas_read_regs(cp, p, regs->len / sizeof(u32));
4750}
4751
4752static int cas_get_stats_count(struct net_device *dev)
4753{
4754 return CAS_NUM_STAT_KEYS;
4755}
4756
4757static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4758{
4759 memcpy(data, &ethtool_cassini_statnames,
4760 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4761}
4762
4763static void cas_get_ethtool_stats(struct net_device *dev,
4764 struct ethtool_stats *estats, u64 *data)
4765{
4766 struct cas *cp = netdev_priv(dev);
4767 struct net_device_stats *stats = cas_get_stats(cp->dev);
4768 int i = 0;
4769 data[i++] = stats->collisions;
4770 data[i++] = stats->rx_bytes;
4771 data[i++] = stats->rx_crc_errors;
4772 data[i++] = stats->rx_dropped;
4773 data[i++] = stats->rx_errors;
4774 data[i++] = stats->rx_fifo_errors;
4775 data[i++] = stats->rx_frame_errors;
4776 data[i++] = stats->rx_length_errors;
4777 data[i++] = stats->rx_over_errors;
4778 data[i++] = stats->rx_packets;
4779 data[i++] = stats->tx_aborted_errors;
4780 data[i++] = stats->tx_bytes;
4781 data[i++] = stats->tx_dropped;
4782 data[i++] = stats->tx_errors;
4783 data[i++] = stats->tx_fifo_errors;
4784 data[i++] = stats->tx_packets;
4785 BUG_ON(i != CAS_NUM_STAT_KEYS);
4786}
4787
4788static struct ethtool_ops cas_ethtool_ops = {
4789 .get_drvinfo = cas_get_drvinfo,
4790 .get_settings = cas_get_settings,
4791 .set_settings = cas_set_settings,
4792 .nway_reset = cas_nway_reset,
4793 .get_link = cas_get_link,
4794 .get_msglevel = cas_get_msglevel,
4795 .set_msglevel = cas_set_msglevel,
4796 .get_regs_len = cas_get_regs_len,
4797 .get_regs = cas_get_regs,
4798 .get_stats_count = cas_get_stats_count,
4799 .get_strings = cas_get_strings,
4800 .get_ethtool_stats = cas_get_ethtool_stats,
4801};
4802
4803static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4804{
4805 struct cas *cp = netdev_priv(dev);
4806 struct mii_ioctl_data *data = if_mii(ifr);
4807 unsigned long flags;
4808 int rc = -EOPNOTSUPP;
4809
4810 /* Hold the PM semaphore while doing ioctl's or we may collide
4811 * with open/close and power management and oops.
4812 */
4813 down(&cp->pm_sem);
4814 switch (cmd) {
4815 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
4816 data->phy_id = cp->phy_addr;
4817 /* Fallthrough... */
4818
4819 case SIOCGMIIREG: /* Read MII PHY register. */
4820 spin_lock_irqsave(&cp->lock, flags);
4821 cas_mif_poll(cp, 0);
4822 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4823 cas_mif_poll(cp, 1);
4824 spin_unlock_irqrestore(&cp->lock, flags);
4825 rc = 0;
4826 break;
4827
4828 case SIOCSMIIREG: /* Write MII PHY register. */
4829 if (!capable(CAP_NET_ADMIN)) {
4830 rc = -EPERM;
4831 break;
4832 }
4833 spin_lock_irqsave(&cp->lock, flags);
4834 cas_mif_poll(cp, 0);
4835 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4836 cas_mif_poll(cp, 1);
4837 spin_unlock_irqrestore(&cp->lock, flags);
4838 break;
4839 default:
4840 break;
4841	}
4842
4843 up(&cp->pm_sem);
4844 return rc;
4845}
4846
4847static int __devinit cas_init_one(struct pci_dev *pdev,
4848 const struct pci_device_id *ent)
4849{
4850 static int cas_version_printed = 0;
4851 unsigned long casreg_base, casreg_len;
4852 struct net_device *dev;
4853 struct cas *cp;
4854 int i, err, pci_using_dac;
4855 u16 pci_cmd;
4856 u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4857
4858 if (cas_version_printed++ == 0)
4859 printk(KERN_INFO "%s", version);
4860
4861 err = pci_enable_device(pdev);
4862 if (err) {
4863 printk(KERN_ERR PFX "Cannot enable PCI device, "
4864 "aborting.\n");
4865 return err;
4866 }
4867
4868 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4869 printk(KERN_ERR PFX "Cannot find proper PCI device "
4870 "base address, aborting.\n");
4871 err = -ENODEV;
4872 goto err_out_disable_pdev;
4873 }
4874
4875 dev = alloc_etherdev(sizeof(*cp));
4876 if (!dev) {
4877 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
4878 err = -ENOMEM;
4879 goto err_out_disable_pdev;
4880 }
4881 SET_MODULE_OWNER(dev);
4882 SET_NETDEV_DEV(dev, &pdev->dev);
4883
4884 err = pci_request_regions(pdev, dev->name);
4885 if (err) {
4886 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
4887 "aborting.\n");
4888 goto err_out_free_netdev;
4889 }
4890 pci_set_master(pdev);
4891
4892 /* we must always turn on parity response or else parity
4893 * doesn't get generated properly. disable SERR/PERR as well.
4894 * in addition, we want to turn MWI on.
4895 */
4896 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4897 pci_cmd &= ~PCI_COMMAND_SERR;
4898 pci_cmd |= PCI_COMMAND_PARITY;
4899 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4900 pci_set_mwi(pdev);
4901 /*
4902 * On some architectures, the default cache line size set
4903	 * by pci_set_mwi reduces performance. We have to increase
4904 * it for this case. To start, we'll print some configuration
4905 * data.
4906 */
4907#if 1
4908 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4909 &orig_cacheline_size);
4910 if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4911 cas_cacheline_size =
4912 (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4913 CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
4914 if (pci_write_config_byte(pdev,
4915 PCI_CACHE_LINE_SIZE,
4916 cas_cacheline_size)) {
4917 printk(KERN_ERR PFX "Could not set PCI cache "
4918 "line size\n");
4919 goto err_write_cacheline;
4920 }
4921 }
4922#endif
4923
4924
4925 /* Configure DMA attributes. */
4926 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4927 pci_using_dac = 1;
4928 err = pci_set_consistent_dma_mask(pdev,
4929 DMA_64BIT_MASK);
4930 if (err < 0) {
4931 printk(KERN_ERR PFX "Unable to obtain 64-bit DMA "
4932 "for consistent allocations\n");
4933 goto err_out_free_res;
4934 }
4935
4936 } else {
4937 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
4938 if (err) {
4939 printk(KERN_ERR PFX "No usable DMA configuration, "
4940 "aborting.\n");
4941 goto err_out_free_res;
4942 }
4943 pci_using_dac = 0;
4944 }
4945
4946 casreg_base = pci_resource_start(pdev, 0);
4947 casreg_len = pci_resource_len(pdev, 0);
4948
4949 cp = netdev_priv(dev);
4950 cp->pdev = pdev;
4951#if 1
4952 /* A value of 0 indicates we never explicitly set it */
4953 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
4954#endif
4955 cp->dev = dev;
4956 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
4957 cassini_debug;
4958
4959 cp->link_transition = LINK_TRANSITION_UNKNOWN;
4960 cp->link_transition_jiffies_valid = 0;
4961
4962 spin_lock_init(&cp->lock);
4963 spin_lock_init(&cp->rx_inuse_lock);
4964 spin_lock_init(&cp->rx_spare_lock);
4965 for (i = 0; i < N_TX_RINGS; i++) {
4966 spin_lock_init(&cp->stat_lock[i]);
4967 spin_lock_init(&cp->tx_lock[i]);
4968 }
4969 spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
4970 init_MUTEX(&cp->pm_sem);
4971
4972 init_timer(&cp->link_timer);
4973 cp->link_timer.function = cas_link_timer;
4974 cp->link_timer.data = (unsigned long) cp;
4975
4976#if 1
4977 /* Just in case the implementation of atomic operations
4978	 * changes so that an explicit initialization is necessary.
4979 */
4980 atomic_set(&cp->reset_task_pending, 0);
4981 atomic_set(&cp->reset_task_pending_all, 0);
4982 atomic_set(&cp->reset_task_pending_spare, 0);
4983 atomic_set(&cp->reset_task_pending_mtu, 0);
4984#endif
4985 INIT_WORK(&cp->reset_task, cas_reset_task, cp);
4986
4987 /* Default link parameters */
4988 if (link_mode >= 0 && link_mode <= 6)
4989 cp->link_cntl = link_modes[link_mode];
4990 else
4991 cp->link_cntl = BMCR_ANENABLE;
4992 cp->lstate = link_down;
4993 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
4994 netif_carrier_off(cp->dev);
4995 cp->timer_ticks = 0;
4996
4997 /* give us access to cassini registers */
4998 cp->regs = ioremap(casreg_base, casreg_len);
4999	if (!cp->regs) {
5000 printk(KERN_ERR PFX "Cannot map device registers, "
5001 "aborting.\n");
5002 goto err_out_free_res;
5003 }
5004 cp->casreg_len = casreg_len;
5005
5006 pci_save_state(pdev);
5007 cas_check_pci_invariants(cp);
5008 cas_hard_reset(cp);
5009 cas_reset(cp, 0);
5010 if (cas_check_invariants(cp))
5011 goto err_out_iounmap;
5012
5013 cp->init_block = (struct cas_init_block *)
5014 pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
5015 &cp->block_dvma);
5016 if (!cp->init_block) {
5017 printk(KERN_ERR PFX "Cannot allocate init block, "
5018 "aborting.\n");
5019 goto err_out_iounmap;
5020 }
5021
5022 for (i = 0; i < N_TX_RINGS; i++)
5023 cp->init_txds[i] = cp->init_block->txds[i];
5024
5025 for (i = 0; i < N_RX_DESC_RINGS; i++)
5026 cp->init_rxds[i] = cp->init_block->rxds[i];
5027
5028 for (i = 0; i < N_RX_COMP_RINGS; i++)
5029 cp->init_rxcs[i] = cp->init_block->rxcs[i];
5030
5031 for (i = 0; i < N_RX_FLOWS; i++)
5032 skb_queue_head_init(&cp->rx_flows[i]);
5033
5034 dev->open = cas_open;
5035 dev->stop = cas_close;
5036 dev->hard_start_xmit = cas_start_xmit;
5037 dev->get_stats = cas_get_stats;
5038 dev->set_multicast_list = cas_set_multicast;
5039 dev->do_ioctl = cas_ioctl;
5040 dev->ethtool_ops = &cas_ethtool_ops;
5041 dev->tx_timeout = cas_tx_timeout;
5042 dev->watchdog_timeo = CAS_TX_TIMEOUT;
5043 dev->change_mtu = cas_change_mtu;
5044#ifdef USE_NAPI
5045 dev->poll = cas_poll;
5046 dev->weight = 64;
5047#endif
5048#ifdef CONFIG_NET_POLL_CONTROLLER
5049 dev->poll_controller = cas_netpoll;
5050#endif
5051 dev->irq = pdev->irq;
5052 dev->dma = 0;
5053
5054 /* Cassini features. */
5055 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5056 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5057
5058 if (pci_using_dac)
5059 dev->features |= NETIF_F_HIGHDMA;
5060
5061 if (register_netdev(dev)) {
5062 printk(KERN_ERR PFX "Cannot register net device, "
5063 "aborting.\n");
5064 goto err_out_free_consistent;
5065 }
5066
5067 i = readl(cp->regs + REG_BIM_CFG);
5068 printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
5069 "Ethernet[%d] ", dev->name,
5070 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5071 (i & BIM_CFG_32BIT) ? "32" : "64",
5072 (i & BIM_CFG_66MHZ) ? "66" : "33",
5073 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq);
5074
5075 for (i = 0; i < 6; i++)
5076 printk("%2.2x%c", dev->dev_addr[i],
5077 i == 5 ? ' ' : ':');
5078 printk("\n");
5079
5080 pci_set_drvdata(pdev, dev);
5081 cp->hw_running = 1;
5082 cas_entropy_reset(cp);
5083 cas_phy_init(cp);
5084 cas_begin_auto_negotiation(cp, NULL);
5085 return 0;
5086
5087err_out_free_consistent:
5088 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5089 cp->init_block, cp->block_dvma);
5090
5091err_out_iounmap:
5092 down(&cp->pm_sem);
5093 if (cp->hw_running)
5094 cas_shutdown(cp);
5095 up(&cp->pm_sem);
5096
5097 iounmap(cp->regs);
5098
5099
5100err_out_free_res:
5101 pci_release_regions(pdev);
5102
5103err_write_cacheline:
5104	/* Try to restore it in case the error occurred after we
5105 * set it.
5106 */
5107 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5108
5109err_out_free_netdev:
5110 free_netdev(dev);
5111
5112err_out_disable_pdev:
5113 pci_disable_device(pdev);
5114 pci_set_drvdata(pdev, NULL);
5115 return -ENODEV;
5116}
5117
5118static void __devexit cas_remove_one(struct pci_dev *pdev)
5119{
5120 struct net_device *dev = pci_get_drvdata(pdev);
5121 struct cas *cp;
5122 if (!dev)
5123 return;
5124
5125 cp = netdev_priv(dev);
5126 unregister_netdev(dev);
5127
5128 down(&cp->pm_sem);
5129 flush_scheduled_work();
5130 if (cp->hw_running)
5131 cas_shutdown(cp);
5132 up(&cp->pm_sem);
5133
5134#if 1
5135 if (cp->orig_cacheline_size) {
5136 /* Restore the cache line size if we had modified
5137 * it.
5138 */
5139 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5140 cp->orig_cacheline_size);
5141 }
5142#endif
5143 pci_free_consistent(pdev, sizeof(struct cas_init_block),
5144 cp->init_block, cp->block_dvma);
5145 iounmap(cp->regs);
5146 free_netdev(dev);
5147 pci_release_regions(pdev);
5148 pci_disable_device(pdev);
5149 pci_set_drvdata(pdev, NULL);
5150}
5151
5152#ifdef CONFIG_PM
5153static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
5154{
5155 struct net_device *dev = pci_get_drvdata(pdev);
5156 struct cas *cp = netdev_priv(dev);
5157 unsigned long flags;
5158
5159	/* We hold the PM semaphore during the entire driver
5160 * sleep time
5161 */
5162 down(&cp->pm_sem);
5163
5164 /* If the driver is opened, we stop the DMA */
5165 if (cp->opened) {
5166 netif_device_detach(dev);
5167
5168 cas_lock_all_save(cp, flags);
5169
5170 /* We can set the second arg of cas_reset to 0
5171 * because on resume, we'll call cas_init_hw with
5172 * its second arg set so that autonegotiation is
5173 * restarted.
5174 */
5175 cas_reset(cp, 0);
5176 cas_clean_rings(cp);
5177 cas_unlock_all_restore(cp, flags);
5178 }
5179
5180 if (cp->hw_running)
5181 cas_shutdown(cp);
5182
5183 return 0;
5184}
5185
5186static int cas_resume(struct pci_dev *pdev)
5187{
5188 struct net_device *dev = pci_get_drvdata(pdev);
5189 struct cas *cp = netdev_priv(dev);
5190
5191 printk(KERN_INFO "%s: resuming\n", dev->name);
5192
5193 cas_hard_reset(cp);
5194 if (cp->opened) {
5195 unsigned long flags;
5196 cas_lock_all_save(cp, flags);
5197 cas_reset(cp, 0);
5198 cp->hw_running = 1;
5199 cas_clean_rings(cp);
5200 cas_init_hw(cp, 1);
5201 cas_unlock_all_restore(cp, flags);
5202
5203 netif_device_attach(dev);
5204 }
5205 up(&cp->pm_sem);
5206 return 0;
5207}
5208#endif /* CONFIG_PM */
5209
5210static struct pci_driver cas_driver = {
5211 .name = DRV_MODULE_NAME,
5212 .id_table = cas_pci_tbl,
5213 .probe = cas_init_one,
5214 .remove = __devexit_p(cas_remove_one),
5215#ifdef CONFIG_PM
5216 .suspend = cas_suspend,
5217 .resume = cas_resume
5218#endif
5219};
5220
5221static int __init cas_init(void)
5222{
5223 if (linkdown_timeout > 0)
5224 link_transition_timeout = linkdown_timeout * HZ;
5225 else
5226 link_transition_timeout = 0;
5227
5228 return pci_module_init(&cas_driver);
5229}
5230
5231static void __exit cas_cleanup(void)
5232{
5233 pci_unregister_driver(&cas_driver);
5234}
5235
5236module_init(cas_init);
5237module_exit(cas_cleanup);
diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
new file mode 100644
index 000000000000..88063ef16cf6
--- /dev/null
+++ b/drivers/net/cassini.h
@@ -0,0 +1,4425 @@
1/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
2 * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
3 *
4 * Copyright (C) 2004 Sun Microsystems Inc.
5 * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of the
10 * License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
20 * 02111-1307, USA.
21 *
22 * vendor id: 0x108E (Sun Microsystems, Inc.)
23 * device id: 0xabba (Cassini)
24 * revision ids: 0x01 = Cassini
25 * 0x02 = Cassini rev 2
26 * 0x10 = Cassini+
27 * 0x11 = Cassini+ 0.2u
28 *
29 * vendor id: 0x100b (National Semiconductor)
30 * device id: 0x0035 (DP83065/Saturn)
31 * revision ids: 0x30 = Saturn B2
32 *
33 * rings are all offset from 0.
34 *
35 * there are two clock domains:
36 * PCI: 33/66MHz clock
37 * chip: 125MHz clock
38 */
39
40#ifndef _CASSINI_H
41#define _CASSINI_H
42
43/* cassini register map: 2M memory mapped in 32-bit memory space accessible as
44 * 32-bit words. there is no i/o port access. REG_ addresses are
45 * shared between cassini and cassini+. REG_PLUS_ addresses only
46 * appear in cassini+. REG_MINUS_ addresses only appear in cassini.
47 */
48#define CAS_ID_REV2 0x02
49#define CAS_ID_REVPLUS 0x10
50#define CAS_ID_REVPLUS02u 0x11
51#define CAS_ID_REVSATURNB2 0x30
52
53/** global resources **/
54
55/* this register sets the weights for the weighted round robin arbiter. e.g.,
56 * if rx weight == 1 and tx weight == 0, rx == 2x tx transfer credit
57 * for its next turn to access the pci bus.
58 * map: 0x0 = x1, 0x1 = x2, 0x2 = x4, 0x3 = x8
59 * DEFAULT: 0x0, SIZE: 5 bits
60 */
61#define REG_CAWR 0x0004 /* core arbitration weight */
62#define CAWR_RX_DMA_WEIGHT_SHIFT 0
63#define CAWR_RX_DMA_WEIGHT_MASK 0x03 /* [0:1] */
64#define CAWR_TX_DMA_WEIGHT_SHIFT 2
65#define CAWR_TX_DMA_WEIGHT_MASK 0x0C /* [3:2] */
66#define CAWR_RR_DIS 0x10 /* [4] */
67
68/* if enabled, BIM can send bursts across PCI bus > cacheline size. burst
69 * sizes determined by length of packet or descriptor transfer and the
70 * max length allowed by the target.
71 * DEFAULT: 0x0, SIZE: 1 bit
72 */
73#define REG_INF_BURST 0x0008 /* infinite burst enable reg */
74#define INF_BURST_EN 0x1 /* enable */
75
76/* top level interrupts [0-9] are auto-cleared to 0 when the status
77 * register is read. second level interrupts [13 - 18] are cleared at
78 * the source. tx completion register 3 is replicated in [19 - 31]
79 * DEFAULT: 0x00000000, SIZE: 29 bits
80 */
81#define REG_INTR_STATUS 0x000C /* interrupt status register */
82#define INTR_TX_INTME 0x00000001 /* frame w/ INT ME desc bit set
83 xferred from host queue to
84 TX FIFO */
85#define INTR_TX_ALL 0x00000002 /* all xmit frames xferred into
86 TX FIFO. i.e.,
87 TX Kick == TX complete. if
88 PACED_MODE set, then TX FIFO
89 also empty */
90#define INTR_TX_DONE 0x00000004 /* any frame xferred into tx
91 FIFO */
92#define INTR_TX_TAG_ERROR 0x00000008 /* TX FIFO tag framing
93 corrupted. FATAL ERROR */
94#define INTR_RX_DONE 0x00000010 /* at least 1 frame xferred
95 from RX FIFO to host mem.
96 RX completion reg updated.
97 may be delayed by recv
98 intr blanking. */
99#define INTR_RX_BUF_UNAVAIL 0x00000020 /* no more receive buffers.
100 RX Kick == RX complete */
101#define INTR_RX_TAG_ERROR 0x00000040 /* RX FIFO tag framing
102 corrupted. FATAL ERROR */
103#define INTR_RX_COMP_FULL 0x00000080 /* no more room in completion
104 ring to post descriptors.
105 RX complete head incr to
106 almost reach RX complete
107 tail */
108#define INTR_RX_BUF_AE 0x00000100 /* less than the
109 programmable threshold #
110 of free descr avail for
111 hw use */
112#define INTR_RX_COMP_AF 0x00000200 /* less than the
113 programmable threshold #
114 of descr spaces for hw
115 use in completion descr
116 ring */
117#define INTR_RX_LEN_MISMATCH 0x00000400 /* len field from MAC !=
118 len of non-reassembly pkt
119 from fifo during DMA or
120 header parser provides TCP
121 header and payload size >
122 MAC packet size.
123 FATAL ERROR */
124#define INTR_SUMMARY 0x00001000 /* summary interrupt bit. this
125 bit will be set if an interrupt
126						   is generated on the pci bus. useful
127 when driver is polling for
128 interrupts */
129#define INTR_PCS_STATUS 0x00002000 /* PCS interrupt status register */
130#define INTR_TX_MAC_STATUS 0x00004000 /* TX MAC status register has at
131 least 1 unmasked interrupt set */
132#define INTR_RX_MAC_STATUS 0x00008000 /* RX MAC status register has at
133 least 1 unmasked interrupt set */
134#define INTR_MAC_CTRL_STATUS 0x00010000 /* MAC control status register has
135 at least 1 unmasked interrupt
136 set */
137#define INTR_MIF_STATUS 0x00020000 /* MIF status register has at least
138 1 unmasked interrupt set */
139#define INTR_PCI_ERROR_STATUS 0x00040000 /* PCI error status register in the
140 BIF has at least 1 unmasked
141 interrupt set */
142#define INTR_TX_COMP_3_MASK 0xFFF80000 /* mask for TX completion
143 3 reg data */
144#define INTR_TX_COMP_3_SHIFT 19
145#define INTR_ERROR_MASK (INTR_MIF_STATUS | INTR_PCI_ERROR_STATUS | \
146 INTR_PCS_STATUS | INTR_RX_LEN_MISMATCH | \
147 INTR_TX_MAC_STATUS | INTR_RX_MAC_STATUS | \
148 INTR_TX_TAG_ERROR | INTR_RX_TAG_ERROR | \
149 INTR_MAC_CTRL_STATUS)
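/* The TX completion 3 index is recovered from the status word with the
 * usual mask/shift pattern, e.g.:
 *
 *	comp3 = (status & INTR_TX_COMP_3_MASK) >> INTR_TX_COMP_3_SHIFT;
 *
 * (illustrative only; `status' here stands for a value read from
 * REG_INTR_STATUS.)
 */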
150
151/* determines which status events will cause an interrupt. layout same
152 * as REG_INTR_STATUS.
153 * DEFAULT: 0xFFFFFFFF, SIZE: 16 bits
154 */
155#define REG_INTR_MASK 0x0010 /* Interrupt mask */
156
157/* top level interrupt bits that are cleared during read of REG_INTR_STATUS_ALIAS.
158 * useful when driver is polling for interrupts. layout same as REG_INTR_MASK.
159 * DEFAULT: 0x00000000, SIZE: 12 bits
160 */
161#define REG_ALIAS_CLEAR 0x0014 /* alias clear mask
162 (used w/ status alias) */
163/* same as REG_INTR_STATUS except that only bits cleared are those selected by
164 * REG_ALIAS_CLEAR
165 * DEFAULT: 0x00000000, SIZE: 29 bits
166 */
167#define REG_INTR_STATUS_ALIAS 0x001C /* interrupt status alias
168 (selective clear) */
169
170/* DEFAULT: 0x0, SIZE: 3 bits */
171#define REG_PCI_ERR_STATUS 0x1000 /* PCI error status */
172#define PCI_ERR_BADACK 0x01 /* reserved in Cassini+.
173 set if no ACK64# during ABS64 cycle
174 in Cassini. */
175#define PCI_ERR_DTRTO 0x02 /* delayed xaction timeout. set if
176 no read retry after 2^15 clocks */
177#define PCI_ERR_OTHER 0x04 /* other PCI errors */
178#define PCI_ERR_BIM_DMA_WRITE 0x08 /* BIM received 0 count DMA write req.
179 unused in Cassini. */
180#define PCI_ERR_BIM_DMA_READ 0x10 /* BIM received 0 count DMA read req.
181 unused in Cassini. */
182#define PCI_ERR_BIM_DMA_TIMEOUT 0x20 /* BIM received 255 retries during
183 DMA. unused in cassini. */
184
185/* mask for PCI status events that will set PCI_ERR_STATUS. if cleared, event
186 * causes an interrupt to be generated.
187 * DEFAULT: 0x7, SIZE: 3 bits
188 */
189#define REG_PCI_ERR_STATUS_MASK 0x1004 /* PCI Error status mask */
190
191/* used to configure PCI related parameters that are not in PCI config space.
192 * DEFAULT: 0bxx000, SIZE: 5 bits
193 */
194#define REG_BIM_CFG 0x1008 /* BIM Configuration */
195#define BIM_CFG_RESERVED0 0x001 /* reserved */
196#define BIM_CFG_RESERVED1 0x002 /* reserved */
197#define BIM_CFG_64BIT_DISABLE 0x004 /* disable 64-bit mode */
198#define BIM_CFG_66MHZ 0x008 /* (ro) 1 = 66MHz, 0 = < 66MHz */
199#define BIM_CFG_32BIT 0x010 /* (ro) 1 = 32-bit slot, 0 = 64-bit */
200#define BIM_CFG_DPAR_INTR_ENABLE 0x020 /* detected parity err enable */
201#define BIM_CFG_RMA_INTR_ENABLE 0x040 /* master abort intr enable */
202#define BIM_CFG_RTA_INTR_ENABLE 0x080 /* target abort intr enable */
203#define BIM_CFG_RESERVED2 0x100 /* reserved */
204#define BIM_CFG_BIM_DISABLE 0x200 /* stop BIM DMA. use before global
205 reset. reserved in Cassini. */
206#define BIM_CFG_BIM_STATUS 0x400 /* (ro) 1 = BIM DMA suspended.
207 reserved in Cassini. */
208#define BIM_CFG_PERROR_BLOCK 0x800 /* block PERR# to pci bus. def: 0.
209 reserved in Cassini. */
210
211/* DEFAULT: 0x00000000, SIZE: 32 bits */
212#define REG_BIM_DIAG 0x100C /* BIM Diagnostic */
213#define BIM_DIAG_MSTR_SM_MASK 0x3FFFFF00 /* PCI master controller state
214 machine bits [21:0] */
215#define BIM_DIAG_BRST_SM_MASK 0x7F /* PCI burst controller state
216 machine bits [6:0] */
217
218/* writing to SW_RESET_TX and SW_RESET_RX will issue a global
219 * reset. poll until TX and RX read back as 0's for completion.
220 */
221#define REG_SW_RESET 0x1010 /* Software reset */
222#define SW_RESET_TX 0x00000001 /* reset TX DMA engine. poll until
223 cleared to 0. */
224#define SW_RESET_RX 0x00000002 /* reset RX DMA engine. poll until
225 cleared to 0. */
226#define SW_RESET_RSTOUT 0x00000004 /* force RSTOUT# pin active (low).
227 resets PHY and anything else
228 connected to RSTOUT#. RSTOUT#
229 is also activated by local PCI
230 reset when hot-swap is being
231 done. */
232#define SW_RESET_BLOCK_PCS_SLINK 0x00000008 /* if a global reset is done with
233 this bit set, PCS and SLINK
234 modules won't be reset.
235 i.e., link won't drop. */
236#define SW_RESET_BREQ_SM_MASK 0x00007F00 /* breq state machine [6:0] */
237#define SW_RESET_PCIARB_SM_MASK 0x00070000 /* pci arbitration state bits:
238 0b000: ARB_IDLE1
239 0b001: ARB_IDLE2
240 0b010: ARB_WB_ACK
241 0b011: ARB_WB_WAT
242 0b100: ARB_RB_ACK
243 0b101: ARB_RB_WAT
244 0b110: ARB_RB_END
245 0b111: ARB_WB_END */
246#define SW_RESET_RDPCI_SM_MASK 0x00300000 /* read pci state bits:
247 0b00: RD_PCI_WAT
248 0b01: RD_PCI_RDY
249 0b11: RD_PCI_ACK */
250#define SW_RESET_RDARB_SM_MASK 0x00C00000 /* read arbitration state bits:
251 0b00: AD_IDL_RX
252 0b01: AD_ACK_RX
253 0b10: AD_ACK_TX
254 0b11: AD_IDL_TX */
255#define SW_RESET_WRPCI_SM_MASK 0x06000000 /* write pci state bits
256 0b00: WR_PCI_WAT
257 0b01: WR_PCI_RDY
258 0b11: WR_PCI_ACK */
259#define SW_RESET_WRARB_SM_MASK 0x38000000 /* write arbitration state bits:
260 0b000: ARB_IDLE1
261 0b001: ARB_IDLE2
262 0b010: ARB_TX_ACK
263 0b011: ARB_TX_WAT
264 0b100: ARB_RX_ACK
265 0b110: ARB_RX_WAT */
266
267/* Cassini only. 64-bit register used to check PCI datapath. when read,
268 * value written has both lower and upper 32-bit halves rotated to the right
269 * one bit position. e.g., FFFFFFFF FFFFFFFF -> 7FFFFFFF 7FFFFFFF
270 */
271#define REG_MINUS_BIM_DATAPATH_TEST 0x1018 /* Cassini: BIM datapath test
272 Cassini+: reserved */
273
274/* output enables are provided for each device's chip select and for the rest
275 * of the outputs from cassini to its local bus devices. two sw programmable
276 * bits are connected to general purpose control/status bits.
277 * DEFAULT: 0x7
278 */
279#define REG_BIM_LOCAL_DEV_EN 0x1020 /* BIM local device
280 output EN. default: 0x7 */
281#define BIM_LOCAL_DEV_PAD 0x01 /* address bus, RW signal, and
282 OE signal output enable on the
283 local bus interface. these
284 are shared between both local
285 bus devices. tristate when 0. */
286#define BIM_LOCAL_DEV_PROM 0x02 /* PROM chip select */
287#define BIM_LOCAL_DEV_EXT 0x04 /* secondary local bus device chip
288 select output enable */
289#define BIM_LOCAL_DEV_SOFT_0 0x08 /* sw programmable ctrl bit 0 */
290#define BIM_LOCAL_DEV_SOFT_1 0x10 /* sw programmable ctrl bit 1 */
291#define BIM_LOCAL_DEV_HW_RESET 0x20 /* internal hw reset. Cassini+ only. */
292
293/* access 24 entry BIM read and write buffers. put address in REG_BIM_BUFFER_ADDR
294 * and read/write from/to it via REG_BIM_BUFFER_DATA_LOW and _DATA_HI.
295 * _DATA_HI should be the last access of the sequence.
296 * DEFAULT: undefined
297 */
298#define REG_BIM_BUFFER_ADDR 0x1024 /* BIM buffer address. for
299						 diagnostic purposes. */
300#define BIM_BUFFER_ADDR_MASK 0x3F /* index (0 - 23) of buffer */
301#define BIM_BUFFER_WR_SELECT 0x40 /* write buffer access = 1
302 read buffer access = 0 */
303/* DEFAULT: undefined */
304#define REG_BIM_BUFFER_DATA_LOW 0x1028 /* BIM buffer data low */
305#define REG_BIM_BUFFER_DATA_HI 0x102C /* BIM buffer data high */
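/* Sketch of the access sequence described above (diagnostic use only;
 * `n' is the buffer index, OR in BIM_BUFFER_WR_SELECT to reach the write
 * buffer instead of the read buffer):
 *
 *	writel(n & BIM_BUFFER_ADDR_MASK, regs + REG_BIM_BUFFER_ADDR);
 *	lo = readl(regs + REG_BIM_BUFFER_DATA_LOW);
 *	hi = readl(regs + REG_BIM_BUFFER_DATA_HI);   // _DATA_HI must be last
 */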
306
307/* set BIM_RAM_BIST_START to start built-in self test for BIM read buffer.
308 * bit auto-clears when done with status read from _SUMMARY and _PASS bits.
309 */
310#define REG_BIM_RAM_BIST 0x102C /* BIM RAM (read buffer) BIST
311 control/status */
312#define BIM_RAM_BIST_RD_START 0x01 /* start BIST for BIM read buffer */
313#define BIM_RAM_BIST_WR_START 0x02 /* start BIST for BIM write buffer.
314 Cassini only. reserved in
315 Cassini+. */
316#define BIM_RAM_BIST_RD_PASS 0x04 /* summary BIST pass status for read
317 buffer. */
318#define BIM_RAM_BIST_WR_PASS 0x08 /* summary BIST pass status for write
319 buffer. Cassini only. reserved
320 in Cassini+. */
321#define BIM_RAM_BIST_RD_LOW_PASS 0x10 /* read low bank passes BIST */
322#define BIM_RAM_BIST_RD_HI_PASS 0x20 /* read high bank passes BIST */
323#define BIM_RAM_BIST_WR_LOW_PASS 0x40 /* write low bank passes BIST.
324 Cassini only. reserved in
325 Cassini+. */
326#define BIM_RAM_BIST_WR_HI_PASS 0x80 /* write high bank passes BIST.
327 Cassini only. reserved in
328 Cassini+. */
329
330/* ASUN: i'm not sure what this does as it's not in the spec.
331 * DEFAULT: 0xFC
332 */
333#define REG_BIM_DIAG_MUX 0x1030 /* BIM diagnostic probe mux
334 select register */
335
336/* enable probe monitoring mode and select data appearing on the P_A* bus. bit
337 * values for _SEL_HI_MASK and _SEL_LOW_MASK:
338 * 0x0: internal probe[7:0] (pci arb state, wtc empty w, wtc full w, wtc empty w,
339 * wtc empty r, post pci)
340 * 0x1: internal probe[15:8] (pci wbuf comp, pci wpkt comp, pci rbuf comp,
341 * pci rpkt comp, txdma wr req, txdma wr ack,
342 * txdma wr rdy, txdma wr xfr done)
343 * 0x2: internal probe[23:16] (txdma rd req, txdma rd ack, txdma rd rdy, rxdma rd,
344 * rd arb state, rd pci state)
345 * 0x3: internal probe[31:24] (rxdma req, rxdma ack, rxdma rdy, wrarb state,
346 * wrpci state)
347 * 0x4: pci io probe[7:0] 0x5: pci io probe[15:8]
348 * 0x6: pci io probe[23:16] 0x7: pci io probe[31:24]
349 * 0x8: pci io probe[39:32] 0x9: pci io probe[47:40]
350 * 0xa: pci io probe[55:48] 0xb: pci io probe[63:56]
351 * the following are not available in Cassini:
352 * 0xc: rx probe[7:0] 0xd: tx probe[7:0]
353 * 0xe: hp probe[7:0] 0xf: mac probe[7:0]
354 */
355#define REG_PLUS_PROBE_MUX_SELECT 0x1034 /* Cassini+: PROBE MUX SELECT */
356#define PROBE_MUX_EN 0x80000000 /* allow probe signals to be
357 driven on local bus P_A[15:0]
358 for debugging */
359#define PROBE_MUX_SUB_MUX_MASK 0x0000FF00 /* select sub module probe signals:
360 0x03 = mac[1:0]
361 0x0C = rx[1:0]
362 0x30 = tx[1:0]
363 0xC0 = hp[1:0] */
364#define PROBE_MUX_SEL_HI_MASK 0x000000F0 /* select which module to appear
365 on P_A[15:8]. see above for
366 values. */
367#define PROBE_MUX_SEL_LOW_MASK 0x0000000F /* select which module to appear
368 on P_A[7:0]. see above for
369 values. */
370
371/* values mean the same thing as REG_INTR_MASK except that it's for INTB.
372 DEFAULT: 0x1F */
373#define REG_PLUS_INTR_MASK_1 0x1038 /* Cassini+: interrupt mask
374 register 2 for INTB */
375#define REG_PLUS_INTRN_MASK(x) (REG_PLUS_INTR_MASK_1 + ((x) - 1)*16)
376/* bits correspond to both _MASK and _STATUS registers. _ALT corresponds to
377 * all of the alternate (2-4) INTR registers while _1 corresponds to only
378 * _MASK_1 and _STATUS_1 registers.
379 * DEFAULT: 0x7 for MASK registers, 0x0 for ALIAS_CLEAR registers
380 */
381#define INTR_RX_DONE_ALT 0x01
382#define INTR_RX_COMP_FULL_ALT 0x02
383#define INTR_RX_COMP_AF_ALT 0x04
384#define INTR_RX_BUF_UNAVAIL_1 0x08
385#define INTR_RX_BUF_AE_1 0x10 /* almost empty */
386#define INTRN_MASK_RX_EN 0x80
387#define INTRN_MASK_CLEAR_ALL (INTR_RX_DONE_ALT | \
388 INTR_RX_COMP_FULL_ALT | \
389 INTR_RX_COMP_AF_ALT | \
390 INTR_RX_BUF_UNAVAIL_1 | \
391 INTR_RX_BUF_AE_1)
392#define REG_PLUS_INTR_STATUS_1 0x103C /* Cassini+: interrupt status
393 register 2 for INTB. default: 0x1F */
394#define REG_PLUS_INTRN_STATUS(x) (REG_PLUS_INTR_STATUS_1 + ((x) - 1)*16)
395#define INTR_STATUS_ALT_INTX_EN 0x80 /* generate INTX when one of the
396 flags are set. enables desc ring. */
397
398#define REG_PLUS_ALIAS_CLEAR_1 0x1040 /* Cassini+: alias clear mask
399 register 2 for INTB */
400#define REG_PLUS_ALIASN_CLEAR(x) (REG_PLUS_ALIAS_CLEAR_1 + ((x) - 1)*16)
401
402#define REG_PLUS_INTR_STATUS_ALIAS_1 0x1044 /* Cassini+: interrupt status
403 register alias 2 for INTB */
404#define REG_PLUS_INTRN_STATUS_ALIAS(x) (REG_PLUS_INTR_STATUS_ALIAS_1 + ((x) - 1)*16)
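/* Note that the per-interrupt register banks for INTB and friends (mask at
 * 0x1038, status at 0x103C, alias clear at 0x1040, status alias at 0x1044)
 * repeat at a 16-byte stride, which is why each REG_PLUS_*N() accessor adds
 * ((x) - 1)*16 to the INTB copy.
 */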
405
406#define REG_SATURN_PCFG 0x106c /* pin configuration register for
407 integrated macphy */
408
409#define SATURN_PCFG_TLA 0x00000001 /* 1 = phy actled */
410#define SATURN_PCFG_FLA 0x00000002 /* 1 = phy link10led */
411#define SATURN_PCFG_CLA 0x00000004 /* 1 = phy link100led */
412#define SATURN_PCFG_LLA 0x00000008 /* 1 = phy link1000led */
413#define SATURN_PCFG_RLA 0x00000010 /* 1 = phy duplexled */
414#define SATURN_PCFG_PDS 0x00000020 /* phy debug mode.
415 0 = normal */
416#define SATURN_PCFG_MTP 0x00000080 /* test point select */
417#define SATURN_PCFG_GMO 0x00000100 /* GMII observe. 1 =
418 GMII on SERDES pins for
419 monitoring. */
420#define SATURN_PCFG_FSI 0x00000200 /* 1 = freeze serdes/gmii. all
421 pins configed as outputs.
422 for power saving when using
423 internal phy. */
424#define SATURN_PCFG_LAD 0x00000800 /* 0 = mac core led ctrl
425 polarity from strapping
426 value.
427 1 = mac core led ctrl
428 polarity active low. */
429
430
431/** transmit dma registers **/
432#define MAX_TX_RINGS_SHIFT 2
433#define MAX_TX_RINGS (1 << MAX_TX_RINGS_SHIFT)
434#define MAX_TX_RINGS_MASK (MAX_TX_RINGS - 1)
435
436/* TX configuration.
437 * descr ring size = 32 * (1 << n) entries, n < 9. e.g., 0x8 = 8k. default: 0x8
438 * DEFAULT: 0x3F000001
439 */
440#define REG_TX_CFG 0x2004 /* TX config */
441#define TX_CFG_DMA_EN 0x00000001 /* enable TX DMA. if cleared, DMA
442 will stop after xfer of current
443 buffer has been completed. */
444#define TX_CFG_FIFO_PIO_SEL 0x00000002 /* TX DMA FIFO can be
445 accessed w/ FIFO addr
446 and data registers.
447 TX DMA should be
448 disabled. */
449#define TX_CFG_DESC_RING0_MASK 0x0000003C /* # desc entries in
450 ring 1. */
451#define TX_CFG_DESC_RING0_SHIFT 2
452#define TX_CFG_DESC_RINGN_MASK(a) (TX_CFG_DESC_RING0_MASK << (a)*4)
453#define TX_CFG_DESC_RINGN_SHIFT(a) (TX_CFG_DESC_RING0_SHIFT + (a)*4)
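/* The ring size field follows the formula in the comment above: a field
 * value of n selects 32 * (1 << n) descriptors.  For example (illustrative
 * only), placing 0x8 in the ring 0 field via
 *
 *	(0x8 << TX_CFG_DESC_RING0_SHIFT) & TX_CFG_DESC_RING0_MASK
 *
 * selects an 8k-entry ring, matching the "0x8 = 8k" note above.
 */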
454#define TX_CFG_PACED_MODE 0x00100000 /* TX_ALL only set after
455 TX FIFO becomes empty.
456 if 0, TX_ALL set
457 if descr queue empty. */
458#define TX_CFG_DMA_RDPIPE_DIS 0x01000000 /* always set to 1 */
459#define TX_CFG_COMPWB_Q1 0x02000000 /* completion writeback happens at
460 the end of every packet kicked
461 through Q1. */
462#define TX_CFG_COMPWB_Q2 0x04000000 /* completion writeback happens at
463 the end of every packet kicked
464 through Q2. */
465#define TX_CFG_COMPWB_Q3 0x08000000 /* completion writeback happens at
466 the end of every packet kicked
467 through Q3 */
468#define TX_CFG_COMPWB_Q4 0x10000000 /* completion writeback happens at
469 the end of every packet kicked
470 through Q4 */
471#define TX_CFG_INTR_COMPWB_DIS 0x20000000 /* disable pre-interrupt completion
472 writeback */
473#define TX_CFG_CTX_SEL_MASK 0xC0000000 /* selects tx test port
474 connection
475 0b00: tx mac req,
476 tx mac retry req,
477 tx ack and tx tag.
478 0b01: txdma rd req,
479 txdma rd ack,
480 txdma rd rdy,
481 txdma rd type0
482 0b11: txdma wr req,
483 txdma wr ack,
484 txdma wr rdy,
485 txdma wr xfr done. */
486#define TX_CFG_CTX_SEL_SHIFT 30
487
488/* 11-bit counters that point to next location in FIFO to be loaded/retrieved.
489 * used for diagnostics only.
490 */
491#define REG_TX_FIFO_WRITE_PTR 0x2014 /* TX FIFO write pointer */
492#define REG_TX_FIFO_SHADOW_WRITE_PTR 0x2018 /* TX FIFO shadow write
493 pointer. temp hold reg.
494 diagnostics only. */
495#define REG_TX_FIFO_READ_PTR 0x201C /* TX FIFO read pointer */
496#define REG_TX_FIFO_SHADOW_READ_PTR 0x2020 /* TX FIFO shadow read
497 pointer */
498
499/* (ro) 11-bit up/down counter w/ # of frames currently in TX FIFO */
500#define REG_TX_FIFO_PKT_CNT 0x2024 /* TX FIFO packet counter */
501
502/* current state of all state machines in TX */
503#define REG_TX_SM_1 0x2028 /* TX state machine reg #1 */
504#define TX_SM_1_CHAIN_MASK 0x000003FF /* chaining state machine */
505#define TX_SM_1_CSUM_MASK 0x00000C00 /* checksum state machine */
506#define TX_SM_1_FIFO_LOAD_MASK 0x0003F000 /* FIFO load state machine.
507 = 0x01 when TX disabled. */
508#define TX_SM_1_FIFO_UNLOAD_MASK 0x003C0000 /* FIFO unload state machine */
509#define TX_SM_1_CACHE_MASK 0x03C00000 /* desc. prefetch cache controller
510 state machine */
511#define TX_SM_1_CBQ_ARB_MASK 0xF8000000 /* CBQ arbiter state machine */
512
513#define REG_TX_SM_2 0x202C /* TX state machine reg #2 */
514#define TX_SM_2_COMP_WB_MASK 0x07 /* completion writeback sm */
515#define TX_SM_2_SUB_LOAD_MASK 0x38 /* sub load state machine */
516#define TX_SM_2_KICK_MASK 0xC0 /* kick state machine */
517
518/* 64-bit pointer to the transmit data buffer. only the 50 LSB are incremented
519 * while the upper 23 bits are taken from the TX descriptor
520 */
521#define REG_TX_DATA_PTR_LOW 0x2030 /* TX data pointer low */
522#define REG_TX_DATA_PTR_HI 0x2034 /* TX data pointer high */
523
524/* 13 bit registers written by driver w/ descriptor value that follows
525 * last valid xmit descriptor. kick # and complete # values are used by
526 * the xmit dma engine to control tx descr fetching. if > 1 valid
527 * tx descr is available within the cache line being read, cassini will
528 * internally cache up to 4 of them. 0 on reset. _KICK = rw, _COMP = ro.
529 */
530#define REG_TX_KICK0 0x2038 /* TX kick reg #1 */
531#define REG_TX_KICKN(x) (REG_TX_KICK0 + (x)*4)
532#define REG_TX_COMP0 0x2048 /* TX completion reg #1 */
533#define REG_TX_COMPN(x) (REG_TX_COMP0 + (x)*4)
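
/* Illustrative sketch (not part of the spec text): posting new TX work by
 * advancing a kick register. Assumes an ioremapped register base `regs` and
 * writel() from <linux/io.h>; the helper name is hypothetical.
 */
static inline void cas_example_tx_kick(void __iomem *regs, int ring,
				       unsigned int next_free)
{
	/* write the index *following* the last valid TX descriptor */
	writel(next_free, regs + REG_TX_KICKN(ring));
}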
534
535/* values of TX_COMPLETE_1-4 are written. each completion register
536 * is 2 bytes in size and contiguous. 8B allocation w/ 8B alignment.
537 * NOTE: completion reg values are only written back prior to TX_INTME and
538 * TX_ALL interrupts. at all other times, the most up-to-date index values
539 * should be obtained from the REG_TX_COMPLETE_# registers.
540 * here's the layout:
541 * offset from base addr completion # byte
542 * 0 TX_COMPLETE_1_MSB
543 * 1 TX_COMPLETE_1_LSB
544 * 2 TX_COMPLETE_2_MSB
545 * 3 TX_COMPLETE_2_LSB
546 * 4 TX_COMPLETE_3_MSB
547 * 5 TX_COMPLETE_3_LSB
548 * 6 TX_COMPLETE_4_MSB
549 * 7 TX_COMPLETE_4_LSB
550 */
551#define TX_COMPWB_SIZE 8
552#define REG_TX_COMPWB_DB_LOW 0x2058 /* TX completion write back
553 base low */
554#define REG_TX_COMPWB_DB_HI 0x205C /* TX completion write back
555 base high */
556#define TX_COMPWB_MSB_MASK 0x00000000000000FFULL
557#define TX_COMPWB_MSB_SHIFT 0
558#define TX_COMPWB_LSB_MASK 0x000000000000FF00ULL
559#define TX_COMPWB_LSB_SHIFT 8
560#define TX_COMPWB_NEXT(x) ((x) >> 16)
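
/* Illustrative sketch, assuming the 8-byte completion writeback block has
 * been loaded little-endian into a u64 (as the masks above imply, with the
 * MSB byte at the lower offset). Extracts one ring's completion index and
 * steps the value to the next ring's pair; the helper name is hypothetical.
 */
static inline unsigned int cas_example_compwb_pop(u64 *compwb)
{
	unsigned int msb = (*compwb & TX_COMPWB_MSB_MASK) >> TX_COMPWB_MSB_SHIFT;
	unsigned int lsb = (*compwb & TX_COMPWB_LSB_MASK) >> TX_COMPWB_LSB_SHIFT;

	*compwb = TX_COMPWB_NEXT(*compwb);	/* advance to the next ring's bytes */
	return (msb << 8) | lsb;
}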
561
562/* 53 MSB used as base address. 11 LSB assumed to be 0. TX desc pointer must
563 * be 2KB-aligned. */
564#define REG_TX_DB0_LOW 0x2060 /* TX descriptor base low #1 */
565#define REG_TX_DB0_HI 0x2064 /* TX descriptor base hi #1 */
566#define REG_TX_DBN_LOW(x) (REG_TX_DB0_LOW + (x)*8)
567#define REG_TX_DBN_HI(x) (REG_TX_DB0_HI + (x)*8)
568
569/* 16-bit registers hold weights for the weighted round-robin of the
570 * four CBQ TX descr rings. weights correspond to # bytes xferred from
571 * host to TXFIFO in a round of WRR arbitration. can be set
572 * dynamically with new weights set upon completion of the current
573 * packet transfer from host memory to TXFIFO. a dummy write to any of
574 * these registers causes a queue1 pre-emption with all historical bw
575 * deficit data reset to 0 (useful when congestion requires a
576 pre-emption/re-allocation of network bandwidth)
577 */
578#define REG_TX_MAXBURST_0 0x2080 /* TX MaxBurst #1 */
579#define REG_TX_MAXBURST_1 0x2084 /* TX MaxBurst #2 */
580#define REG_TX_MAXBURST_2 0x2088 /* TX MaxBurst #3 */
581#define REG_TX_MAXBURST_3 0x208C /* TX MaxBurst #4 */
582
583/* diagnostics access to any TX FIFO location. every access is 65
584 * bits. _DATA_LOW = 32 LSB, _DATA_HI_T1/T0 = 32 MSB. _TAG = tag bit.
585 * writing _DATA_HI_T0 sets tag bit low, writing _DATA_HI_T1 sets tag
586 * bit high. TX_FIFO_PIO_SEL must be set for TX FIFO PIO access. if
587 * TX FIFO data integrity is desired, TX DMA should be
588 * disabled. _DATA_HI_Tx should be the last access of the sequence.
589 */
590#define REG_TX_FIFO_ADDR 0x2104 /* TX FIFO address */
591#define REG_TX_FIFO_TAG 0x2108 /* TX FIFO tag */
592#define REG_TX_FIFO_DATA_LOW 0x210C /* TX FIFO data low */
593#define REG_TX_FIFO_DATA_HI_T1 0x2110 /* TX FIFO data high t1 */
594#define REG_TX_FIFO_DATA_HI_T0 0x2114 /* TX FIFO data high t0 */
595#define REG_TX_FIFO_SIZE 0x2118 /* (ro) TX FIFO size = 0x090 = 9KB */
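
/* Illustrative sketch of a diagnostic TX FIFO read per the PIO rules above:
 * TX DMA already disabled, TX_CFG_FIFO_PIO_SEL set, and the _DATA_HI access
 * issued last. The T0/T1 distinction is described above only for writes, so
 * reading either HI register here is an assumption. Assumes readl()/writel()
 * from <linux/io.h>; helper name is hypothetical.
 */
static inline void cas_example_tx_fifo_peek(void __iomem *regs, u32 addr,
					    u32 *lo, u32 *tag, u32 *hi)
{
	writel(addr, regs + REG_TX_FIFO_ADDR);
	*lo  = readl(regs + REG_TX_FIFO_DATA_LOW);
	*tag = readl(regs + REG_TX_FIFO_TAG);
	*hi  = readl(regs + REG_TX_FIFO_DATA_HI_T0);	/* HI access last */
}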
596
597/* 9-bit register controls BIST of TX FIFO. bit set indicates that the BIST
598 * passed for the specified memory
599 */
600#define REG_TX_RAMBIST 0x211C /* TX RAMBIST control/status */
601#define TX_RAMBIST_STATE 0x01C0 /* progress state of RAMBIST
602 controller state machine */
603#define TX_RAMBIST_RAM33A_PASS 0x0020 /* RAM33A passed */
604#define TX_RAMBIST_RAM32A_PASS 0x0010 /* RAM32A passed */
605#define TX_RAMBIST_RAM33B_PASS 0x0008 /* RAM33B passed */
606#define TX_RAMBIST_RAM32B_PASS 0x0004 /* RAM32B passed */
607#define TX_RAMBIST_SUMMARY 0x0002 /* all RAM passed */
608#define TX_RAMBIST_START 0x0001 /* write 1 to start BIST. self
609 clears on completion. */
610
611/** receive dma registers **/
612#define MAX_RX_DESC_RINGS 2
613#define MAX_RX_COMP_RINGS 4
614
615/* receive DMA channel configuration. default: 0x80910
616 * free ring size = (1 << n)*32 -> [32 - 8k]
617 * completion ring size = (1 << n)*128 -> [128 - 32k], n < 9
618 * DEFAULT: 0x80910
619 */
620#define REG_RX_CFG 0x4000 /* RX config */
621#define RX_CFG_DMA_EN 0x00000001 /* enable RX DMA. 0 stops
622 channel as soon as current
623 frame xfer has completed.
624 driver should disable MAC
625 for 200ms before disabling
626 RX */
627#define RX_CFG_DESC_RING_MASK 0x0000001E /* # desc entries in RX
628 free desc ring.
629 def: 0x8 = 8k */
630#define RX_CFG_DESC_RING_SHIFT 1
631#define RX_CFG_COMP_RING_MASK 0x000001E0 /* # desc entries in RX complete
632 ring. def: 0x8 = 32k */
633#define RX_CFG_COMP_RING_SHIFT 5
634#define RX_CFG_BATCH_DIS 0x00000200 /* disable receive desc
635 batching. def: 0x0 =
636 enabled */
637#define RX_CFG_SWIVEL_MASK 0x00001C00 /* byte offset of the 1st
638 data byte of the packet
639 w/in 8 byte boundaries.
640 this swivels the data
641 DMA'ed to header
642 buffers, jumbo buffers
643 when header split is not
644 requested and MTU sized
645 buffers. def: 0x2 */
646#define RX_CFG_SWIVEL_SHIFT 10
647
648/* cassini+ only */
649#define RX_CFG_DESC_RING1_MASK 0x000F0000 /* # of desc entries in
650 RX free desc ring 2.
651 def: 0x8 = 8k */
652#define RX_CFG_DESC_RING1_SHIFT 16
653
654
655/* the page size register allows cassini chips to do the following with
656 * received data:
657 * [--------------------------------------------------------------] page
658 * [off][buf1][pad][off][buf2][pad][off][buf3][pad][off][buf4][pad]
659 * |--------------| = PAGE_SIZE_BUFFER_STRIDE
660 * page = PAGE_SIZE
661 * offset = PAGE_SIZE_MTU_OFF
662 * for the above example, MTU_BUFFER_COUNT = 4.
663 * NOTE: as is apparent, you need to ensure that the following holds:
664 * MTU_BUFFER_COUNT <= PAGE_SIZE/PAGE_SIZE_BUFFER_STRIDE
665 * DEFAULT: 0x48002002 (8k pages)
666 */
667#define REG_RX_PAGE_SIZE 0x4004 /* RX page size */
668#define RX_PAGE_SIZE_MASK 0x00000003 /* size of pages pointed to
669 by receive descriptors.
670 if jumbo buffers are
671 supported the page size
672 should not be < 8k.
673 0b00 = 2k, 0b01 = 4k
674 0b10 = 8k, 0b11 = 16k
675 DEFAULT: 8k */
676#define RX_PAGE_SIZE_SHIFT 0
677#define RX_PAGE_SIZE_MTU_COUNT_MASK 0x00007800 /* # of MTU buffers the hw
678 packs into a page.
679 DEFAULT: 4 */
680#define RX_PAGE_SIZE_MTU_COUNT_SHIFT 11
681#define RX_PAGE_SIZE_MTU_STRIDE_MASK 0x18000000 /* # of bytes that separate
682 each MTU buffer +
683 offset from each
684 other.
685 0b00 = 1k, 0b01 = 2k
686 0b10 = 4k, 0b11 = 8k
687 DEFAULT: 0x1 */
688#define RX_PAGE_SIZE_MTU_STRIDE_SHIFT 27
689#define RX_PAGE_SIZE_MTU_OFF_MASK 0xC0000000 /* offset in each page that
690 hw writes the MTU buffer
691 into.
692 0b00 = 0,
693 0b01 = 64 bytes
694 0b10 = 96, 0b11 = 128
695 DEFAULT: 0x1 */
696#define RX_PAGE_SIZE_MTU_OFF_SHIFT 30
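
/* Illustrative sketch: composing the documented default value 0x48002002
 * (8KB pages, 4 MTU buffers per page, 2KB stride, 64-byte offset) from the
 * fields above. 4 buffers * 2KB stride fits within an 8KB page, satisfying
 * MTU_BUFFER_COUNT <= PAGE_SIZE/PAGE_SIZE_BUFFER_STRIDE. Helper name is
 * hypothetical.
 */
static inline u32 cas_example_rx_page_size(void)
{
	u32 val = 0;

	val |= 0x2 << RX_PAGE_SIZE_SHIFT;		/* 0b10 = 8KB pages */
	val |= 0x4 << RX_PAGE_SIZE_MTU_COUNT_SHIFT;	/* 4 MTU buffers/page */
	val |= 0x1 << RX_PAGE_SIZE_MTU_STRIDE_SHIFT;	/* 0b01 = 2KB stride */
	val |= 0x1 << RX_PAGE_SIZE_MTU_OFF_SHIFT;	/* 0b01 = 64-byte offset */
	return val;					/* = 0x48002002 */
}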
697
698/* 11-bit counter points to next location in RX FIFO to be loaded/read.
699 * shadow write pointers enable retries in case of early receive aborts.
700 * DEFAULT: 0x0. generated on 64-bit boundaries.
701 */
702#define REG_RX_FIFO_WRITE_PTR 0x4008 /* RX FIFO write pointer */
703#define REG_RX_FIFO_READ_PTR 0x400C /* RX FIFO read pointer */
704#define REG_RX_IPP_FIFO_SHADOW_WRITE_PTR 0x4010 /* RX IPP FIFO shadow write
705 pointer */
706#define REG_RX_IPP_FIFO_SHADOW_READ_PTR 0x4014 /* RX IPP FIFO shadow read
707 pointer */
708#define REG_RX_IPP_FIFO_READ_PTR 0x400C /* RX IPP FIFO read
709 pointer. (8-bit counter) */
710
711/* current state of RX DMA state engines + other info
712 * DEFAULT: 0x0
713 */
714#define REG_RX_DEBUG 0x401C /* RX debug */
715#define RX_DEBUG_LOAD_STATE_MASK 0x0000000F /* load state machine w/ MAC:
716 0x0 = idle, 0x1 = load_bop
717 0x2 = load 1, 0x3 = load 2
718 0x4 = load 3, 0x5 = load 4
719 0x6 = last detect
720 0x7 = wait req
721 0x8 = wait req status 1st
722 0x9 = load st
723 0xa = bubble mac
724 0xb = error */
725#define RX_DEBUG_LM_STATE_MASK 0x00000070 /* load state machine w/ HP and
726 RX FIFO:
727 0x0 = idle, 0x1 = hp xfr
728 0x2 = wait hp ready
729 0x3 = wait flow code
730 0x4 = fifo xfer
731 0x5 = make status
732 0x6 = csum ready
733 0x7 = error */
734#define RX_DEBUG_FC_STATE_MASK 0x000000180 /* flow control state machine
735 w/ MAC:
736 0x0 = idle
737 0x1 = wait xoff ack
738 0x2 = wait xon
739 0x3 = wait xon ack */
740#define RX_DEBUG_DATA_STATE_MASK 0x000001E00 /* unload data state machine
741 states:
742 0x0 = idle data
743 0x1 = header begin
744 0x2 = xfer header
745 0x3 = xfer header ld
746 0x4 = mtu begin
747 0x5 = xfer mtu
748 0x6 = xfer mtu ld
749 0x7 = jumbo begin
750 0x8 = xfer jumbo
751 0x9 = xfer jumbo ld
752 0xa = reas begin
753 0xb = xfer reas
754 0xc = flush tag
755 0xd = xfer reas ld
756 0xe = error
757 0xf = bubble idle */
758#define RX_DEBUG_DESC_STATE_MASK 0x0001E000 /* unload desc state machine
759 states:
760 0x0 = idle desc
761 0x1 = wait ack
762 0x9 = wait ack 2
763 0x2 = fetch desc 1
764 0xa = fetch desc 2
765 0x3 = load ptrs
766 0x4 = wait dma
767 0x5 = wait ack batch
768 0x6 = post batch
769 0x7 = xfr done */
770#define RX_DEBUG_INTR_READ_PTR_MASK 0x30000000 /* interrupt read ptr of the
771 interrupt queue */
772#define RX_DEBUG_INTR_WRITE_PTR_MASK 0xC0000000 /* interrupt write pointer
773 of the interrupt queue */
774
775/* flow control frames are emitted using two PAUSE thresholds:
776 * XOFF PAUSE uses pause time value pre-programmed in the Send PAUSE MAC reg
777 * XON PAUSE uses a pause time of 0. granularity of threshold is 64bytes.
778 * PAUSE thresholds defined in terms of FIFO occupancy and may be translated
779 * into FIFO vacancy using RX_FIFO_SIZE. setting ON will trigger XON frames
780 * when FIFO reaches 0. OFF threshold should not be > size of RX FIFO. max
781 value is 0x6F.
782 * DEFAULT: 0x00078
783 */
784#define REG_RX_PAUSE_THRESH 0x4020 /* RX pause thresholds */
785#define RX_PAUSE_THRESH_QUANTUM 64
786#define RX_PAUSE_THRESH_OFF_MASK 0x000001FF /* XOFF PAUSE emitted when
787 RX FIFO occupancy >
788 value*64B */
789#define RX_PAUSE_THRESH_OFF_SHIFT 0
790#define RX_PAUSE_THRESH_ON_MASK 0x001FF000 /* XON PAUSE emitted after
791 emitting XOFF PAUSE when RX
792 FIFO occupancy falls below
793 this value*64B. must be
794 < XOFF threshold. if =
795 RX_FIFO_SIZE, XON frames are
796 never emitted. */
797#define RX_PAUSE_THRESH_ON_SHIFT 12
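
/* Illustrative sketch: converting byte-level FIFO occupancy thresholds into
 * the 64-byte quanta used by the fields above. The caller is responsible for
 * keeping on_bytes below off_bytes and off_bytes within the RX FIFO size.
 * Helper name is hypothetical.
 */
static inline u32 cas_example_rx_pause_thresh(u32 off_bytes, u32 on_bytes)
{
	u32 off = off_bytes / RX_PAUSE_THRESH_QUANTUM;
	u32 on  = on_bytes  / RX_PAUSE_THRESH_QUANTUM;

	return ((off << RX_PAUSE_THRESH_OFF_SHIFT) & RX_PAUSE_THRESH_OFF_MASK) |
	       ((on  << RX_PAUSE_THRESH_ON_SHIFT)  & RX_PAUSE_THRESH_ON_MASK);
}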
798
799/* 13-bit register used to control RX desc fetching and intr generation. if 4+
800 * valid RX descriptors are available, Cassini will read 4 at a time.
801 * writing N means that all desc up to *but* excluding N are available. N must
802 * be a multiple of 4 (N % 4 = 0). first desc should be cache-line aligned.
803 * DEFAULT: 0 on reset
804 */
805#define REG_RX_KICK 0x4024 /* RX kick reg */
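
/* Illustrative sketch: posting RX free descriptors. Per the note above, the
 * written index must be a multiple of 4, so round down before the write.
 * Assumes writel() from <linux/io.h>; helper name is hypothetical.
 */
static inline void cas_example_rx_kick(void __iomem *regs, unsigned int next_free)
{
	writel(next_free & ~0x3, regs + REG_RX_KICK);
}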
806
807/* 8KB aligned 64-bit pointer to the base of the RX free/completion rings.
808 * lower 13 bits of the low register are hard-wired to 0.
809 */
810#define REG_RX_DB_LOW 0x4028 /* RX descriptor ring
811 base low */
812#define REG_RX_DB_HI 0x402C /* RX descriptor ring
813 base hi */
814#define REG_RX_CB_LOW 0x4030 /* RX completion ring
815 base low */
816#define REG_RX_CB_HI 0x4034 /* RX completion ring
817 base hi */
818/* 13-bit register indicates desc used by cassini for receive frames. used
819 * for diagnostic purposes.
820 * DEFAULT: 0 on reset
821 */
822#define REG_RX_COMP 0x4038 /* (ro) RX completion */
823
824/* HEAD and TAIL are used to control RX desc posting and interrupt
825 * generation. hw moves the head register to pass ownership to sw. sw
826 * moves the tail register to pass ownership back to hw. to give all
827 * entries to hw, set TAIL = HEAD. if HEAD and TAIL indicate that no
828 * more entries are available, DMA will pause and an interrupt will be
829 * generated to indicate no more entries are available. sw can use
830 * this interrupt to reduce the # of times it must update the
831 * completion tail register.
832 * DEFAULT: 0 on reset
833 */
834#define REG_RX_COMP_HEAD 0x403C /* RX completion head */
835#define REG_RX_COMP_TAIL 0x4040 /* RX completion tail */
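
/* Illustrative sketch: after processing completion entries, software returns
 * ownership to hardware by moving the tail. Setting TAIL = HEAD hands every
 * entry back, as described above. Helper name is hypothetical.
 */
static inline void cas_example_rx_comp_return_all(void __iomem *regs)
{
	u32 head = readl(regs + REG_RX_COMP_HEAD);

	writel(head, regs + REG_RX_COMP_TAIL);
}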
836
837/* values used for receive interrupt blanking. loaded each time the ISR is read
838 * DEFAULT: 0x00000000
839 */
840#define REG_RX_BLANK 0x4044 /* RX blanking register
841 for ISR read */
842#define RX_BLANK_INTR_PKT_MASK 0x000001FF /* RX_DONE intr asserted if
843 this many sets of completion
844 writebacks (up to 2 packets)
845 occur since the last time
846 the ISR was read. 0 = no
847 packet blanking */
848#define RX_BLANK_INTR_PKT_SHIFT 0
849#define RX_BLANK_INTR_TIME_MASK 0x3FFFF000 /* RX_DONE interrupt asserted
850 if that many clocks were
851 counted since last time the
852 ISR was read.
853 each count is 512 core
854 clocks (125MHz). 0 = no
855 time blanking */
856#define RX_BLANK_INTR_TIME_SHIFT 12
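
/* Illustrative sketch: building a blanking value from a completion count and
 * a microsecond delay. One time tick is 512 core clocks at 125MHz, i.e.
 * roughly 4.1us; the conversion below is an approximation and the helper
 * name is hypothetical.
 */
static inline u32 cas_example_rx_blank(u32 pkts, u32 usecs)
{
	u32 ticks = (usecs * 1000) / 4096;	/* 512 clocks @ 125MHz = 4096ns */

	return ((pkts  << RX_BLANK_INTR_PKT_SHIFT)  & RX_BLANK_INTR_PKT_MASK) |
	       ((ticks << RX_BLANK_INTR_TIME_SHIFT) & RX_BLANK_INTR_TIME_MASK);
}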
857
858/* values used for interrupt generation based on threshold values of how
859 * many free desc and completion entries are available for hw use.
860 * DEFAULT: 0x00000000
861 */
862#define REG_RX_AE_THRESH 0x4048 /* RX almost empty
863 thresholds */
864#define RX_AE_THRESH_FREE_MASK 0x00001FFF /* RX_BUF_AE will be
865 generated if # desc
866 avail for hw use <=
867 # */
868#define RX_AE_THRESH_FREE_SHIFT 0
869#define RX_AE_THRESH_COMP_MASK 0x0FFFE000 /* RX_COMP_AE will be
870 generated if # of
871 completion entries
872 avail for hw use <=
873 # */
874#define RX_AE_THRESH_COMP_SHIFT 13
875
876/* probabilities for random early drop (RED) thresholds on a FIFO threshold
877 * basis. probability should increase when the FIFO level increases. control
878 * packets are never dropped and not counted in stats. probability programmed
879 * on a 12.5% granularity. e.g., 0x1 = 1/8 packets dropped.
880 * DEFAULT: 0x00000000
881 */
882#define REG_RX_RED 0x404C /* RX random early detect enable */
883#define RX_RED_4K_6K_FIFO_MASK 0x000000FF /* 4KB < FIFO thresh < 6KB */
884#define RX_RED_6K_8K_FIFO_MASK 0x0000FF00 /* 6KB < FIFO thresh < 8KB */
885#define RX_RED_8K_10K_FIFO_MASK 0x00FF0000 /* 8KB < FIFO thresh < 10KB */
886#define RX_RED_10K_12K_FIFO_MASK 0xFF000000 /* 10KB < FIFO thresh < 12KB */
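
/* Illustrative sketch: each RED field holds a drop probability in 12.5%
 * steps (0x1 = 1 in 8 packets dropped) for its FIFO occupancy band. The
 * per-band shifts follow the masks above; helper name is hypothetical.
 */
static inline u32 cas_example_rx_red(u32 p_4k_6k, u32 p_6k_8k,
				     u32 p_8k_10k, u32 p_10k_12k)
{
	return (p_4k_6k  <<  0) | (p_6k_8k   <<  8) |
	       (p_8k_10k << 16) | (p_10k_12k << 24);
}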
887
888/* FIFO fullness levels for RX FIFO, RX control FIFO, and RX IPP FIFO.
889 * RX control FIFO = # of packets in RX FIFO.
890 * DEFAULT: 0x0
891 */
892#define REG_RX_FIFO_FULLNESS 0x4050 /* (ro) RX FIFO fullness */
893#define RX_FIFO_FULLNESS_RX_FIFO_MASK 0x3FF80000 /* level w/ 8B granularity */
894#define RX_FIFO_FULLNESS_IPP_FIFO_MASK 0x0007FF00 /* level w/ 8B granularity */
895#define RX_FIFO_FULLNESS_RX_PKT_MASK 0x000000FF /* # packets in RX FIFO */
896#define REG_RX_IPP_PACKET_COUNT 0x4054 /* RX IPP packet counter */
897#define REG_RX_WORK_DMA_PTR_LOW 0x4058 /* RX working DMA ptr low */
898#define REG_RX_WORK_DMA_PTR_HI 0x405C /* RX working DMA ptr
899 high */
900
901/* BIST testing of RX FIFO, RX control FIFO, and RX IPP FIFO. only RX BIST
902 * START/COMPLETE is writeable. START will clear when the BIST has completed
903 * checking all 17 RAMS.
904 * DEFAULT: 0bxxxx xxxxx xxxx xxxx xxxx x000 0000 0000 00x0
905 */
906#define REG_RX_BIST 0x4060 /* (ro) RX BIST */
907#define RX_BIST_32A_PASS 0x80000000 /* RX FIFO 32A passed */
908#define RX_BIST_33A_PASS 0x40000000 /* RX FIFO 33A passed */
909#define RX_BIST_32B_PASS 0x20000000 /* RX FIFO 32B passed */
910#define RX_BIST_33B_PASS 0x10000000 /* RX FIFO 33B passed */
911#define RX_BIST_32C_PASS 0x08000000 /* RX FIFO 32C passed */
912#define RX_BIST_33C_PASS 0x04000000 /* RX FIFO 33C passed */
913#define RX_BIST_IPP_32A_PASS 0x02000000 /* RX IPP FIFO 32A passed */
914#define RX_BIST_IPP_33A_PASS 0x01000000 /* RX IPP FIFO 33A passed */
915#define RX_BIST_IPP_32B_PASS 0x00800000 /* RX IPP FIFO 32B passed */
916#define RX_BIST_IPP_33B_PASS 0x00400000 /* RX IPP FIFO 33B passed */
917#define RX_BIST_IPP_32C_PASS 0x00200000 /* RX IPP FIFO 32C passed */
918#define RX_BIST_IPP_33C_PASS 0x00100000 /* RX IPP FIFO 33C passed */
919#define RX_BIST_CTRL_32_PASS 0x00800000 /* RX CTRL FIFO 32 passed */
920#define RX_BIST_CTRL_33_PASS 0x00400000 /* RX CTRL FIFO 33 passed */
921#define RX_BIST_REAS_26A_PASS 0x00200000 /* RX Reas 26A passed */
922#define RX_BIST_REAS_26B_PASS 0x00100000 /* RX Reas 26B passed */
923#define RX_BIST_REAS_27_PASS 0x00080000 /* RX Reas 27 passed */
924#define RX_BIST_STATE_MASK 0x00078000 /* BIST state machine */
925#define RX_BIST_SUMMARY 0x00000002 /* when BIST complete,
926 summary pass bit
927 contains AND of BIST
928 results of all 16
929 RAMS */
930#define RX_BIST_START 0x00000001 /* write 1 to start
931 BIST. self clears
932 on completion. */
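
/* Illustrative sketch: kicking off the RX BIST and polling for completion,
 * then checking the summary bit. The poll count and delay are arbitrary;
 * assumes readl()/writel()/udelay() from <linux/io.h> and <linux/delay.h>.
 * Helper name is hypothetical.
 */
static inline int cas_example_rx_bist(void __iomem *regs)
{
	int i;

	writel(RX_BIST_START, regs + REG_RX_BIST);
	for (i = 0; i < 1000; i++) {
		u32 val = readl(regs + REG_RX_BIST);

		if (!(val & RX_BIST_START))	/* self-clears when done */
			return (val & RX_BIST_SUMMARY) ? 0 : -EIO;
		udelay(10);
	}
	return -ETIMEDOUT;
}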
933
934/* next location in RX CTRL FIFO that will be loaded w/ data from RX IPP/read
935 * from to retrieve packet control info.
936 * DEFAULT: 0
937 */
938#define REG_RX_CTRL_FIFO_WRITE_PTR 0x4064 /* (ro) RX control FIFO
939 write ptr */
940#define REG_RX_CTRL_FIFO_READ_PTR 0x4068 /* (ro) RX control FIFO read
941 ptr */
942
943/* receive interrupt blanking. loaded each time interrupt alias register is
944 * read.
945 * DEFAULT: 0x0
946 */
947#define REG_RX_BLANK_ALIAS_READ 0x406C /* RX blanking register for
948 alias read */
949#define RX_BAR_INTR_PACKET_MASK 0x000001FF /* assert RX_DONE if #
950 completion writebacks
951 > # since last ISR
952 read. 0 = no
953 blanking. up to 2
954 packets per
955 completion wb. */
956#define RX_BAR_INTR_TIME_MASK 0x3FFFF000 /* assert RX_DONE if #
957 clocks > # since last
958 ISR read. each count
959 is 512 core clocks
960 (125MHz). 0 = no
961 blanking. */
962
963/* diagnostic access to RX FIFO. 32 LSB accessed via DATA_LOW. 32 MSB accessed
964 * via DATA_HI_T0 or DATA_HI_T1. TAG reads the tag bit. writing HI_T0
965 * will unset the tag bit while writing HI_T1 will set the tag bit. to reset
966 * to normal operation after diagnostics, write to address location 0x0.
967 * RX_DMA_EN bit must be set to 0x0 for RX FIFO PIO access. DATA_HI should
968 * be the last write access of a write sequence.
969 * DEFAULT: undefined
970 */
971#define REG_RX_FIFO_ADDR 0x4080 /* RX FIFO address */
972#define REG_RX_FIFO_TAG 0x4084 /* RX FIFO tag */
973#define REG_RX_FIFO_DATA_LOW 0x4088 /* RX FIFO data low */
974#define REG_RX_FIFO_DATA_HI_T0 0x408C /* RX FIFO data high T0 */
975#define REG_RX_FIFO_DATA_HI_T1 0x4090 /* RX FIFO data high T1 */
976
977/* diagnostic access to RX CTRL FIFO. 8-bit FIFO_ADDR holds address of
978 * 81 bit control entry and 6 bit flow id. LOW and MID are both 32-bit
979 * accesses. HI is 7-bits with 6-bit flow id and 1 bit control
980 * word. RX_DMA_EN must be 0 for RX CTRL FIFO PIO access. DATA_HI
981 * should be last write access of the write sequence.
982 * DEFAULT: undefined
983 */
984#define REG_RX_CTRL_FIFO_ADDR 0x4094 /* RX Control FIFO and
985 Batching FIFO addr */
986#define REG_RX_CTRL_FIFO_DATA_LOW 0x4098 /* RX Control FIFO data
987 low */
988#define REG_RX_CTRL_FIFO_DATA_MID 0x409C /* RX Control FIFO data
989 mid */
990#define REG_RX_CTRL_FIFO_DATA_HI 0x4100 /* RX Control FIFO data
991 hi and flow id */
992#define RX_CTRL_FIFO_DATA_HI_CTRL 0x0001 /* upper bit of ctrl word */
993#define RX_CTRL_FIFO_DATA_HI_FLOW_MASK 0x007E /* flow id */
994
995/* diagnostic access to RX IPP FIFO. same semantics as RX_FIFO.
996 * DEFAULT: undefined
997 */
998#define REG_RX_IPP_FIFO_ADDR 0x4104 /* RX IPP FIFO address */
999#define REG_RX_IPP_FIFO_TAG 0x4108 /* RX IPP FIFO tag */
1000#define REG_RX_IPP_FIFO_DATA_LOW 0x410C /* RX IPP FIFO data low */
1001#define REG_RX_IPP_FIFO_DATA_HI_T0 0x4110 /* RX IPP FIFO data high
1002 T0 */
1003#define REG_RX_IPP_FIFO_DATA_HI_T1 0x4114 /* RX IPP FIFO data high
1004 T1 */
1005
1006/* 64-bit pointer to receive data buffer in host memory used for headers and
1007 * small packets. MSB in high register. loaded by DMA state machine and
1008 * increments as DMA writes receive data. only 50 LSB are incremented. top
1009 * 13 bits taken from RX descriptor.
1010 * DEFAULT: undefined
1011 */
1012#define REG_RX_HEADER_PAGE_PTR_LOW 0x4118 /* (ro) RX header page ptr
1013 low */
1014#define REG_RX_HEADER_PAGE_PTR_HI 0x411C /* (ro) RX header page ptr
1015 high */
1016#define REG_RX_MTU_PAGE_PTR_LOW 0x4120 /* (ro) RX MTU page pointer
1017 low */
1018#define REG_RX_MTU_PAGE_PTR_HI 0x4124 /* (ro) RX MTU page pointer
1019 high */
1020
1021/* PIO diagnostic access to RX reassembly DMA Table RAM. 6-bit register holds
1022 * one of 64 79-bit locations in the RX Reassembly DMA table and the addr of
1023 * one of the 64 byte locations in the Batching table. LOW holds 32 LSB.
1024 * MID holds the next 32 LSB. HIGH holds the 15 MSB. RX_DMA_EN must be set
1025 * to 0 for PIO access. DATA_HIGH should be last write of write sequence.
1026 * layout:
1027 * reassembly ptr [78:15] | reassembly index [14:1] | reassembly entry valid [0]
1028 * DEFAULT: undefined
1029 */
1030#define REG_RX_TABLE_ADDR 0x4128 /* RX reassembly DMA table
1031 address */
1032#define RX_TABLE_ADDR_MASK 0x0000003F /* address mask */
1033
1034#define REG_RX_TABLE_DATA_LOW 0x412C /* RX reassembly DMA table
1035 data low */
1036#define REG_RX_TABLE_DATA_MID 0x4130 /* RX reassembly DMA table
1037 data mid */
1038#define REG_RX_TABLE_DATA_HI 0x4134 /* RX reassembly DMA table
1039 data high */
1040
1041/* cassini+ only */
1042/* 8KB aligned 64-bit pointer to base of RX rings. lower 13 bits hardwired to
1043 * 0. same semantics as primary desc/complete rings.
1044 */
1045#define REG_PLUS_RX_DB1_LOW 0x4200 /* RX descriptor ring
1046 2 base low */
1047#define REG_PLUS_RX_DB1_HI 0x4204 /* RX descriptor ring
1048 2 base high */
1049#define REG_PLUS_RX_CB1_LOW 0x4208 /* RX completion ring
1050 2 base low. 4 total */
1051#define REG_PLUS_RX_CB1_HI 0x420C /* RX completion ring
1052 2 base high. 4 total */
1053#define REG_PLUS_RX_CBN_LOW(x) (REG_PLUS_RX_CB1_LOW + 8*((x) - 1))
1054#define REG_PLUS_RX_CBN_HI(x) (REG_PLUS_RX_CB1_HI + 8*((x) - 1))
1055#define REG_PLUS_RX_KICK1 0x4220 /* RX Kick 2 register */
1056#define REG_PLUS_RX_COMP1 0x4224 /* (ro) RX completion 2
1057 reg */
1058#define REG_PLUS_RX_COMP1_HEAD 0x4228 /* (ro) RX completion 2
1059 head reg. 4 total. */
1060#define REG_PLUS_RX_COMP1_TAIL 0x422C /* RX completion 2
1061 tail reg. 4 total. */
1062#define REG_PLUS_RX_COMPN_HEAD(x) (REG_PLUS_RX_COMP1_HEAD + 8*((x) - 1))
1063#define REG_PLUS_RX_COMPN_TAIL(x) (REG_PLUS_RX_COMP1_TAIL + 8*((x) - 1))
1064#define REG_PLUS_RX_AE1_THRESH 0x4240 /* RX almost empty 2
1065 thresholds */
1066#define RX_AE1_THRESH_FREE_MASK RX_AE_THRESH_FREE_MASK
1067#define RX_AE1_THRESH_FREE_SHIFT RX_AE_THRESH_FREE_SHIFT
1068
1069/** header parser registers **/
1070
1071/* RX parser configuration register.
1072 * DEFAULT: 0x1651004
1073 */
1074#define REG_HP_CFG 0x4140 /* header parser
1075 configuration reg */
1076#define HP_CFG_PARSE_EN 0x00000001 /* enab header parsing */
1077#define HP_CFG_NUM_CPU_MASK 0x000000FC /* # processors
1078 0 = 64. 0x3f = 63 */
1079#define HP_CFG_NUM_CPU_SHIFT 2
1080#define HP_CFG_SYN_INC_MASK 0x00000100 /* SYN bit won't increment
1081 TCP seq # by one when
1082 stored in FDBM */
1083#define HP_CFG_TCP_THRESH_MASK 0x000FFE00 /* # bytes of TCP data
1084 needed to be considered
1085 for reassembly */
1086#define HP_CFG_TCP_THRESH_SHIFT 9
1087
1088/* access to RX Instruction RAM. 5-bit register/counter holds addr
1089 * of 39 bit entry to be read/written. 32 LSB in _DATA_LOW. 7 MSB in _DATA_HI.
1090 * RX_DMA_EN must be 0 for RX instr PIO access. DATA_HI should be last access
1091 * of sequence.
1092 * DEFAULT: undefined
1093 */
1094#define REG_HP_INSTR_RAM_ADDR 0x4144 /* HP instruction RAM
1095 address */
1096#define HP_INSTR_RAM_ADDR_MASK 0x01F /* 5-bit mask */
1097#define REG_HP_INSTR_RAM_DATA_LOW 0x4148 /* HP instruction RAM
1098 data low */
1099#define HP_INSTR_RAM_LOW_OUTMASK_MASK 0x0000FFFF
1100#define HP_INSTR_RAM_LOW_OUTMASK_SHIFT 0
1101#define HP_INSTR_RAM_LOW_OUTSHIFT_MASK 0x000F0000
1102#define HP_INSTR_RAM_LOW_OUTSHIFT_SHIFT 16
1103#define HP_INSTR_RAM_LOW_OUTEN_MASK 0x00300000
1104#define HP_INSTR_RAM_LOW_OUTEN_SHIFT 20
1105#define HP_INSTR_RAM_LOW_OUTARG_MASK 0xFFC00000
1106#define HP_INSTR_RAM_LOW_OUTARG_SHIFT 22
1107#define REG_HP_INSTR_RAM_DATA_MID 0x414C /* HP instruction RAM
1108 data mid */
1109#define HP_INSTR_RAM_MID_OUTARG_MASK 0x00000003
1110#define HP_INSTR_RAM_MID_OUTARG_SHIFT 0
1111#define HP_INSTR_RAM_MID_OUTOP_MASK 0x0000003C
1112#define HP_INSTR_RAM_MID_OUTOP_SHIFT 2
1113#define HP_INSTR_RAM_MID_FNEXT_MASK 0x000007C0
1114#define HP_INSTR_RAM_MID_FNEXT_SHIFT 6
1115#define HP_INSTR_RAM_MID_FOFF_MASK 0x0003F800
1116#define HP_INSTR_RAM_MID_FOFF_SHIFT 11
1117#define HP_INSTR_RAM_MID_SNEXT_MASK 0x007C0000
1118#define HP_INSTR_RAM_MID_SNEXT_SHIFT 18
1119#define HP_INSTR_RAM_MID_SOFF_MASK 0x3F800000
1120#define HP_INSTR_RAM_MID_SOFF_SHIFT 23
1121#define HP_INSTR_RAM_MID_OP_MASK 0xC0000000
1122#define HP_INSTR_RAM_MID_OP_SHIFT 30
1123#define REG_HP_INSTR_RAM_DATA_HI 0x4150 /* HP instruction RAM
1124 data high */
1125#define HP_INSTR_RAM_HI_VAL_MASK 0x0000FFFF
1126#define HP_INSTR_RAM_HI_VAL_SHIFT 0
1127#define HP_INSTR_RAM_HI_MASK_MASK 0xFFFF0000
1128#define HP_INSTR_RAM_HI_MASK_SHIFT 16
1129
1130/* PIO access into RX Header parser data RAM and flow database.
1131 * 11-bit register. Data fills the LSB portion of bus if less than 32 bits.
1132 * DATA_RAM: write RAM_FDB_DATA with index to access DATA_RAM.
1133 * RAM bytes = 4*(x - 1) + [3:0]. e.g., 0 -> [3:0], 31 -> [123:120]
1134 * FLOWDB: write DATA_RAM_FDB register and then read/write FDB1-12 to access
1135 * flow database.
1136 * RX_DMA_EN must be 0 for RX parser RAM PIO access. RX Parser RAM data reg
1137 * should be the last write access of the write sequence.
1138 * DEFAULT: undefined
1139 */
1140#define REG_HP_DATA_RAM_FDB_ADDR 0x4154 /* HP data and FDB
1141 RAM address */
1142#define HP_DATA_RAM_FDB_DATA_MASK 0x001F /* select 1 of 86 byte
1143 locations in header
1144 parser data ram to
1145 read/write */
1146#define HP_DATA_RAM_FDB_FDB_MASK 0x3F00 /* 1 of 64 353-bit locations
1147 in the flow database */
1148#define REG_HP_DATA_RAM_DATA 0x4158 /* HP data RAM data */
1149
1150/* HP flow database registers: 1 - 12, 0x415C - 0x4188, 4 8-bit bytes
1151 * FLOW_DB(1) = IP_SA[127:96], FLOW_DB(2) = IP_SA[95:64]
1152 * FLOW_DB(3) = IP_SA[63:32], FLOW_DB(4) = IP_SA[31:0]
1153 * FLOW_DB(5) = IP_DA[127:96], FLOW_DB(6) = IP_DA[95:64]
1154 * FLOW_DB(7) = IP_DA[63:32], FLOW_DB(8) = IP_DA[31:0]
1155 * FLOW_DB(9) = {TCP_SP[15:0],TCP_DP[15:0]}
1156 * FLOW_DB(10) = bit 0 has value for flow valid
1157 * FLOW_DB(11) = TCP_SEQ[63:32], FLOW_DB(12) = TCP_SEQ[31:0]
1158 */
1159#define REG_HP_FLOW_DB0 0x415C /* HP flow database 1 reg */
1160#define REG_HP_FLOW_DBN(x) (REG_HP_FLOW_DB0 + (x)*4)
1161
1162/* diagnostics for RX Header Parser block.
1163 * ASUN: the header parser state machine register is used for diagnostics
1164 * purposes. however, the spec doesn't have any details on it.
1165 */
1166#define REG_HP_STATE_MACHINE 0x418C /* (ro) HP state machine */
1167#define REG_HP_STATUS0 0x4190 /* (ro) HP status 1 */
1168#define HP_STATUS0_SAP_MASK 0xFFFF0000 /* SAP */
1169#define HP_STATUS0_L3_OFF_MASK 0x0000FE00 /* L3 offset */
1170#define HP_STATUS0_LB_CPUNUM_MASK 0x000001F8 /* load balancing CPU
1171 number */
1172#define HP_STATUS0_HRP_OPCODE_MASK 0x00000007 /* HRP opcode */
1173
1174#define REG_HP_STATUS1 0x4194 /* (ro) HP status 2 */
1175#define HP_STATUS1_ACCUR2_MASK 0xE0000000 /* accu R2[6:4] */
1176#define HP_STATUS1_FLOWID_MASK 0x1F800000 /* flow id */
1177#define HP_STATUS1_TCP_OFF_MASK 0x007F0000 /* tcp payload offset */
1178#define HP_STATUS1_TCP_SIZE_MASK 0x0000FFFF /* tcp payload size */
1179
1180#define REG_HP_STATUS2 0x4198 /* (ro) HP status 3 */
1181#define HP_STATUS2_ACCUR2_MASK 0xF0000000 /* accu R2[3:0] */
1182#define HP_STATUS2_CSUM_OFF_MASK 0x07F00000 /* checksum start
1183 offset */
1184#define HP_STATUS2_ACCUR1_MASK 0x000FE000 /* accu R1 */
1185#define HP_STATUS2_FORCE_DROP 0x00001000 /* force drop */
1186#define HP_STATUS2_BWO_REASSM 0x00000800 /* batching w/o
1187 reassembly */
1188#define HP_STATUS2_JH_SPLIT_EN 0x00000400 /* jumbo header split
1189 enable */
1190#define HP_STATUS2_FORCE_TCP_NOCHECK 0x00000200 /* force tcp no payload
1191 check */
1192#define HP_STATUS2_DATA_MASK_ZERO 0x00000100 /* mask of data length
1193 equal to zero */
1194#define HP_STATUS2_FORCE_TCP_CHECK 0x00000080 /* force tcp payload
1195 chk */
1196#define HP_STATUS2_MASK_TCP_THRESH 0x00000040 /* mask of payload
1197 threshold */
1198#define HP_STATUS2_NO_ASSIST 0x00000020 /* no assist */
1199#define HP_STATUS2_CTRL_PACKET_FLAG 0x00000010 /* control packet flag */
1200#define HP_STATUS2_TCP_FLAG_CHECK 0x00000008 /* tcp flag check */
1201#define HP_STATUS2_SYN_FLAG 0x00000004 /* syn flag */
1202#define HP_STATUS2_TCP_CHECK 0x00000002 /* tcp payload chk */
1203#define HP_STATUS2_TCP_NOCHECK 0x00000001 /* tcp no payload chk */
1204
1205/* BIST for header parser(HP) and flow database memories (FDBM). set _START
1206 * to start BIST. controller clears _START on completion. _START can also
1207 * be cleared to force termination of BIST. a bit set indicates that that
1208 * memory passed its BIST.
1209 */
1210#define REG_HP_RAM_BIST 0x419C /* HP RAM BIST reg */
1211#define HP_RAM_BIST_HP_DATA_PASS 0x80000000 /* HP data ram */
1212#define HP_RAM_BIST_HP_INSTR0_PASS 0x40000000 /* HP instr ram 0 */
1213#define HP_RAM_BIST_HP_INSTR1_PASS 0x20000000 /* HP instr ram 1 */
1214#define HP_RAM_BIST_HP_INSTR2_PASS 0x10000000 /* HP instr ram 2 */
1215#define HP_RAM_BIST_FDBM_AGE0_PASS 0x08000000 /* FDBM aging RAM0 */
1216#define HP_RAM_BIST_FDBM_AGE1_PASS 0x04000000 /* FDBM aging RAM1 */
1217#define HP_RAM_BIST_FDBM_FLOWID00_PASS 0x02000000 /* FDBM flowid RAM0
1218 bank 0 */
1219#define HP_RAM_BIST_FDBM_FLOWID10_PASS 0x01000000 /* FDBM flowid RAM1
1220 bank 0 */
1221#define HP_RAM_BIST_FDBM_FLOWID20_PASS 0x00800000 /* FDBM flowid RAM2
1222 bank 0 */
1223#define HP_RAM_BIST_FDBM_FLOWID30_PASS 0x00400000 /* FDBM flowid RAM3
1224 bank 0 */
1225#define HP_RAM_BIST_FDBM_FLOWID01_PASS 0x00200000 /* FDBM flowid RAM0
1226 bank 1 */
1227#define HP_RAM_BIST_FDBM_FLOWID11_PASS 0x00100000 /* FDBM flowid RAM1
1228 bank 1 */
1229#define HP_RAM_BIST_FDBM_FLOWID21_PASS 0x00080000 /* FDBM flowid RAM2
1230 bank 1 */
1231#define HP_RAM_BIST_FDBM_FLOWID31_PASS 0x00040000 /* FDBM flowid RAM3
1232 bank 1 */
1233#define HP_RAM_BIST_FDBM_TCPSEQ_PASS 0x00020000 /* FDBM tcp sequence
1234 RAM */
1235#define HP_RAM_BIST_SUMMARY 0x00000002 /* all BIST tests */
1236#define HP_RAM_BIST_START 0x00000001 /* start/stop BIST */
1237
1238
1239/** MAC registers. **/
1240/* reset bits are set using a PIO write and self-cleared after the command
1241 * execution has completed.
1242 */
1243#define REG_MAC_TX_RESET 0x6000 /* TX MAC software reset
1244 command (default: 0x0) */
1245#define REG_MAC_RX_RESET 0x6004 /* RX MAC software reset
1246 command (default: 0x0) */
1247/* execute a pause flow control frame transmission
1248 DEFAULT: 0x0XXXX */
1249#define REG_MAC_SEND_PAUSE 0x6008 /* send pause command reg */
1250#define MAC_SEND_PAUSE_TIME_MASK 0x0000FFFF /* value of pause time
1251 to be sent on network
1252 in units of slot
1253 times */
1254#define MAC_SEND_PAUSE_SEND 0x00010000 /* send pause flow ctrl
1255 frame on network */
1256
1257/* bit set indicates that event occurred. auto-cleared when status register
1258 * is read and have corresponding mask bits in mask register. events will
1259 * trigger an interrupt if the corresponding mask bit is 0.
1260 * status register default: 0x00000000
1261 * mask register default = 0xFFFFFFFF on reset
1262 */
1263#define REG_MAC_TX_STATUS 0x6010 /* TX MAC status reg */
1264#define MAC_TX_FRAME_XMIT 0x0001 /* successful frame
1265 transmission */
1266#define MAC_TX_UNDERRUN 0x0002 /* terminated frame
1267 transmission due to
1268 data starvation in the
1269 xmit data path */
1270#define MAC_TX_MAX_PACKET_ERR 0x0004 /* frame exceeds max allowed
1271 length passed to TX MAC
1272 by the DMA engine */
1273#define MAC_TX_COLL_NORMAL 0x0008 /* rollover of the normal
1274 collision counter */
1275#define MAC_TX_COLL_EXCESS 0x0010 /* rollover of the excessive
1276 collision counter */
1277#define MAC_TX_COLL_LATE 0x0020 /* rollover of the late
1278 collision counter */
1279#define MAC_TX_COLL_FIRST 0x0040 /* rollover of the first
1280 collision counter */
1281#define MAC_TX_DEFER_TIMER 0x0080 /* rollover of the defer
1282 timer */
1283#define MAC_TX_PEAK_ATTEMPTS 0x0100 /* rollover of the peak
1284 attempts counter */
1285
1286#define REG_MAC_RX_STATUS 0x6014 /* RX MAC status reg */
1287#define MAC_RX_FRAME_RECV 0x0001 /* successful receipt of
1288 a frame */
1289#define MAC_RX_OVERFLOW 0x0002 /* dropped frame due to
1290 RX FIFO overflow */
1291#define MAC_RX_FRAME_COUNT 0x0004 /* rollover of receive frame
1292 counter */
1293#define MAC_RX_ALIGN_ERR 0x0008 /* rollover of alignment
1294 error counter */
1295#define MAC_RX_CRC_ERR 0x0010 /* rollover of crc error
1296 counter */
1297#define MAC_RX_LEN_ERR 0x0020 /* rollover of length
1298 error counter */
1299#define MAC_RX_VIOL_ERR 0x0040 /* rollover of code
1300 violation error */
1301
1302/* DEFAULT: 0xXXXX0000 on reset */
1303#define REG_MAC_CTRL_STATUS 0x6018 /* MAC control status reg */
1304#define MAC_CTRL_PAUSE_RECEIVED 0x00000001 /* successful
1305 reception of a
1306 pause control
1307 frame */
1308#define MAC_CTRL_PAUSE_STATE 0x00000002 /* MAC has made a
1309 transition from
1310 "not paused" to
1311 "paused" */
1312#define MAC_CTRL_NOPAUSE_STATE 0x00000004 /* MAC has made a
1313 transition from
1314 "paused" to "not
1315 paused" */
1316#define MAC_CTRL_PAUSE_TIME_MASK 0xFFFF0000 /* value of pause time
1317 operand that was
1318 received in the last
1319 pause flow control
1320 frame */
1321
1322/* layout identical to TX MAC[8:0] */
1323#define REG_MAC_TX_MASK 0x6020 /* TX MAC mask reg */
1324/* layout identical to RX MAC[6:0] */
1325#define REG_MAC_RX_MASK 0x6024 /* RX MAC mask reg */
1326/* layout identical to CTRL MAC[2:0] */
1327#define REG_MAC_CTRL_MASK 0x6028 /* MAC control mask reg */
1328
1329/* to ensure proper operation, CFG_EN must be cleared to 0 and a delay
1330 * imposed before writes to other bits in the TX_MAC_CFG register or any of
1331 * the MAC parameters is performed. delay dependent upon time required to
1332 * transmit a maximum size frame (= MAC_FRAMESIZE_MAX*8/Mbps). e.g.,
1333 * the delay for a 1518-byte frame on a 100Mbps network is 125us.
1334 * alternatively, just poll TX_CFG_EN until it reads back as 0.
1335 * NOTE: on half-duplex 1Gbps, TX_CFG_CARRIER_EXTEND and
1336 * RX_CFG_CARRIER_EXTEND should be set and the SLOT_TIME register should
1337 * be 0x200 (slot time of 512 bytes)
1338 */
1339#define REG_MAC_TX_CFG 0x6030 /* TX MAC config reg */
1340#define MAC_TX_CFG_EN 0x0001 /* enable TX MAC. 0 will
1341 force TXMAC state
1342 machine to remain in
1343 idle state or to
1344 transition to idle state
1345 on completion of an
1346 ongoing packet. */
1347#define MAC_TX_CFG_IGNORE_CARRIER 0x0002 /* disable CSMA/CD deferral
1348 process. set to 1 when
1349 full duplex and 0 when
1350 half duplex */
1351#define MAC_TX_CFG_IGNORE_COLL 0x0004 /* disable CSMA/CD backoff
1352 algorithm. set to 1 when
1353 full duplex and 0 when
1354 half duplex */
1355#define MAC_TX_CFG_IPG_EN 0x0008 /* enable extension of the
1356 Rx-to-TX IPG. after
1357 receiving a frame, TX
1358 MAC will reset its
1359 deferral process to
1360 carrier sense for the
1361 amount of time = IPG0 +
1362 IPG1 and commit to
1363 transmission for time
1364 specified in IPG2. when
1365 0 or when xmitting frames
1366 back-to-back (Tx-to-Tx
1367 IPG), TX MAC ignores
1368 IPG0 and will only use
1369 IPG1 for deferral time.
1370 IPG2 still used. */
1371#define MAC_TX_CFG_NEVER_GIVE_UP_EN 0x0010 /* TX MAC will not easily
1372 give up on frame
1373 xmission. if backoff
1374 algorithm reaches the
1375 ATTEMPT_LIMIT, it will
1376 clear attempts counter
1377 and continue trying to
1378 send the frame as
1379 specified by
1380 GIVE_UP_LIM. when 0,
1381 TX MAC will execute
1382 standard CSMA/CD prot. */
1383#define MAC_TX_CFG_NEVER_GIVE_UP_LIM 0x0020 /* when set, TX MAC will
1384 continue to try to xmit
1385 until successful. when
1386 0, TX MAC will continue
1387 to try xmitting until
1388 successful or backoff
1389 algorithm reaches
1390 ATTEMPT_LIMIT*16 */
1391#define MAC_TX_CFG_NO_BACKOFF 0x0040 /* modify CSMA/CD to disable
1392 backoff algorithm. TX
1393 MAC will not back off
1394 after a xmission attempt
1395 that resulted in a
1396 collision. */
1397#define MAC_TX_CFG_SLOW_DOWN 0x0080 /* modify CSMA/CD so that
1398 deferral process is reset
1399 in response to carrier
1400 sense during the entire
1401 duration of IPG. TX MAC
1402 will only commit to frame
1403 xmission after frame
1404 xmission has actually
1405 begun. */
1406#define MAC_TX_CFG_NO_FCS 0x0100 /* TX MAC will not generate
1407 CRC for all xmitted
1408 packets. when clear, CRC
1409 generation is dependent
1410 upon NO_CRC bit in the
1411 xmit control word from
1412 TX DMA */
1413#define MAC_TX_CFG_CARRIER_EXTEND 0x0200 /* enables xmit part of the
1414 carrier extension
1415 feature. this allows for
1416 longer collision domains
1417 by extending the carrier
1418 and collision window
1419 from the end of FCS until
1420 the end of the slot time
1421 if necessary. Required
1422 for half-duplex at 1Gbps,
1423 clear otherwise. */
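
/* Illustrative sketch of the "poll TX_CFG_EN until it reads back as 0"
 * alternative described above for quiescing the TX MAC before touching other
 * TX MAC parameters. Poll count and delay are arbitrary; assumes
 * readl()/writel()/udelay(). Helper name is hypothetical.
 */
static inline int cas_example_txmac_disable(void __iomem *regs)
{
	int i;

	writel(readl(regs + REG_MAC_TX_CFG) & ~MAC_TX_CFG_EN,
	       regs + REG_MAC_TX_CFG);
	for (i = 0; i < 100; i++) {
		if (!(readl(regs + REG_MAC_TX_CFG) & MAC_TX_CFG_EN))
			return 0;
		udelay(100);
	}
	return -ETIMEDOUT;
}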
1424
1425/* when CRC is not stripped, reassembly packets will not contain the CRC.
1426 * these will be stripped by HRP because it reassembles layer 4 data, and the
1427 * CRC is layer 2. however, non-reassembly packets will still contain the CRC
1428 * when passed to the host. to ensure proper operation, need to wait 3.2ms
1429 * after clearing RX_CFG_EN before writing to any other RX MAC registers
1430 * or other MAC parameters. alternatively, poll RX_CFG_EN until it clears
1431 to 0. similarly, HASH_FILTER_EN and ADDR_FILTER_EN have the same
1432 * restrictions as CFG_EN.
1433 */
1434#define REG_MAC_RX_CFG 0x6034 /* RX MAC config reg */
1435#define MAC_RX_CFG_EN 0x0001 /* enable RX MAC */
1436#define MAC_RX_CFG_STRIP_PAD 0x0002 /* always program to 0.
1437 feature not supported */
1438#define MAC_RX_CFG_STRIP_FCS 0x0004 /* RX MAC will strip the
1439 last 4 bytes of a
1440 received frame. */
1441#define MAC_RX_CFG_PROMISC_EN 0x0008 /* promiscuous mode */
1442#define MAC_RX_CFG_PROMISC_GROUP_EN 0x0010 /* accept all valid
1443 multicast frames (group
1444 bit in DA field set) */
1445#define MAC_RX_CFG_HASH_FILTER_EN 0x0020 /* use hash table to filter
1446 multicast addresses */
1447#define MAC_RX_CFG_ADDR_FILTER_EN 0x0040 /* cause RX MAC to use
1448 address filtering regs
1449 to filter both unicast
1450 and multicast
1451 addresses */
1452#define MAC_RX_CFG_DISABLE_DISCARD 0x0080 /* pass errored frames to
1453 RX DMA by setting BAD
1454 bit but not Abort bit
1455 in the status. CRC,
1456 framing, and length errs
1457 will not increment
1458 error counters. frames
1459 which don't match dest
1460 addr will be passed up
1461 w/ BAD bit set. */
1462#define MAC_RX_CFG_CARRIER_EXTEND 0x0100 /* enable reception of
1463 packet bursts generated
1464 by carrier extension
1465 with packet bursting
1466 senders. only applies
1467 to half-duplex 1Gbps */
1468
1469/* DEFAULT: 0x0 */
1470#define REG_MAC_CTRL_CFG 0x6038 /* MAC control config reg */
1471#define MAC_CTRL_CFG_SEND_PAUSE_EN 0x0001 /* respond to requests for
1472 sending pause flow ctrl
1473 frames */
1474#define MAC_CTRL_CFG_RECV_PAUSE_EN 0x0002 /* respond to received
1475 pause flow ctrl frames */
1476#define MAC_CTRL_CFG_PASS_CTRL 0x0004 /* pass valid MAC ctrl
1477 packets to RX DMA */
1478
1479/* to ensure proper operation, a global initialization sequence should be
1480 * performed when a loopback config is entered or exited. if programmed after
1481 * a hw or global sw reset, RX/TX MAC software reset and initialization
1482 * should be done to ensure stable clocking.
1483 * DEFAULT: 0x0
1484 */
1485#define REG_MAC_XIF_CFG 0x603C /* XIF config reg */
1486#define MAC_XIF_TX_MII_OUTPUT_EN 0x0001 /* enable output drivers
1487 on MII xmit bus */
1488#define MAC_XIF_MII_INT_LOOPBACK 0x0002 /* loopback GMII xmit data
1489 path to GMII recv data
1490 path. phy mode register
1491 clock selection must be
1492 set to GMII mode and
1493 GMII_MODE should be set
1494 to 1. in loopback mode,
1495 REFCLK will drive the
1496 entire mac core. 0 for
1497 normal operation. */
1498#define MAC_XIF_DISABLE_ECHO 0x0004 /* disables receive data
1499 path during packet
1500 xmission. clear to 0
1501 in any full duplex mode,
1502 in any loopback mode,
1503 or in half-duplex SERDES
1504 or SLINK modes. set when
1505 in half-duplex when
1506 using external phy. */
1507#define MAC_XIF_GMII_MODE 0x0008 /* MAC operates with GMII
1508 clocks and datapath */
1509#define MAC_XIF_MII_BUFFER_OUTPUT_EN 0x0010 /* MII_BUF_EN pin. enable
1510 external tristate buffer
1511 on the MII receive
1512 bus. */
1513#define MAC_XIF_LINK_LED 0x0020 /* LINKLED# active (low) */
1514#define MAC_XIF_FDPLX_LED 0x0040 /* FDPLXLED# active (low) */
1515
1516#define REG_MAC_IPG0 0x6040 /* inter-packet gap0 reg.
1517 recommended: 0x00 */
1518#define REG_MAC_IPG1 0x6044 /* inter-packet gap1 reg
1519 recommended: 0x08 */
1520#define REG_MAC_IPG2 0x6048 /* inter-packet gap2 reg
1521 recommended: 0x04 */
1522#define REG_MAC_SLOT_TIME 0x604C /* slot time reg
1523 recommended: 0x40 */
1524#define REG_MAC_FRAMESIZE_MIN 0x6050 /* min frame size reg
1525 recommended: 0x40 */
1526
1527/* FRAMESIZE_MAX holds both the max frame size as well as the max burst size.
1528 * recommended value: 0x2000.05EE
1529 */
1530#define REG_MAC_FRAMESIZE_MAX 0x6054 /* max frame size reg */
1531#define MAC_FRAMESIZE_MAX_BURST_MASK 0x3FFF0000 /* max burst size */
1532#define MAC_FRAMESIZE_MAX_BURST_SHIFT 16
1533#define MAC_FRAMESIZE_MAX_FRAME_MASK 0x00007FFF /* max frame size */
1534#define MAC_FRAMESIZE_MAX_FRAME_SHIFT 0
1535#define REG_MAC_PA_SIZE 0x6058 /* PA size reg. number of
1536 preamble bytes that the
1537 TX MAC will xmit at the
1538 beginning of each frame
1539 value should be 2 or
1540 greater. recommended
1541 value: 0x07 */
1542#define REG_MAC_JAM_SIZE 0x605C /* jam size reg. duration
1543 of jam in units of media
1544 byte time. recommended
1545 value: 0x04 */
1546#define REG_MAC_ATTEMPT_LIMIT 0x6060 /* attempt limit reg. #
1547 of attempts TX MAC will
1548 make to xmit a frame
1549 before it resets its
1550 attempts counter. after
1551 the limit has been
1552 reached, TX MAC may or
1553 may not drop the frame
1554 dependent upon value
1555 in TX_MAC_CFG.
1556 recommended
1557 value: 0x10 */
1558#define REG_MAC_CTRL_TYPE 0x6064 /* MAC control type reg.
1559 type field of a MAC
1560 ctrl frame. recommended
1561 value: 0x8808 */
1562
1563/* mac address registers: 0 - 44, 0x6080 - 0x6130, 4 8-bit bytes.
1564 * register contains comparison
1565 * 0 16 MSB of primary MAC addr [47:32] of DA field
1566 * 1 16 middle bits "" [31:16] of DA field
1567 * 2 16 LSB "" [15:0] of DA field
1568 * 3*x 16MSB of alt MAC addr 1-15 [47:32] of DA field
1569 * 4*x 16 middle bits "" [31:16]
1570 * 5*x 16 LSB "" [15:0]
1571 * 42 16 MSB of MAC CTRL addr [47:32] of DA.
1572 * 43 16 middle bits "" [31:16]
1573 * 44 16 LSB "" [15:0]
1574 * MAC CTRL addr must be the reserved multicast addr for MAC CTRL frames.
1575 * if there is a match, MAC will set the bit for alternative address
1576 * filter pass [15]
1577
1578 * here is the map of registers given MAC address notation: a:b:c:d:e:f
1579 * ab cd ef
1580 * primary addr reg 2 reg 1 reg 0
1581 * alt addr 1 reg 5 reg 4 reg 3
1582 * alt addr x reg 5*x reg 4*x reg 3*x
1583 * ctrl addr reg 44 reg 43 reg 42
1584 */
1585#define REG_MAC_ADDR0 0x6080 /* MAC address 0 reg */
1586#define REG_MAC_ADDRN(x) (REG_MAC_ADDR0 + (x)*4)
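
/* Illustrative sketch: loading the primary MAC address a:b:c:d:e:f per the
 * map above (reg 2 = "ab", reg 1 = "cd", reg 0 = "ef"). Assumes `addr` holds
 * the six address bytes in transmission order; helper name is hypothetical.
 */
static inline void cas_example_set_mac_addr(void __iomem *regs, const u8 *addr)
{
	writel((addr[4] << 8) | addr[5], regs + REG_MAC_ADDRN(0));
	writel((addr[2] << 8) | addr[3], regs + REG_MAC_ADDRN(1));
	writel((addr[0] << 8) | addr[1], regs + REG_MAC_ADDRN(2));
}
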
1587#define REG_MAC_ADDR_FILTER0 0x614C /* address filter 0 reg
1588 [47:32] */
1589#define REG_MAC_ADDR_FILTER1 0x6150 /* address filter 1 reg
1590 [31:16] */
1591#define REG_MAC_ADDR_FILTER2 0x6154 /* address filter 2 reg
1592 [15:0] */
1593#define REG_MAC_ADDR_FILTER2_1_MASK 0x6158 /* address filter 2 and 1
1594 mask reg. 8-bit reg
1595 contains nibble mask for
1596 reg 2 and 1. */
1597#define REG_MAC_ADDR_FILTER0_MASK 0x615C /* address filter 0 mask
1598 reg */
1599
1600/* hash table registers: 0 - 15, 0x6160 - 0x619C, 4 8-bit bytes
1601 * 16-bit registers contain bits of the hash table.
1602 * reg x -> [16*(15 - x) + 15 : 16*(15 - x)].
1603 * e.g., 15 -> [15:0], 0 -> [255:240]
1604 */
1605#define REG_MAC_HASH_TABLE0 0x6160 /* hash table 0 reg */
1606#define REG_MAC_HASH_TABLEN(x) (REG_MAC_HASH_TABLE0 + (x)*4)
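
/* Illustrative sketch: setting one bit of the 256-bit multicast hash table.
 * Per the mapping above, bit b lives in register 15 - (b / 16) at bit
 * position b % 16 (e.g. bit 255 is bit 15 of register 0). Helper name is
 * hypothetical.
 */
static inline void cas_example_set_hash_bit(void __iomem *regs, unsigned int bit)
{
	unsigned int reg = 15 - (bit >> 4);
	u32 val = readl(regs + REG_MAC_HASH_TABLEN(reg));

	writel(val | (1 << (bit & 0xf)), regs + REG_MAC_HASH_TABLEN(reg));
}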
1607
1608/* statistics registers. these registers generate an interrupt on
1609 * overflow. recommended initialization: 0x0000. most are 16-bits except
1610 * for PEAK_ATTEMPTS register which is 8 bits.
1611 */
1612#define REG_MAC_COLL_NORMAL 0x61A0 /* normal collision
1613 counter. */
1614#define REG_MAC_COLL_FIRST 0x61A4 /* first attempt
1615 successful collision
1616 counter */
1617#define REG_MAC_COLL_EXCESS 0x61A8 /* excessive collision
1618 counter */
1619#define REG_MAC_COLL_LATE 0x61AC /* late collision counter */
1620#define REG_MAC_TIMER_DEFER 0x61B0 /* defer timer. time base
1621 is the media byte
1622 clock/256 */
1623#define REG_MAC_ATTEMPTS_PEAK 0x61B4 /* peak attempts reg */
1624#define REG_MAC_RECV_FRAME 0x61B8 /* receive frame counter */
1625#define REG_MAC_LEN_ERR 0x61BC /* length error counter */
1626#define REG_MAC_ALIGN_ERR 0x61C0 /* alignment error counter */
1627#define REG_MAC_FCS_ERR 0x61C4 /* FCS error counter */
1628#define REG_MAC_RX_CODE_ERR 0x61C8 /* RX code violation
1629 error counter */
1630
1631/* misc registers */
1632#define REG_MAC_RANDOM_SEED 0x61CC /* random number seed reg.
1633 10-bit register used as a
1634 seed for the random number
1635 generator for the CSMA/CD
1636 backoff algorithm. only
1637 programmed after power-on
1638 reset and should be a
1639 random value which has a
1640 high likelihood of being
1641 unique for each MAC
1642 attached to a network
1643 segment (e.g., 10 LSB of
1644 MAC address) */
1645
1646/* ASUN: there's a PAUSE_TIMER (ro) described, but it's not in the address
1647 * map
1648 */
1649
1650/* 27-bit register has the current state for key state machines in the MAC */
1651#define REG_MAC_STATE_MACHINE 0x61D0 /* (ro) state machine reg */
1652#define MAC_SM_RLM_MASK 0x07800000
1653#define MAC_SM_RLM_SHIFT 23
1654#define MAC_SM_RX_FC_MASK 0x00700000
1655#define MAC_SM_RX_FC_SHIFT 20
1656#define MAC_SM_TLM_MASK 0x000F0000
1657#define MAC_SM_TLM_SHIFT 16
1658#define MAC_SM_ENCAP_SM_MASK 0x0000F000
1659#define MAC_SM_ENCAP_SM_SHIFT 12
1660#define MAC_SM_TX_REQ_MASK 0x00000C00
1661#define MAC_SM_TX_REQ_SHIFT 10
1662#define MAC_SM_TX_FC_MASK 0x000003C0
1663#define MAC_SM_TX_FC_SHIFT 6
1664#define MAC_SM_FIFO_WRITE_SEL_MASK 0x00000038
1665#define MAC_SM_FIFO_WRITE_SEL_SHIFT 3
1666#define MAC_SM_TX_FIFO_EMPTY_MASK 0x00000007
1667#define MAC_SM_TX_FIFO_EMPTY_SHIFT 0
1668
1669/** MIF registers. the MIF can be programmed in either bit-bang or
1670 * frame mode.
1671 **/
1672#define REG_MIF_BIT_BANG_CLOCK 0x6200 /* MIF bit-bang clock.
1673 1 -> 0 will generate a
1674 rising edge. 0 -> 1 will
1675 generate a falling edge. */
1676#define REG_MIF_BIT_BANG_DATA 0x6204 /* MIF bit-bang data. 1-bit
1677 register generates data */
1678#define REG_MIF_BIT_BANG_OUTPUT_EN 0x6208 /* MIF bit-bang output
1679 enable. enable when
1680 xmitting data from MIF to
1681 transceiver. */
1682
1683/* 32-bit register serves as an instruction register when the MIF is
1684 * programmed in frame mode. load this register w/ a valid instruction
1685 * (as per IEEE 802.3u MII spec). poll this register to check for instruction
1686 * execution completion. during a read operation, this register will also
1687 * contain the 16-bit data returned by the transceiver. unless specified
1688 * otherwise, fields are considered "don't care" when polling for
1689 * completion.
1690 */
1691#define REG_MIF_FRAME 0x620C /* MIF frame/output reg */
1692#define MIF_FRAME_START_MASK 0xC0000000 /* start of frame.
1693 load w/ 01 when
1694 issuing an instr */
1695#define MIF_FRAME_ST 0x40000000 /* STart of frame */
1696#define MIF_FRAME_OPCODE_MASK 0x30000000 /* opcode. 01 for a
1697 write. 10 for a
1698 read */
1699#define MIF_FRAME_OP_READ 0x20000000 /* read OPcode */
1700#define MIF_FRAME_OP_WRITE 0x10000000 /* write OPcode */
1701#define MIF_FRAME_PHY_ADDR_MASK 0x0F800000 /* phy address. when
1702 issuing an instr,
1703 this field should be
1704 loaded w/ the XCVR
1705 addr */
1706#define MIF_FRAME_PHY_ADDR_SHIFT 23
1707#define MIF_FRAME_REG_ADDR_MASK 0x007C0000 /* register address.
1708 when issuing an instr,
1709 addr of register
1710 to be read/written */
1711#define MIF_FRAME_REG_ADDR_SHIFT 18
1712#define MIF_FRAME_TURN_AROUND_MSB 0x00020000 /* turn around, MSB.
1713 when issuing an instr,
1714 set this bit to 1 */
1715#define MIF_FRAME_TURN_AROUND_LSB 0x00010000 /* turn around, LSB.
1716 when issuing an instr,
1717 set this bit to 0.
1718 when polling for
1719 completion, 1 means
1720 that instr execution
1721 has been completed */
1722#define MIF_FRAME_DATA_MASK 0x0000FFFF /* instruction payload
1723 load with 16-bit data
1724 to be written in
1725 transceiver reg for a
1726 write. doesn't matter
1727 in a read. when
1728 polling for
1729 completion, field is
1730 "don't care" for write
1731 and 16-bit data
1732 returned by the
1733 transceiver for a
1734 read (if valid bit
1735 is set) */
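
/* Illustrative sketch of a frame-mode MII read using the fields above:
 * ST = 01, OP = 10 (read), phy/register address, turn-around MSB = 1,
 * LSB = 0, then poll the turn-around LSB until it reads 1 and take the
 * 16-bit data field. Poll count and delay are arbitrary; helper name is
 * hypothetical.
 */
static inline int cas_example_mif_frame_read(void __iomem *regs, int phy,
					     int reg, u16 *val)
{
	int i;

	writel(MIF_FRAME_ST | MIF_FRAME_OP_READ |
	       (phy << MIF_FRAME_PHY_ADDR_SHIFT) |
	       (reg << MIF_FRAME_REG_ADDR_SHIFT) |
	       MIF_FRAME_TURN_AROUND_MSB, regs + REG_MIF_FRAME);
	for (i = 0; i < 100; i++) {
		u32 frame = readl(regs + REG_MIF_FRAME);

		if (frame & MIF_FRAME_TURN_AROUND_LSB) {
			*val = frame & MIF_FRAME_DATA_MASK;
			return 0;
		}
		udelay(10);
	}
	return -ETIMEDOUT;
}
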
1736#define REG_MIF_CFG 0x6210 /* MIF config reg */
1737#define MIF_CFG_PHY_SELECT 0x0001 /* 1 -> select MDIO_1
1738 0 -> select MDIO_0 */
1739#define MIF_CFG_POLL_EN 0x0002 /* enable polling
1740 mechanism. if set,
1741 BB_MODE should be 0 */
1742#define MIF_CFG_BB_MODE 0x0004 /* 1 -> bit-bang mode
1743 0 -> frame mode */
1744#define MIF_CFG_POLL_REG_MASK 0x00F8 /* register address to be
1745 used by polling mode.
1746 only meaningful if POLL_EN
1747 is set to 1 */
1748#define MIF_CFG_POLL_REG_SHIFT 3
1749#define MIF_CFG_MDIO_0 0x0100 /* (ro) dual purpose.
1750 when MDIO_0 is idle,
1751 1 -> transceiver is
1752 connected to MDIO_0.
1753 when MIF is communicating
1754 w/ MDIO_0 in bit-bang
1755 mode, this bit indicates
1756 the incoming bit stream
1757 during a read op */
1758#define MIF_CFG_MDIO_1 0x0200 /* (ro) dual purpose.
1759 when MDIO_1 is idle,
1760 1 -> transceiver is
1761 connected to MDIO_1.
1762 when MIF is communicating
1763 w/ MDIO_1 in bit-bang
1764 mode, this bit indicates
1765 the incoming bit stream
1766 during a read op */
1767#define MIF_CFG_POLL_PHY_MASK 0x7C00 /* transceiver address to
1768 be polled */
1769#define MIF_CFG_POLL_PHY_SHIFT 10
1770
1771/* 16-bit register used to determine which bits in the POLL_STATUS portion of
1772 * the MIF_STATUS register will cause an interrupt. if a mask bit is 0,
1773 * corresponding bit of the POLL_STATUS will generate a MIF interrupt when
1774 * set. DEFAULT: 0xFFFF
1775 */
1776#define REG_MIF_MASK 0x6214 /* MIF mask reg */
1777
1778/* 32-bit register used when in poll mode. auto-cleared after being read */
1779#define REG_MIF_STATUS 0x6218 /* MIF status reg */
1780#define MIF_STATUS_POLL_DATA_MASK 0xFFFF0000 /* poll data contains
1781 the "latest image"
1782 update of the XCVR
1783 reg being read */
1784#define MIF_STATUS_POLL_DATA_SHIFT 16
1785#define MIF_STATUS_POLL_STATUS_MASK 0x0000FFFF /* poll status indicates
1786 which bits in the
1787 POLL_DATA field have
1788 changed since the
1789 MIF_STATUS reg was
1790 last read */
1791#define MIF_STATUS_POLL_STATUS_SHIFT 0
1792
1793/* 7-bit register has current state for all state machines in the MIF */
1794#define REG_MIF_STATE_MACHINE 0x621C /* MIF state machine reg */
1795#define MIF_SM_CONTROL_MASK 0x07 /* control state machine
1796 state */
1797#define MIF_SM_EXECUTION_MASK 0x60 /* execution state machine
1798 state */
1799
1800/** PCS/Serialink. the following registers are equivalent to the standard
1801 * MII management registers except that they're directly mapped in
1802 * Cassini's register space.
1803 **/
1804
1805/* the auto-negotiation enable bit should be programmed the same at
1806 * the link partner as in the local device to enable auto-negotiation to
1807 * complete. when that bit is reprogrammed, auto-neg/manual config is
1808 * restarted automatically.
1809 * DEFAULT: 0x1040
1810 */
1811#define REG_PCS_MII_CTRL 0x9000 /* PCS MII control reg */
1812#define PCS_MII_CTRL_1000_SEL 0x0040 /* reads 1. ignored on
1813 writes */
1814#define PCS_MII_CTRL_COLLISION_TEST 0x0080 /* COL signal at the PCS
1815 to MAC interface is
1816 activated regardless
1817 of activity */
1818#define PCS_MII_CTRL_DUPLEX 0x0100 /* forced 0x0. PCS
1819 behaviour same for
1820 half and full dplx */
1821#define PCS_MII_RESTART_AUTONEG 0x0200 /* self clearing.
1822 restart auto-
1823 negotiation */
1824#define PCS_MII_ISOLATE 0x0400 /* read as 0. ignored
1825 on writes */
1826#define PCS_MII_POWER_DOWN 0x0800 /* read as 0. ignored
1827 on writes */
1828#define PCS_MII_AUTONEG_EN 0x1000 /* default 1. PCS goes
1829 through automatic
1830 link config before it
1831 can be used. when 0,
1832 link can be used
1833 w/out any link config
1834 phase */
1835#define PCS_MII_10_100_SEL 0x2000 /* read as 0. ignored on
1836 writes */
1837#define PCS_MII_RESET 0x8000 /* reset PCS. self-clears
1838 when done */
1839
1840/* DEFAULT: 0x0108 */
1841#define REG_PCS_MII_STATUS 0x9004 /* PCS MII status reg */
1842#define PCS_MII_STATUS_EXTEND_CAP 0x0001 /* reads 0 */
1843#define PCS_MII_STATUS_JABBER_DETECT 0x0002 /* reads 0 */
1844#define PCS_MII_STATUS_LINK_STATUS 0x0004 /* 1 -> link up.
1845 0 -> link down. 0 is
1846 latched so that 0 is
1847 kept until read. read
1848 2x to determine if the
1849 link has gone up again */
1850#define PCS_MII_STATUS_AUTONEG_ABLE 0x0008 /* reads 1 (able to perform
1851 auto-neg) */
1852#define PCS_MII_STATUS_REMOTE_FAULT 0x0010 /* 1 -> remote fault detected
1853 from received link code
1854 word. only valid after
1855 auto-neg completed */
1856#define PCS_MII_STATUS_AUTONEG_COMP 0x0020 /* 1 -> auto-negotiation
1857 completed
1858 0 -> auto-negotiation not
1859 completed */
1860#define PCS_MII_STATUS_EXTEND_STATUS 0x0100 /* reads as 1. used as an
1861 indication that this is
1862 a 1000 Base-X PHY. writes
1863 to it are ignored */
1864
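/* Illustrative sketch, not part of the original header: LINK_STATUS is
 * latched at 0, so the register is read twice and the second read reflects
 * the current link state. 'regs' is a hypothetical ioremapped register base
 * and the helper name is an assumption.
 */
static inline int cas_pcs_link_up(void __iomem *regs)
{
	u32 stat;

	(void) readl(regs + REG_PCS_MII_STATUS);	/* discard latched 0 */
	stat = readl(regs + REG_PCS_MII_STATUS);
	return (stat & PCS_MII_STATUS_LINK_STATUS) != 0;
}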
1865/* used during auto-negotiation.
1866 * DEFAULT: 0x00E0
1867 */
1868#define REG_PCS_MII_ADVERT 0x9008 /* PCS MII advertisement
1869 reg */
1870#define PCS_MII_ADVERT_FD 0x0020 /* advertise full duplex
1871 1000 Base-X */
1872#define PCS_MII_ADVERT_HD 0x0040 /* advertise half-duplex
1873 1000 Base-X */
1874#define PCS_MII_ADVERT_SYM_PAUSE 0x0080 /* advertise PAUSE
1875 symmetric capability */
1876#define PCS_MII_ADVERT_ASYM_PAUSE 0x0100 /* advertises PAUSE
1877 asymmetric capability */
1878#define PCS_MII_ADVERT_RF_MASK 0x3000 /* remote fault. write bit13
1879 to optionally indicate to
1880 link partner that chip is
1881 going off-line. bit12 will
1882 get set when signal
1883 detect == FAIL and will
1884 remain set until
1885 successful negotiation */
1886#define PCS_MII_ADVERT_ACK 0x4000 /* (ro) */
1887#define PCS_MII_ADVERT_NEXT_PAGE 0x8000 /* (ro) forced 0x0 */
1888
1889/* contents updated as a result of autonegotiation. layout and definitions
1890 * identical to PCS_MII_ADVERT
1891 */
1892#define REG_PCS_MII_LPA 0x900C /* PCS MII link partner
1893 ability reg */
1894#define PCS_MII_LPA_FD PCS_MII_ADVERT_FD
1895#define PCS_MII_LPA_HD PCS_MII_ADVERT_HD
1896#define PCS_MII_LPA_SYM_PAUSE PCS_MII_ADVERT_SYM_PAUSE
1897#define PCS_MII_LPA_ASYM_PAUSE PCS_MII_ADVERT_ASYM_PAUSE
1898#define PCS_MII_LPA_RF_MASK PCS_MII_ADVERT_RF_MASK
1899#define PCS_MII_LPA_ACK PCS_MII_ADVERT_ACK
1900#define PCS_MII_LPA_NEXT_PAGE PCS_MII_ADVERT_NEXT_PAGE
1901
1902/* DEFAULT: 0x0 */
1903#define REG_PCS_CFG 0x9010 /* PCS config reg */
1904#define PCS_CFG_EN 0x01 /* enable PCS. must be
1905 0 when modifying
1906 PCS_MII_ADVERT */
1907#define PCS_CFG_SD_OVERRIDE 0x02 /* sets signal detect to
1908 OK. bit is
1909 non-resettable */
1910#define PCS_CFG_SD_ACTIVE_LOW 0x04 /* changes interpretation
1911 of optical signal to make
1912 signal detect okay when
1913 signal is low */
1914#define PCS_CFG_JITTER_STUDY_MASK 0x18 /* used to make jitter
1915 measurements. a single
1916 code group is xmitted
1917 regularly.
1918 0x0 = normal operation
1919 0x1 = high freq test
1920 pattern, D21.5
1921 0x2 = low freq test
1922 pattern, K28.7
1923 0x3 = reserved */
1924#define PCS_CFG_10MS_TIMER_OVERRIDE 0x20 /* shortens 10-20ms auto-
1925 negotiation timer to
1926 a few cycles for test
1927 purposes */
1928
1929/* used for diagnostic purposes. bits 20-22 autoclear on read */
1930#define REG_PCS_STATE_MACHINE 0x9014 /* (ro) PCS state machine
1931 and diagnostic reg */
1932#define PCS_SM_TX_STATE_MASK 0x0000000F /* 0 and 1 indicate
1933 xmission of idle.
1934 otherwise, xmission of
1935 a packet */
1936#define PCS_SM_RX_STATE_MASK 0x000000F0 /* 0 indicates reception
1937 of idle. otherwise,
1938 reception of packet */
1939#define PCS_SM_WORD_SYNC_STATE_MASK 0x00000700 /* 0 indicates loss of
1940 sync */
1941#define PCS_SM_SEQ_DETECT_STATE_MASK 0x00001800 /* cycling through 0-3
1942 indicates reception of
1943 Config codes. cycling
1944 through 0-1 indicates
1945 reception of idles */
1946#define PCS_SM_LINK_STATE_MASK 0x0001E000
1947#define SM_LINK_STATE_UP 0x00016000 /* link state is up */
1948
1949#define PCS_SM_LOSS_LINK_C 0x00100000 /* loss of link due to
1950 receipt of Config
1951 codes */
1952#define PCS_SM_LOSS_LINK_SYNC 0x00200000 /* loss of link due to
1953 loss of sync */
1954#define PCS_SM_LOSS_SIGNAL_DETECT 0x00400000 /* signal detect goes
1955 from OK to FAIL. bit29
1956 will also be set if
1957 this is set */
1958#define PCS_SM_NO_LINK_BREAKLINK 0x01000000 /* link not up due to
1959 receipt of breaklink
1960 C codes from partner.
1961 C codes w/ 0 content
1962 received triggering
1963 start/restart of
1964 autonegotiation.
1965 should be sent for
1966 no longer than 20ms */
1967#define PCS_SM_NO_LINK_SERDES 0x02000000 /* serdes being
1968 initialized. see serdes
1969 state reg */
1970#define PCS_SM_NO_LINK_C 0x04000000 /* C codes not stable or
1971 not received */
1972#define PCS_SM_NO_LINK_SYNC 0x08000000 /* word sync not
1973 achieved */
1974#define PCS_SM_NO_LINK_WAIT_C 0x10000000 /* waiting for C codes
1975 w/ ack bit set */
1976#define PCS_SM_NO_LINK_NO_IDLE 0x20000000 /* link partner continues
1977 to send C codes
1978 instead of idle
1979 symbols or pkt data */
1980
1981/* this register indicates interrupt changes in specific PCS MII status bits.
1982 * PCS_INT may be masked at the ISR level. only a single bit is implemented
1983 * for link status change.
1984 */
1985#define REG_PCS_INTR_STATUS 0x9018 /* PCS interrupt status */
1986#define PCS_INTR_STATUS_LINK_CHANGE 0x04 /* link status has changed
1987 since last read */
1988
1989/* control which network interface is used. no more than one bit should
1990 * be set.
1991 * DEFAULT: none
1992 */
1993#define REG_PCS_DATAPATH_MODE 0x9050 /* datapath mode reg */
1994#define PCS_DATAPATH_MODE_MII 0x00 /* PCS is not used and
1995 MII/GMII is selected.
1996 selection between MII and
1997 GMII is controlled by
1998 XIF_CFG */
1999#define PCS_DATAPATH_MODE_SERDES 0x02 /* PCS is used via the
2000 10-bit interface */
2001
2002/* input to serdes chip or serialink block */
2003#define REG_PCS_SERDES_CTRL 0x9054 /* serdes control reg */
2004#define PCS_SERDES_CTRL_LOOPBACK 0x01 /* enable loopback on
2005 serdes interface */
2006#define PCS_SERDES_CTRL_SYNCD_EN 0x02 /* enable sync carrier
2007 detection. should be
2008 0x0 for normal
2009 operation */
2010#define PCS_SERDES_CTRL_LOCKREF 0x04 /* frequency-lock RBC[0:1]
2011 to REFCLK when set.
2012 when clear, receiver
2013 clock locks to incoming
2014 serial data */
2015
2016/* multiplex test outputs into the PROM address (PA_3 through PA_0) pins.
2017 * should be 0x0 for normal operations.
2018 * 0b000 normal operation, PROM address[3:0] selected
2019 * 0b001 rxdma req, rxdma ack, rxdma ready, rxdma read
2020 * 0b010 rxmac req, rx ack, rx tag, rx clk shared
2021 * 0b011 txmac req, tx ack, tx tag, tx retry req
2022 * 0b100 tx tp3, tx tp2, tx tp1, tx tp0
2023 * 0b101 R period RX, R period TX, R period HP, R period BIM
2024 * DEFAULT: 0x0
2025 */
2026#define REG_PCS_SHARED_OUTPUT_SEL 0x9058 /* shared output select */
2027#define PCS_SOS_PROM_ADDR_MASK 0x0007
2028
2029/* used for diagnostics. this register indicates progress of the SERDES
2030 * boot up.
2031 * 0b00 undergoing reset
2032 * 0b01 waiting 500us while lockrefn is asserted
2033 * 0b10 waiting for comma detect
2034 * 0b11 receive data is synchronized
2035 * DEFAULT: 0x0
2036 */
2037#define REG_PCS_SERDES_STATE 0x905C /* (ro) serdes state */
2038#define PCS_SERDES_STATE_MASK 0x03
2039
2040/* used for diagnostics. indicates number of packets transmitted or received.
2041 * counters rollover w/out generating an interrupt.
2042 * DEFAULT: 0x0
2043 */
2044#define REG_PCS_PACKET_COUNT 0x9060 /* (ro) PCS packet counter */
2045#define PCS_PACKET_COUNT_TX 0x000007FF /* pkts xmitted by PCS */
2046#define PCS_PACKET_COUNT_RX 0x07FF0000 /* pkts recvd by PCS
2047 whether they
2048 encountered an error
2049 or not */
2050
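/* Illustrative sketch, not part of the original header: splitting the packet
 * counter into its TX and RX fields. The RX field occupies bits 16..26, so a
 * shift of 16 is assumed here (no _SHIFT macro is defined above). Helper
 * names are hypothetical.
 */
static inline u32 cas_pcs_tx_packets(u32 cnt)
{
	return cnt & PCS_PACKET_COUNT_TX;
}

static inline u32 cas_pcs_rx_packets(u32 cnt)
{
	return (cnt & PCS_PACKET_COUNT_RX) >> 16;
}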
2051/** LocalBus Devices. the following provides run-time access to the
2052 * Cassini's PROM
2053 **/
2054#define REG_EXPANSION_ROM_RUN_START 0x100000 /* expansion rom run time
2055 access */
2056#define REG_EXPANSION_ROM_RUN_END 0x17FFFF
2057
2058#define REG_SECOND_LOCALBUS_START 0x180000 /* secondary local bus
2059 device */
2060#define REG_SECOND_LOCALBUS_END 0x1FFFFF
2061
2062/* entropy device */
2063#define REG_ENTROPY_START REG_SECOND_LOCALBUS_START
2064#define REG_ENTROPY_DATA (REG_ENTROPY_START + 0x00)
2065#define REG_ENTROPY_STATUS (REG_ENTROPY_START + 0x04)
2066#define ENTROPY_STATUS_DRDY 0x01
2067#define ENTROPY_STATUS_BUSY 0x02
2068#define ENTROPY_STATUS_CIPHER 0x04
2069#define ENTROPY_STATUS_BYPASS_MASK 0x18
2070#define REG_ENTROPY_MODE (REG_ENTROPY_START + 0x05)
2071#define ENTROPY_MODE_KEY_MASK 0x07
2072#define ENTROPY_MODE_ENCRYPT 0x40
2073#define REG_ENTROPY_RAND_REG (REG_ENTROPY_START + 0x06)
2074#define REG_ENTROPY_RESET (REG_ENTROPY_START + 0x07)
2075#define ENTROPY_RESET_DES_IO 0x01
2076#define ENTROPY_RESET_STC_MODE 0x02
2077#define ENTROPY_RESET_KEY_CACHE 0x04
2078#define ENTROPY_RESET_IV 0x08
2079#define REG_ENTROPY_IV (REG_ENTROPY_START + 0x08)
2080#define REG_ENTROPY_KEY0 (REG_ENTROPY_START + 0x10)
2081#define REG_ENTROPY_KEYN(x) (REG_ENTROPY_KEY0 + 4*(x))
2082
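/* Illustrative sketch, not part of the original header: one way the entropy
 * block could be polled for a random byte using the bits above. Register
 * widths and the helper name are assumptions, not taken from the driver.
 */
static inline int cas_entropy_read_byte(void __iomem *regs, u8 *out)
{
	if (!(readb(regs + REG_ENTROPY_STATUS) & ENTROPY_STATUS_DRDY))
		return -1;	/* no random data ready yet */
	*out = readb(regs + REG_ENTROPY_RAND_REG);
	return 0;
}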
2083/* phys of interest w/ their special mii registers */
2084#define PHY_LUCENT_B0 0x00437421
2085#define LUCENT_MII_REG 0x1F
2086
2087#define PHY_NS_DP83065 0x20005c78
2088#define DP83065_MII_MEM 0x16
2089#define DP83065_MII_REGD 0x1D
2090#define DP83065_MII_REGE 0x1E
2091
2092#define PHY_BROADCOM_5411 0x00206071
2093#define PHY_BROADCOM_B0 0x00206050
2094#define BROADCOM_MII_REG4 0x14
2095#define BROADCOM_MII_REG5 0x15
2096#define BROADCOM_MII_REG7 0x17
2097#define BROADCOM_MII_REG8 0x18
2098
2099#define CAS_MII_ANNPTR 0x07
2100#define CAS_MII_ANNPRR 0x08
2101#define CAS_MII_1000_CTRL 0x09
2102#define CAS_MII_1000_STATUS 0x0A
2103#define CAS_MII_1000_EXTEND 0x0F
2104
2105#define CAS_BMSR_1000_EXTEND 0x0100 /* supports 1000Base-T extended status */
2106/*
2107 * if autoneg is disabled, here's the table:
2108 * BMCR_SPEED100 = 100Mbps
2109 * BMCR_SPEED1000 = 1000Mbps
2110 * ~(BMCR_SPEED100 | BMCR_SPEED1000) = 10Mbps
2111 */
2112#define CAS_BMCR_SPEED1000 0x0040 /* Select 1000Mbps */
2113
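/* Illustrative sketch, not part of the original header: decoding the forced
 * speed from a BMCR value per the table above. BMCR_SPEED100 comes from
 * <linux/mii.h>; the helper name is hypothetical.
 */
static inline int cas_bmcr_speed(u16 bmcr)
{
	if (bmcr & CAS_BMCR_SPEED1000)
		return 1000;
	if (bmcr & BMCR_SPEED100)
		return 100;
	return 10;
}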
2114#define CAS_ADVERTISE_1000HALF 0x0100
2115#define CAS_ADVERTISE_1000FULL 0x0200
2116#define CAS_ADVERTISE_PAUSE 0x0400
2117#define CAS_ADVERTISE_ASYM_PAUSE 0x0800
2118
2119/* regular lpa register */
2120#define CAS_LPA_PAUSE CAS_ADVERTISE_PAUSE
2121#define CAS_LPA_ASYM_PAUSE CAS_ADVERTISE_ASYM_PAUSE
2122
2123/* 1000_STATUS register */
2124#define CAS_LPA_1000HALF 0x0400
2125#define CAS_LPA_1000FULL 0x0800
2126
2127#define CAS_EXTEND_1000XFULL 0x8000
2128#define CAS_EXTEND_1000XHALF 0x4000
2129#define CAS_EXTEND_1000TFULL 0x2000
2130#define CAS_EXTEND_1000THALF 0x1000
2131
2132/* cassini header parser firmware */
2133typedef struct cas_hp_inst {
2134 const char *note;
2135
2136 u16 mask, val;
2137
2138 u8 op;
2139 u8 soff, snext; /* if match succeeds, new offset and match */
2140 u8 foff, fnext; /* if match fails, new offset and match */
2141 /* output info */
2142 u8 outop; /* output opcode */
2143
2144 u16 outarg; /* output argument */
2145 u8 outenab; /* output enable: 0 = not, 1 = if match,
2146 2 = if !match, 3 = always */
2147 u8 outshift; /* barrel shift right, 4 bits */
2148 u16 outmask;
2149} cas_hp_inst_t;
2150
2151/* comparison */
2152#define OP_EQ 0 /* packet == value */
2153#define OP_LT 1 /* packet < value */
2154#define OP_GT 2 /* packet > value */
2155#define OP_NP 3 /* new packet */
2156
2157/* output opcodes */
2158#define CL_REG 0
2159#define LD_FID 1
2160#define LD_SEQ 2
2161#define LD_CTL 3
2162#define LD_SAP 4
2163#define LD_R1 5
2164#define LD_L3 6
2165#define LD_SUM 7
2166#define LD_HDR 8
2167#define IM_FID 9
2168#define IM_SEQ 10
2169#define IM_SAP 11
2170#define IM_R1 12
2171#define IM_CTL 13
2172#define LD_LEN 14
2173#define ST_FLG 15
2174
2175/* match step #s for IP4TCP4 */
2176#define S1_PCKT 0
2177#define S1_VLAN 1
2178#define S1_CFI 2
2179#define S1_8023 3
2180#define S1_LLC 4
2181#define S1_LLCc 5
2182#define S1_IPV4 6
2183#define S1_IPV4c 7
2184#define S1_IPV4F 8
2185#define S1_TCP44 9
2186#define S1_IPV6 10
2187#define S1_IPV6L 11
2188#define S1_IPV6c 12
2189#define S1_TCP64 13
2190#define S1_TCPSQ 14
2191#define S1_TCPFG 15
2192#define S1_TCPHL 16
2193#define S1_TCPHc 17
2194#define S1_CLNP 18
2195#define S1_CLNP2 19
2196#define S1_DROP 20
2197#define S2_HTTP 21
2198#define S1_ESP4 22
2199#define S1_AH4 23
2200#define S1_ESP6 24
2201#define S1_AH6 25
2202
2203#define CAS_PROG_IP46TCP4_PREAMBLE \
2204{ "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT, \
2205 CL_REG, 0x3ff, 1, 0x0, 0x0000}, \
2206{ "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, \
2207 IM_CTL, 0x00a, 3, 0x0, 0xffff}, \
2208{ "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, \
2209 CL_REG, 0x000, 0, 0x0, 0x0000}, \
2210{ "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, \
2211 CL_REG, 0x000, 0, 0x0, 0x0000}, \
2212{ "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, \
2213 CL_REG, 0x000, 0, 0x0, 0x0000}, \
2214{ "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, \
2215 CL_REG, 0x000, 0, 0x0, 0x0000}, \
2216{ "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, \
2217 LD_SAP, 0x100, 3, 0x0, 0xffff}, \
2218{ "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, \
2219 LD_SUM, 0x00a, 1, 0x0, 0x0000}, \
2220{ "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, \
2221 LD_LEN, 0x03e, 1, 0x0, 0xffff}, \
2222{ "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP, \
2223 LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ \
2224{ "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, \
2225 LD_SUM, 0x015, 1, 0x0, 0x0000}, \
2226{ "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, \
2227 IM_R1, 0x128, 1, 0x0, 0xffff}, \
2228{ "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, \
2229 LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ \
2230{ "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP, \
2231 LD_LEN, 0x03f, 1, 0x0, 0xffff}
2232
2233#ifdef USE_HP_IP46TCP4
2234static cas_hp_inst_t cas_prog_ip46tcp4tab[] = {
2235 CAS_PROG_IP46TCP4_PREAMBLE,
2236 { "TCP seq", /* DADDR should point to dest port */
2237 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ,
2238 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
2239 { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0,
2240 S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */
2241 { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0,
2242 S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000},
2243 { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
2244 S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff},
2245 { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
2246 IM_CTL, 0x001, 3, 0x0, 0x0001},
2247 { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2248 IM_CTL, 0x000, 0, 0x0, 0x0000},
2249 { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2250 IM_CTL, 0x080, 3, 0x0, 0xffff},
2251 { NULL },
2252};
2253#ifdef HP_IP46TCP4_DEFAULT
2254#define CAS_HP_FIRMWARE cas_prog_ip46tcp4tab
2255#endif
2256#endif
2257
2258/*
2259 * Alternate table load which excludes HTTP server traffic from reassembly.
2260 * It is substantially similar to the basic table, with one extra state
2261 * and a few extra compares. */
2262#ifdef USE_HP_IP46TCP4NOHTTP
2263static cas_hp_inst_t cas_prog_ip46tcp4nohttptab[] = {
2264 CAS_PROG_IP46TCP4_PREAMBLE,
2265 { "TCP seq", /* DADDR should point to dest port */
2266 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ,
2267 0x081, 3, 0x0, 0xffff} , /* Load TCP seq # */
2268 { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0,
2269 S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f, }, /* Load TCP flags */
2270 { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc,
2271 LD_R1, 0x205, 3, 0xB, 0xf000},
2272 { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2273 LD_HDR, 0x0ff, 3, 0x0, 0xffff},
2274 { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
2275 IM_CTL, 0x001, 3, 0x0, 0x0001},
2276 { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2277 CL_REG, 0x002, 3, 0x0, 0x0000},
2278 { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2279 IM_CTL, 0x080, 3, 0x0, 0xffff},
2280 { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2281 IM_CTL, 0x044, 3, 0x0, 0xffff},
2282 { NULL },
2283};
2284#ifdef HP_IP46TCP4NOHTTP_DEFAULT
2285#define CAS_HP_FIRMWARE cas_prog_ip46tcp4nohttptab
2286#endif
2287#endif
2288
2289/* match step #s for IP4FRAG */
2290#define S3_IPV6c 11
2291#define S3_TCP64 12
2292#define S3_TCPSQ 13
2293#define S3_TCPFG 14
2294#define S3_TCPHL 15
2295#define S3_TCPHc 16
2296#define S3_FRAG 17
2297#define S3_FOFF 18
2298#define S3_CLNP 19
2299
2300#ifdef USE_HP_IP4FRAG
2301static cas_hp_inst_t cas_prog_ip4fragtab[] = {
2302 { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT,
2303 CL_REG, 0x3ff, 1, 0x0, 0x0000},
2304 { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023,
2305 IM_CTL, 0x00a, 3, 0x0, 0xffff},
2306 { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S3_CLNP, 1, S1_8023,
2307 CL_REG, 0x000, 0, 0x0, 0x0000},
2308 { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4,
2309 CL_REG, 0x000, 0, 0x0, 0x0000},
2310 { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S3_CLNP,
2311 CL_REG, 0x000, 0, 0x0, 0x0000},
2312 { "LLCc?",0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S3_CLNP,
2313 CL_REG, 0x000, 0, 0x0, 0x0000},
2314 { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6,
2315 LD_SAP, 0x100, 3, 0x0, 0xffff},
2316 { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S3_CLNP,
2317 LD_SUM, 0x00a, 1, 0x0, 0x0000},
2318 { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S3_FRAG,
2319 LD_LEN, 0x03e, 3, 0x0, 0xffff},
2320 { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S3_TCPSQ, 0, S3_CLNP,
2321 LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
2322 { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S3_IPV6c, 0, S3_CLNP,
2323 LD_SUM, 0x015, 1, 0x0, 0x0000},
2324 { "IPV6 cont?", 0xf000, 0x6000, OP_EQ, 3, S3_TCP64, 0, S3_CLNP,
2325 LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
2326 { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S3_TCPSQ, 0, S3_CLNP,
2327 LD_LEN, 0x03f, 1, 0x0, 0xffff},
2328 { "TCP seq", /* DADDR should point to dest port */
2329 0x0000, 0x0000, OP_EQ, 0, S3_TCPFG, 4, S3_TCPFG, LD_SEQ,
2330 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
2331 { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHL, 0,
2332 S3_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */
2333 { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHc, 0, S3_TCPHc,
2334 LD_R1, 0x205, 3, 0xB, 0xf000},
2335 { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2336 LD_HDR, 0x0ff, 3, 0x0, 0xffff},
2337 { "IP4 Fragment", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF,
2338 LD_FID, 0x103, 3, 0x0, 0xffff}, /* FID IP4 src+dst */
2339 { "IP4 frag offset", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF,
2340 LD_SEQ, 0x040, 1, 0xD, 0xfff8},
2341 { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2342 IM_CTL, 0x001, 3, 0x0, 0x0001},
2343 { NULL },
2344};
2345#ifdef HP_IP4FRAG_DEFAULT
2346#define CAS_HP_FIRMWARE cas_prog_ip4fragtab
2347#endif
2348#endif
2349
2350/*
2351 * Alternate table which does batching without reassembly
2352 */
2353#ifdef USE_HP_IP46TCP4BATCH
2354static cas_hp_inst_t cas_prog_ip46tcp4batchtab[] = {
2355 CAS_PROG_IP46TCP4_PREAMBLE,
2356 { "TCP seq", /* DADDR should point to dest port */
2357 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 0, S1_TCPFG, LD_SEQ,
2358 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
2359 { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0,
2360 S1_TCPHL, ST_FLG, 0x000, 3, 0x0, 0x0000}, /* Load TCP flags */
2361 { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0,
2362 S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000},
2363 { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
2364 S1_PCKT, IM_CTL, 0x040, 3, 0x0, 0xffff}, /* set batch bit */
2365 { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2366 IM_CTL, 0x001, 3, 0x0, 0x0001},
2367 { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
2368 S1_PCKT, IM_CTL, 0x080, 3, 0x0, 0xffff},
2369 { NULL },
2370};
2371#ifdef HP_IP46TCP4BATCH_DEFAULT
2372#define CAS_HP_FIRMWARE cas_prog_ip46tcp4batchtab
2373#endif
2374#endif
2375
2376/* Workaround for Cassini rev2 descriptor corruption problem.
2377 * Does batching without reassembly, and sets the SAP to a known
2378 * data pattern for all packets.
2379 */
2380#ifdef USE_HP_WORKAROUND
2381static cas_hp_inst_t cas_prog_workaroundtab[] = {
2382 { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0,
2383 S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000} ,
2384 { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023,
2385 IM_CTL, 0x04a, 3, 0x0, 0xffff},
2386 { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023,
2387 CL_REG, 0x000, 0, 0x0, 0x0000},
2388 { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4,
2389 CL_REG, 0x000, 0, 0x0, 0x0000},
2390 { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP,
2391 CL_REG, 0x000, 0, 0x0, 0x0000},
2392 { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP,
2393 CL_REG, 0x000, 0, 0x0, 0x0000},
2394 { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6,
2395 IM_SAP, 0x6AE, 3, 0x0, 0xffff},
2396 { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP,
2397 LD_SUM, 0x00a, 1, 0x0, 0x0000},
2398 { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP,
2399 LD_LEN, 0x03e, 1, 0x0, 0xffff},
2400 { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP,
2401 LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
2402 { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP,
2403 LD_SUM, 0x015, 1, 0x0, 0x0000},
2404 { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP,
2405 IM_R1, 0x128, 1, 0x0, 0xffff},
2406 { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP,
2407 LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
2408 { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP,
2409 LD_LEN, 0x03f, 1, 0x0, 0xffff},
2410 { "TCP seq", /* DADDR should point to dest port */
2411 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ,
2412 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
2413 { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0,
2414 S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */
2415 { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc,
2416 LD_R1, 0x205, 3, 0xB, 0xf000},
2417 { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
2418 S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff},
2419 { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
2420 IM_SAP, 0x6AE, 3, 0x0, 0xffff} ,
2421 { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2422 IM_CTL, 0x001, 3, 0x0, 0x0001},
2423 { NULL },
2424};
2425#ifdef HP_WORKAROUND_DEFAULT
2426#define CAS_HP_FIRMWARE cas_prog_workaroundtab
2427#endif
2428#endif
2429
2430#ifdef USE_HP_ENCRYPT
2431static cas_hp_inst_t cas_prog_encryptiontab[] = {
2432 { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0,
2433 S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000},
2434 { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023,
2435 IM_CTL, 0x00a, 3, 0x0, 0xffff},
2436#if 0
2437//"CFI?", /* 02 FIND CFI and If FIND go to S1_DROP */
2438//0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, CL_REG, 0x000, 0, 0x0,
2439//0x0000,
2440#endif
2441 { "CFI?", /* FIND CFI and If FIND go to CleanUP1 (ignore and send to host) */
2442 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023,
2443 CL_REG, 0x000, 0, 0x0, 0x0000},
2444 { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4,
2445 CL_REG, 0x000, 0, 0x0, 0x0000},
2446 { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP,
2447 CL_REG, 0x000, 0, 0x0, 0x0000},
2448 { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP,
2449 CL_REG, 0x000, 0, 0x0, 0x0000},
2450 { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6,
2451 LD_SAP, 0x100, 3, 0x0, 0xffff},
2452 { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP,
2453 LD_SUM, 0x00a, 1, 0x0, 0x0000},
2454 { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP,
2455 LD_LEN, 0x03e, 1, 0x0, 0xffff},
2456 { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_ESP4,
2457 LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */
2458 { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP,
2459 LD_SUM, 0x015, 1, 0x0, 0x0000},
2460 { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP,
2461 IM_R1, 0x128, 1, 0x0, 0xffff},
2462 { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP,
2463 LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */
2464 { "TCP64?",
2465#if 0
2466//@@@0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_ESP6, LD_LEN, 0x03f, 1, 0x0, 0xffff,
2467#endif
2468 0xff00, 0x0600, OP_EQ, 12, S1_TCPSQ, 0, S1_ESP6, LD_LEN,
2469 0x03f, 1, 0x0, 0xffff},
2470 { "TCP seq", /* 14:DADDR should point to dest port */
2471 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ,
2472 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */
2473 { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0,
2474 S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f}, /* Load TCP flags */
2475 { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc,
2476 LD_R1, 0x205, 3, 0xB, 0xf000} ,
2477 { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0,
2478 S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff},
2479 { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2,
2480 IM_CTL, 0x001, 3, 0x0, 0x0001},
2481 { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2482 CL_REG, 0x002, 3, 0x0, 0x0000},
2483 { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2484 IM_CTL, 0x080, 3, 0x0, 0xffff},
2485 { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT,
2486 IM_CTL, 0x044, 3, 0x0, 0xffff},
2487 { "IPV4 ESP encrypted?", /* S1_ESP4 */
2488 0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH4, IM_CTL,
2489 0x021, 1, 0x0, 0xffff},
2490 { "IPV4 AH encrypted?", /* S1_AH4 */
2491 0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL,
2492 0x021, 1, 0x0, 0xffff},
2493 { "IPV6 ESP encrypted?", /* S1_ESP6 */
2494#if 0
2495//@@@0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL, 0x021, 1, 0x0, 0xffff,
2496#endif
2497 0xff00, 0x3200, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL,
2498 0x021, 1, 0x0, 0xffff},
2499 { "IPV6 AH encrypted?", /* S1_AH6 */
2500#if 0
2501//@@@0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, 0x021, 1, 0x0, 0xffff,
2502#endif
2503 0xff00, 0x3300, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL,
2504 0x021, 1, 0x0, 0xffff},
2505 { NULL },
2506};
2507#ifdef HP_ENCRYPT_DEFAULT
2508#define CAS_HP_FIRMWARE cas_prog_encryptiontab
2509#endif
2510#endif
2511
2512static cas_hp_inst_t cas_prog_null[] = { {NULL} };
2513#ifdef HP_NULL_DEFAULT
2514#define CAS_HP_FIRMWARE cas_prog_null
2515#endif
2516
2517/* firmware patch for NS_DP83065 */
2518typedef struct cas_saturn_patch {
2519 u16 addr;
2520 u16 val;
2521} cas_saturn_patch_t;
2522
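/* Illustrative sketch, not part of the original header: how a patch table of
 * this shape could be walked. The write callback stands in for whatever
 * mechanism actually downloads the patch to the DP83065; it is an
 * assumption, not the driver's real interface.
 */
static inline void cas_apply_saturn_patch(const cas_saturn_patch_t *patch, int n,
					  void (*write_mem)(u16 addr, u16 val))
{
	int i;

	for (i = 0; i < n; i++)
		write_mem(patch[i].addr, patch[i].val);
}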
2523#if 1
2524cas_saturn_patch_t cas_saturn_patch[] = {
2525{0x8200, 0x007e}, {0x8201, 0x0082}, {0x8202, 0x0009},
2526{0x8203, 0x0000}, {0x8204, 0x0000}, {0x8205, 0x0000},
2527{0x8206, 0x0000}, {0x8207, 0x0000}, {0x8208, 0x0000},
2528{0x8209, 0x008e}, {0x820a, 0x008e}, {0x820b, 0x00ff},
2529{0x820c, 0x00ce}, {0x820d, 0x0082}, {0x820e, 0x0025},
2530{0x820f, 0x00ff}, {0x8210, 0x0001}, {0x8211, 0x000f},
2531{0x8212, 0x00ce}, {0x8213, 0x0084}, {0x8214, 0x0026},
2532{0x8215, 0x00ff}, {0x8216, 0x0001}, {0x8217, 0x0011},
2533{0x8218, 0x00ce}, {0x8219, 0x0085}, {0x821a, 0x003d},
2534{0x821b, 0x00df}, {0x821c, 0x00e5}, {0x821d, 0x0086},
2535{0x821e, 0x0039}, {0x821f, 0x00b7}, {0x8220, 0x008f},
2536{0x8221, 0x00f8}, {0x8222, 0x007e}, {0x8223, 0x00c3},
2537{0x8224, 0x00c2}, {0x8225, 0x0096}, {0x8226, 0x0047},
2538{0x8227, 0x0084}, {0x8228, 0x00f3}, {0x8229, 0x008a},
2539{0x822a, 0x0000}, {0x822b, 0x0097}, {0x822c, 0x0047},
2540{0x822d, 0x00ce}, {0x822e, 0x0082}, {0x822f, 0x0033},
2541{0x8230, 0x00ff}, {0x8231, 0x0001}, {0x8232, 0x000f},
2542{0x8233, 0x0096}, {0x8234, 0x0046}, {0x8235, 0x0084},
2543{0x8236, 0x000c}, {0x8237, 0x0081}, {0x8238, 0x0004},
2544{0x8239, 0x0027}, {0x823a, 0x000b}, {0x823b, 0x0096},
2545{0x823c, 0x0046}, {0x823d, 0x0084}, {0x823e, 0x000c},
2546{0x823f, 0x0081}, {0x8240, 0x0008}, {0x8241, 0x0027},
2547{0x8242, 0x0057}, {0x8243, 0x007e}, {0x8244, 0x0084},
2548{0x8245, 0x0025}, {0x8246, 0x0096}, {0x8247, 0x0047},
2549{0x8248, 0x0084}, {0x8249, 0x00f3}, {0x824a, 0x008a},
2550{0x824b, 0x0004}, {0x824c, 0x0097}, {0x824d, 0x0047},
2551{0x824e, 0x00ce}, {0x824f, 0x0082}, {0x8250, 0x0054},
2552{0x8251, 0x00ff}, {0x8252, 0x0001}, {0x8253, 0x000f},
2553{0x8254, 0x0096}, {0x8255, 0x0046}, {0x8256, 0x0084},
2554{0x8257, 0x000c}, {0x8258, 0x0081}, {0x8259, 0x0004},
2555{0x825a, 0x0026}, {0x825b, 0x0038}, {0x825c, 0x00b6},
2556{0x825d, 0x0012}, {0x825e, 0x0020}, {0x825f, 0x0084},
2557{0x8260, 0x0020}, {0x8261, 0x0026}, {0x8262, 0x0003},
2558{0x8263, 0x007e}, {0x8264, 0x0084}, {0x8265, 0x0025},
2559{0x8266, 0x0096}, {0x8267, 0x007b}, {0x8268, 0x00d6},
2560{0x8269, 0x007c}, {0x826a, 0x00fe}, {0x826b, 0x008f},
2561{0x826c, 0x0056}, {0x826d, 0x00bd}, {0x826e, 0x00f7},
2562{0x826f, 0x00b6}, {0x8270, 0x00fe}, {0x8271, 0x008f},
2563{0x8272, 0x004e}, {0x8273, 0x00bd}, {0x8274, 0x00ec},
2564{0x8275, 0x008e}, {0x8276, 0x00bd}, {0x8277, 0x00fa},
2565{0x8278, 0x00f7}, {0x8279, 0x00bd}, {0x827a, 0x00f7},
2566{0x827b, 0x0028}, {0x827c, 0x00ce}, {0x827d, 0x0082},
2567{0x827e, 0x0082}, {0x827f, 0x00ff}, {0x8280, 0x0001},
2568{0x8281, 0x000f}, {0x8282, 0x0096}, {0x8283, 0x0046},
2569{0x8284, 0x0084}, {0x8285, 0x000c}, {0x8286, 0x0081},
2570{0x8287, 0x0004}, {0x8288, 0x0026}, {0x8289, 0x000a},
2571{0x828a, 0x00b6}, {0x828b, 0x0012}, {0x828c, 0x0020},
2572{0x828d, 0x0084}, {0x828e, 0x0020}, {0x828f, 0x0027},
2573{0x8290, 0x00b5}, {0x8291, 0x007e}, {0x8292, 0x0084},
2574{0x8293, 0x0025}, {0x8294, 0x00bd}, {0x8295, 0x00f7},
2575{0x8296, 0x001f}, {0x8297, 0x007e}, {0x8298, 0x0084},
2576{0x8299, 0x001f}, {0x829a, 0x0096}, {0x829b, 0x0047},
2577{0x829c, 0x0084}, {0x829d, 0x00f3}, {0x829e, 0x008a},
2578{0x829f, 0x0008}, {0x82a0, 0x0097}, {0x82a1, 0x0047},
2579{0x82a2, 0x00de}, {0x82a3, 0x00e1}, {0x82a4, 0x00ad},
2580{0x82a5, 0x0000}, {0x82a6, 0x00ce}, {0x82a7, 0x0082},
2581{0x82a8, 0x00af}, {0x82a9, 0x00ff}, {0x82aa, 0x0001},
2582{0x82ab, 0x000f}, {0x82ac, 0x007e}, {0x82ad, 0x0084},
2583{0x82ae, 0x0025}, {0x82af, 0x0096}, {0x82b0, 0x0041},
2584{0x82b1, 0x0085}, {0x82b2, 0x0010}, {0x82b3, 0x0026},
2585{0x82b4, 0x0006}, {0x82b5, 0x0096}, {0x82b6, 0x0023},
2586{0x82b7, 0x0085}, {0x82b8, 0x0040}, {0x82b9, 0x0027},
2587{0x82ba, 0x0006}, {0x82bb, 0x00bd}, {0x82bc, 0x00ed},
2588{0x82bd, 0x0000}, {0x82be, 0x007e}, {0x82bf, 0x0083},
2589{0x82c0, 0x00a2}, {0x82c1, 0x00de}, {0x82c2, 0x0042},
2590{0x82c3, 0x00bd}, {0x82c4, 0x00eb}, {0x82c5, 0x008e},
2591{0x82c6, 0x0096}, {0x82c7, 0x0024}, {0x82c8, 0x0084},
2592{0x82c9, 0x0008}, {0x82ca, 0x0027}, {0x82cb, 0x0003},
2593{0x82cc, 0x007e}, {0x82cd, 0x0083}, {0x82ce, 0x00df},
2594{0x82cf, 0x0096}, {0x82d0, 0x007b}, {0x82d1, 0x00d6},
2595{0x82d2, 0x007c}, {0x82d3, 0x00fe}, {0x82d4, 0x008f},
2596{0x82d5, 0x0056}, {0x82d6, 0x00bd}, {0x82d7, 0x00f7},
2597{0x82d8, 0x00b6}, {0x82d9, 0x00fe}, {0x82da, 0x008f},
2598{0x82db, 0x0050}, {0x82dc, 0x00bd}, {0x82dd, 0x00ec},
2599{0x82de, 0x008e}, {0x82df, 0x00bd}, {0x82e0, 0x00fa},
2600{0x82e1, 0x00f7}, {0x82e2, 0x0086}, {0x82e3, 0x0011},
2601{0x82e4, 0x00c6}, {0x82e5, 0x0049}, {0x82e6, 0x00bd},
2602{0x82e7, 0x00e4}, {0x82e8, 0x0012}, {0x82e9, 0x00ce},
2603{0x82ea, 0x0082}, {0x82eb, 0x00ef}, {0x82ec, 0x00ff},
2604{0x82ed, 0x0001}, {0x82ee, 0x000f}, {0x82ef, 0x0096},
2605{0x82f0, 0x0046}, {0x82f1, 0x0084}, {0x82f2, 0x000c},
2606{0x82f3, 0x0081}, {0x82f4, 0x0000}, {0x82f5, 0x0027},
2607{0x82f6, 0x0017}, {0x82f7, 0x00c6}, {0x82f8, 0x0049},
2608{0x82f9, 0x00bd}, {0x82fa, 0x00e4}, {0x82fb, 0x0091},
2609{0x82fc, 0x0024}, {0x82fd, 0x000d}, {0x82fe, 0x00b6},
2610{0x82ff, 0x0012}, {0x8300, 0x0020}, {0x8301, 0x0085},
2611{0x8302, 0x0020}, {0x8303, 0x0026}, {0x8304, 0x000c},
2612{0x8305, 0x00ce}, {0x8306, 0x0082}, {0x8307, 0x00c1},
2613{0x8308, 0x00ff}, {0x8309, 0x0001}, {0x830a, 0x000f},
2614{0x830b, 0x007e}, {0x830c, 0x0084}, {0x830d, 0x0025},
2615{0x830e, 0x007e}, {0x830f, 0x0084}, {0x8310, 0x0016},
2616{0x8311, 0x00fe}, {0x8312, 0x008f}, {0x8313, 0x0052},
2617{0x8314, 0x00bd}, {0x8315, 0x00ec}, {0x8316, 0x008e},
2618{0x8317, 0x00bd}, {0x8318, 0x00fa}, {0x8319, 0x00f7},
2619{0x831a, 0x0086}, {0x831b, 0x006a}, {0x831c, 0x00c6},
2620{0x831d, 0x0049}, {0x831e, 0x00bd}, {0x831f, 0x00e4},
2621{0x8320, 0x0012}, {0x8321, 0x00ce}, {0x8322, 0x0083},
2622{0x8323, 0x0027}, {0x8324, 0x00ff}, {0x8325, 0x0001},
2623{0x8326, 0x000f}, {0x8327, 0x0096}, {0x8328, 0x0046},
2624{0x8329, 0x0084}, {0x832a, 0x000c}, {0x832b, 0x0081},
2625{0x832c, 0x0000}, {0x832d, 0x0027}, {0x832e, 0x000a},
2626{0x832f, 0x00c6}, {0x8330, 0x0049}, {0x8331, 0x00bd},
2627{0x8332, 0x00e4}, {0x8333, 0x0091}, {0x8334, 0x0025},
2628{0x8335, 0x0006}, {0x8336, 0x007e}, {0x8337, 0x0084},
2629{0x8338, 0x0025}, {0x8339, 0x007e}, {0x833a, 0x0084},
2630{0x833b, 0x0016}, {0x833c, 0x00b6}, {0x833d, 0x0018},
2631{0x833e, 0x0070}, {0x833f, 0x00bb}, {0x8340, 0x0019},
2632{0x8341, 0x0070}, {0x8342, 0x002a}, {0x8343, 0x0004},
2633{0x8344, 0x0081}, {0x8345, 0x00af}, {0x8346, 0x002e},
2634{0x8347, 0x0019}, {0x8348, 0x0096}, {0x8349, 0x007b},
2635{0x834a, 0x00f6}, {0x834b, 0x0020}, {0x834c, 0x0007},
2636{0x834d, 0x00fa}, {0x834e, 0x0020}, {0x834f, 0x0027},
2637{0x8350, 0x00c4}, {0x8351, 0x0038}, {0x8352, 0x0081},
2638{0x8353, 0x0038}, {0x8354, 0x0027}, {0x8355, 0x000b},
2639{0x8356, 0x00f6}, {0x8357, 0x0020}, {0x8358, 0x0007},
2640{0x8359, 0x00fa}, {0x835a, 0x0020}, {0x835b, 0x0027},
2641{0x835c, 0x00cb}, {0x835d, 0x0008}, {0x835e, 0x007e},
2642{0x835f, 0x0082}, {0x8360, 0x00d3}, {0x8361, 0x00bd},
2643{0x8362, 0x00f7}, {0x8363, 0x0066}, {0x8364, 0x0086},
2644{0x8365, 0x0074}, {0x8366, 0x00c6}, {0x8367, 0x0049},
2645{0x8368, 0x00bd}, {0x8369, 0x00e4}, {0x836a, 0x0012},
2646{0x836b, 0x00ce}, {0x836c, 0x0083}, {0x836d, 0x0071},
2647{0x836e, 0x00ff}, {0x836f, 0x0001}, {0x8370, 0x000f},
2648{0x8371, 0x0096}, {0x8372, 0x0046}, {0x8373, 0x0084},
2649{0x8374, 0x000c}, {0x8375, 0x0081}, {0x8376, 0x0008},
2650{0x8377, 0x0026}, {0x8378, 0x000a}, {0x8379, 0x00c6},
2651{0x837a, 0x0049}, {0x837b, 0x00bd}, {0x837c, 0x00e4},
2652{0x837d, 0x0091}, {0x837e, 0x0025}, {0x837f, 0x0006},
2653{0x8380, 0x007e}, {0x8381, 0x0084}, {0x8382, 0x0025},
2654{0x8383, 0x007e}, {0x8384, 0x0084}, {0x8385, 0x0016},
2655{0x8386, 0x00bd}, {0x8387, 0x00f7}, {0x8388, 0x003e},
2656{0x8389, 0x0026}, {0x838a, 0x000e}, {0x838b, 0x00bd},
2657{0x838c, 0x00e5}, {0x838d, 0x0009}, {0x838e, 0x0026},
2658{0x838f, 0x0006}, {0x8390, 0x00ce}, {0x8391, 0x0082},
2659{0x8392, 0x00c1}, {0x8393, 0x00ff}, {0x8394, 0x0001},
2660{0x8395, 0x000f}, {0x8396, 0x007e}, {0x8397, 0x0084},
2661{0x8398, 0x0025}, {0x8399, 0x00fe}, {0x839a, 0x008f},
2662{0x839b, 0x0054}, {0x839c, 0x00bd}, {0x839d, 0x00ec},
2663{0x839e, 0x008e}, {0x839f, 0x00bd}, {0x83a0, 0x00fa},
2664{0x83a1, 0x00f7}, {0x83a2, 0x00bd}, {0x83a3, 0x00f7},
2665{0x83a4, 0x0033}, {0x83a5, 0x0086}, {0x83a6, 0x000f},
2666{0x83a7, 0x00c6}, {0x83a8, 0x0051}, {0x83a9, 0x00bd},
2667{0x83aa, 0x00e4}, {0x83ab, 0x0012}, {0x83ac, 0x00ce},
2668{0x83ad, 0x0083}, {0x83ae, 0x00b2}, {0x83af, 0x00ff},
2669{0x83b0, 0x0001}, {0x83b1, 0x000f}, {0x83b2, 0x0096},
2670{0x83b3, 0x0046}, {0x83b4, 0x0084}, {0x83b5, 0x000c},
2671{0x83b6, 0x0081}, {0x83b7, 0x0008}, {0x83b8, 0x0026},
2672{0x83b9, 0x005c}, {0x83ba, 0x00b6}, {0x83bb, 0x0012},
2673{0x83bc, 0x0020}, {0x83bd, 0x0084}, {0x83be, 0x003f},
2674{0x83bf, 0x0081}, {0x83c0, 0x003a}, {0x83c1, 0x0027},
2675{0x83c2, 0x001c}, {0x83c3, 0x0096}, {0x83c4, 0x0023},
2676{0x83c5, 0x0085}, {0x83c6, 0x0040}, {0x83c7, 0x0027},
2677{0x83c8, 0x0003}, {0x83c9, 0x007e}, {0x83ca, 0x0084},
2678{0x83cb, 0x0025}, {0x83cc, 0x00c6}, {0x83cd, 0x0051},
2679{0x83ce, 0x00bd}, {0x83cf, 0x00e4}, {0x83d0, 0x0091},
2680{0x83d1, 0x0025}, {0x83d2, 0x0003}, {0x83d3, 0x007e},
2681{0x83d4, 0x0084}, {0x83d5, 0x0025}, {0x83d6, 0x00ce},
2682{0x83d7, 0x0082}, {0x83d8, 0x00c1}, {0x83d9, 0x00ff},
2683{0x83da, 0x0001}, {0x83db, 0x000f}, {0x83dc, 0x007e},
2684{0x83dd, 0x0084}, {0x83de, 0x0025}, {0x83df, 0x00bd},
2685{0x83e0, 0x00f8}, {0x83e1, 0x0037}, {0x83e2, 0x007c},
2686{0x83e3, 0x0000}, {0x83e4, 0x007a}, {0x83e5, 0x00ce},
2687{0x83e6, 0x0083}, {0x83e7, 0x00ee}, {0x83e8, 0x00ff},
2688{0x83e9, 0x0001}, {0x83ea, 0x000f}, {0x83eb, 0x007e},
2689{0x83ec, 0x0084}, {0x83ed, 0x0025}, {0x83ee, 0x0096},
2690{0x83ef, 0x0046}, {0x83f0, 0x0084}, {0x83f1, 0x000c},
2691{0x83f2, 0x0081}, {0x83f3, 0x0008}, {0x83f4, 0x0026},
2692{0x83f5, 0x0020}, {0x83f6, 0x0096}, {0x83f7, 0x0024},
2693{0x83f8, 0x0084}, {0x83f9, 0x0008}, {0x83fa, 0x0026},
2694{0x83fb, 0x0029}, {0x83fc, 0x00b6}, {0x83fd, 0x0018},
2695{0x83fe, 0x0082}, {0x83ff, 0x00bb}, {0x8400, 0x0019},
2696{0x8401, 0x0082}, {0x8402, 0x00b1}, {0x8403, 0x0001},
2697{0x8404, 0x003b}, {0x8405, 0x0022}, {0x8406, 0x0009},
2698{0x8407, 0x00b6}, {0x8408, 0x0012}, {0x8409, 0x0020},
2699{0x840a, 0x0084}, {0x840b, 0x0037}, {0x840c, 0x0081},
2700{0x840d, 0x0032}, {0x840e, 0x0027}, {0x840f, 0x0015},
2701{0x8410, 0x00bd}, {0x8411, 0x00f8}, {0x8412, 0x0044},
2702{0x8413, 0x007e}, {0x8414, 0x0082}, {0x8415, 0x00c1},
2703{0x8416, 0x00bd}, {0x8417, 0x00f7}, {0x8418, 0x001f},
2704{0x8419, 0x00bd}, {0x841a, 0x00f8}, {0x841b, 0x0044},
2705{0x841c, 0x00bd}, {0x841d, 0x00fc}, {0x841e, 0x0029},
2706{0x841f, 0x00ce}, {0x8420, 0x0082}, {0x8421, 0x0025},
2707{0x8422, 0x00ff}, {0x8423, 0x0001}, {0x8424, 0x000f},
2708{0x8425, 0x0039}, {0x8426, 0x0096}, {0x8427, 0x0047},
2709{0x8428, 0x0084}, {0x8429, 0x00fc}, {0x842a, 0x008a},
2710{0x842b, 0x0000}, {0x842c, 0x0097}, {0x842d, 0x0047},
2711{0x842e, 0x00ce}, {0x842f, 0x0084}, {0x8430, 0x0034},
2712{0x8431, 0x00ff}, {0x8432, 0x0001}, {0x8433, 0x0011},
2713{0x8434, 0x0096}, {0x8435, 0x0046}, {0x8436, 0x0084},
2714{0x8437, 0x0003}, {0x8438, 0x0081}, {0x8439, 0x0002},
2715{0x843a, 0x0027}, {0x843b, 0x0003}, {0x843c, 0x007e},
2716{0x843d, 0x0085}, {0x843e, 0x001e}, {0x843f, 0x0096},
2717{0x8440, 0x0047}, {0x8441, 0x0084}, {0x8442, 0x00fc},
2718{0x8443, 0x008a}, {0x8444, 0x0002}, {0x8445, 0x0097},
2719{0x8446, 0x0047}, {0x8447, 0x00de}, {0x8448, 0x00e1},
2720{0x8449, 0x00ad}, {0x844a, 0x0000}, {0x844b, 0x0086},
2721{0x844c, 0x0001}, {0x844d, 0x00b7}, {0x844e, 0x0012},
2722{0x844f, 0x0051}, {0x8450, 0x00bd}, {0x8451, 0x00f7},
2723{0x8452, 0x0014}, {0x8453, 0x00b6}, {0x8454, 0x0010},
2724{0x8455, 0x0031}, {0x8456, 0x0084}, {0x8457, 0x00fd},
2725{0x8458, 0x00b7}, {0x8459, 0x0010}, {0x845a, 0x0031},
2726{0x845b, 0x00bd}, {0x845c, 0x00f8}, {0x845d, 0x001e},
2727{0x845e, 0x0096}, {0x845f, 0x0081}, {0x8460, 0x00d6},
2728{0x8461, 0x0082}, {0x8462, 0x00fe}, {0x8463, 0x008f},
2729{0x8464, 0x005a}, {0x8465, 0x00bd}, {0x8466, 0x00f7},
2730{0x8467, 0x00b6}, {0x8468, 0x00fe}, {0x8469, 0x008f},
2731{0x846a, 0x005c}, {0x846b, 0x00bd}, {0x846c, 0x00ec},
2732{0x846d, 0x008e}, {0x846e, 0x00bd}, {0x846f, 0x00fa},
2733{0x8470, 0x00f7}, {0x8471, 0x0086}, {0x8472, 0x0008},
2734{0x8473, 0x00d6}, {0x8474, 0x0000}, {0x8475, 0x00c5},
2735{0x8476, 0x0010}, {0x8477, 0x0026}, {0x8478, 0x0002},
2736{0x8479, 0x008b}, {0x847a, 0x0020}, {0x847b, 0x00c6},
2737{0x847c, 0x0051}, {0x847d, 0x00bd}, {0x847e, 0x00e4},
2738{0x847f, 0x0012}, {0x8480, 0x00ce}, {0x8481, 0x0084},
2739{0x8482, 0x0086}, {0x8483, 0x00ff}, {0x8484, 0x0001},
2740{0x8485, 0x0011}, {0x8486, 0x0096}, {0x8487, 0x0046},
2741{0x8488, 0x0084}, {0x8489, 0x0003}, {0x848a, 0x0081},
2742{0x848b, 0x0002}, {0x848c, 0x0027}, {0x848d, 0x0003},
2743{0x848e, 0x007e}, {0x848f, 0x0085}, {0x8490, 0x000f},
2744{0x8491, 0x00c6}, {0x8492, 0x0051}, {0x8493, 0x00bd},
2745{0x8494, 0x00e4}, {0x8495, 0x0091}, {0x8496, 0x0025},
2746{0x8497, 0x0003}, {0x8498, 0x007e}, {0x8499, 0x0085},
2747{0x849a, 0x001e}, {0x849b, 0x0096}, {0x849c, 0x0044},
2748{0x849d, 0x0085}, {0x849e, 0x0010}, {0x849f, 0x0026},
2749{0x84a0, 0x000a}, {0x84a1, 0x00b6}, {0x84a2, 0x0012},
2750{0x84a3, 0x0050}, {0x84a4, 0x00ba}, {0x84a5, 0x0001},
2751{0x84a6, 0x003c}, {0x84a7, 0x0085}, {0x84a8, 0x0010},
2752{0x84a9, 0x0027}, {0x84aa, 0x00a8}, {0x84ab, 0x00bd},
2753{0x84ac, 0x00f7}, {0x84ad, 0x0066}, {0x84ae, 0x00ce},
2754{0x84af, 0x0084}, {0x84b0, 0x00b7}, {0x84b1, 0x00ff},
2755{0x84b2, 0x0001}, {0x84b3, 0x0011}, {0x84b4, 0x007e},
2756{0x84b5, 0x0085}, {0x84b6, 0x001e}, {0x84b7, 0x0096},
2757{0x84b8, 0x0046}, {0x84b9, 0x0084}, {0x84ba, 0x0003},
2758{0x84bb, 0x0081}, {0x84bc, 0x0002}, {0x84bd, 0x0026},
2759{0x84be, 0x0050}, {0x84bf, 0x00b6}, {0x84c0, 0x0012},
2760{0x84c1, 0x0030}, {0x84c2, 0x0084}, {0x84c3, 0x0003},
2761{0x84c4, 0x0081}, {0x84c5, 0x0001}, {0x84c6, 0x0027},
2762{0x84c7, 0x0003}, {0x84c8, 0x007e}, {0x84c9, 0x0085},
2763{0x84ca, 0x001e}, {0x84cb, 0x0096}, {0x84cc, 0x0044},
2764{0x84cd, 0x0085}, {0x84ce, 0x0010}, {0x84cf, 0x0026},
2765{0x84d0, 0x0013}, {0x84d1, 0x00b6}, {0x84d2, 0x0012},
2766{0x84d3, 0x0050}, {0x84d4, 0x00ba}, {0x84d5, 0x0001},
2767{0x84d6, 0x003c}, {0x84d7, 0x0085}, {0x84d8, 0x0010},
2768{0x84d9, 0x0026}, {0x84da, 0x0009}, {0x84db, 0x00ce},
2769{0x84dc, 0x0084}, {0x84dd, 0x0053}, {0x84de, 0x00ff},
2770{0x84df, 0x0001}, {0x84e0, 0x0011}, {0x84e1, 0x007e},
2771{0x84e2, 0x0085}, {0x84e3, 0x001e}, {0x84e4, 0x00b6},
2772{0x84e5, 0x0010}, {0x84e6, 0x0031}, {0x84e7, 0x008a},
2773{0x84e8, 0x0002}, {0x84e9, 0x00b7}, {0x84ea, 0x0010},
2774{0x84eb, 0x0031}, {0x84ec, 0x00bd}, {0x84ed, 0x0085},
2775{0x84ee, 0x001f}, {0x84ef, 0x00bd}, {0x84f0, 0x00f8},
2776{0x84f1, 0x0037}, {0x84f2, 0x007c}, {0x84f3, 0x0000},
2777{0x84f4, 0x0080}, {0x84f5, 0x00ce}, {0x84f6, 0x0084},
2778{0x84f7, 0x00fe}, {0x84f8, 0x00ff}, {0x84f9, 0x0001},
2779{0x84fa, 0x0011}, {0x84fb, 0x007e}, {0x84fc, 0x0085},
2780{0x84fd, 0x001e}, {0x84fe, 0x0096}, {0x84ff, 0x0046},
2781{0x8500, 0x0084}, {0x8501, 0x0003}, {0x8502, 0x0081},
2782{0x8503, 0x0002}, {0x8504, 0x0026}, {0x8505, 0x0009},
2783{0x8506, 0x00b6}, {0x8507, 0x0012}, {0x8508, 0x0030},
2784{0x8509, 0x0084}, {0x850a, 0x0003}, {0x850b, 0x0081},
2785{0x850c, 0x0001}, {0x850d, 0x0027}, {0x850e, 0x000f},
2786{0x850f, 0x00bd}, {0x8510, 0x00f8}, {0x8511, 0x0044},
2787{0x8512, 0x00bd}, {0x8513, 0x00f7}, {0x8514, 0x000b},
2788{0x8515, 0x00bd}, {0x8516, 0x00fc}, {0x8517, 0x0029},
2789{0x8518, 0x00ce}, {0x8519, 0x0084}, {0x851a, 0x0026},
2790{0x851b, 0x00ff}, {0x851c, 0x0001}, {0x851d, 0x0011},
2791{0x851e, 0x0039}, {0x851f, 0x00d6}, {0x8520, 0x0022},
2792{0x8521, 0x00c4}, {0x8522, 0x000f}, {0x8523, 0x00b6},
2793{0x8524, 0x0012}, {0x8525, 0x0030}, {0x8526, 0x00ba},
2794{0x8527, 0x0012}, {0x8528, 0x0032}, {0x8529, 0x0084},
2795{0x852a, 0x0004}, {0x852b, 0x0027}, {0x852c, 0x000d},
2796{0x852d, 0x0096}, {0x852e, 0x0022}, {0x852f, 0x0085},
2797{0x8530, 0x0004}, {0x8531, 0x0027}, {0x8532, 0x0005},
2798{0x8533, 0x00ca}, {0x8534, 0x0010}, {0x8535, 0x007e},
2799{0x8536, 0x0085}, {0x8537, 0x003a}, {0x8538, 0x00ca},
2800{0x8539, 0x0020}, {0x853a, 0x00d7}, {0x853b, 0x0022},
2801{0x853c, 0x0039}, {0x853d, 0x0086}, {0x853e, 0x0000},
2802{0x853f, 0x0097}, {0x8540, 0x0083}, {0x8541, 0x0018},
2803{0x8542, 0x00ce}, {0x8543, 0x001c}, {0x8544, 0x0000},
2804{0x8545, 0x00bd}, {0x8546, 0x00eb}, {0x8547, 0x0046},
2805{0x8548, 0x0096}, {0x8549, 0x0057}, {0x854a, 0x0085},
2806{0x854b, 0x0001}, {0x854c, 0x0027}, {0x854d, 0x0002},
2807{0x854e, 0x004f}, {0x854f, 0x0039}, {0x8550, 0x0085},
2808{0x8551, 0x0002}, {0x8552, 0x0027}, {0x8553, 0x0001},
2809{0x8554, 0x0039}, {0x8555, 0x007f}, {0x8556, 0x008f},
2810{0x8557, 0x007d}, {0x8558, 0x0086}, {0x8559, 0x0004},
2811{0x855a, 0x00b7}, {0x855b, 0x0012}, {0x855c, 0x0004},
2812{0x855d, 0x0086}, {0x855e, 0x0008}, {0x855f, 0x00b7},
2813{0x8560, 0x0012}, {0x8561, 0x0007}, {0x8562, 0x0086},
2814{0x8563, 0x0010}, {0x8564, 0x00b7}, {0x8565, 0x0012},
2815{0x8566, 0x000c}, {0x8567, 0x0086}, {0x8568, 0x0007},
2816{0x8569, 0x00b7}, {0x856a, 0x0012}, {0x856b, 0x0006},
2817{0x856c, 0x00b6}, {0x856d, 0x008f}, {0x856e, 0x007d},
2818{0x856f, 0x00b7}, {0x8570, 0x0012}, {0x8571, 0x0070},
2819{0x8572, 0x0086}, {0x8573, 0x0001}, {0x8574, 0x00ba},
2820{0x8575, 0x0012}, {0x8576, 0x0004}, {0x8577, 0x00b7},
2821{0x8578, 0x0012}, {0x8579, 0x0004}, {0x857a, 0x0001},
2822{0x857b, 0x0001}, {0x857c, 0x0001}, {0x857d, 0x0001},
2823{0x857e, 0x0001}, {0x857f, 0x0001}, {0x8580, 0x00b6},
2824{0x8581, 0x0012}, {0x8582, 0x0004}, {0x8583, 0x0084},
2825{0x8584, 0x00fe}, {0x8585, 0x008a}, {0x8586, 0x0002},
2826{0x8587, 0x00b7}, {0x8588, 0x0012}, {0x8589, 0x0004},
2827{0x858a, 0x0001}, {0x858b, 0x0001}, {0x858c, 0x0001},
2828{0x858d, 0x0001}, {0x858e, 0x0001}, {0x858f, 0x0001},
2829{0x8590, 0x0086}, {0x8591, 0x00fd}, {0x8592, 0x00b4},
2830{0x8593, 0x0012}, {0x8594, 0x0004}, {0x8595, 0x00b7},
2831{0x8596, 0x0012}, {0x8597, 0x0004}, {0x8598, 0x00b6},
2832{0x8599, 0x0012}, {0x859a, 0x0000}, {0x859b, 0x0084},
2833{0x859c, 0x0008}, {0x859d, 0x0081}, {0x859e, 0x0008},
2834{0x859f, 0x0027}, {0x85a0, 0x0016}, {0x85a1, 0x00b6},
2835{0x85a2, 0x008f}, {0x85a3, 0x007d}, {0x85a4, 0x0081},
2836{0x85a5, 0x000c}, {0x85a6, 0x0027}, {0x85a7, 0x0008},
2837{0x85a8, 0x008b}, {0x85a9, 0x0004}, {0x85aa, 0x00b7},
2838{0x85ab, 0x008f}, {0x85ac, 0x007d}, {0x85ad, 0x007e},
2839{0x85ae, 0x0085}, {0x85af, 0x006c}, {0x85b0, 0x0086},
2840{0x85b1, 0x0003}, {0x85b2, 0x0097}, {0x85b3, 0x0040},
2841{0x85b4, 0x007e}, {0x85b5, 0x0089}, {0x85b6, 0x006e},
2842{0x85b7, 0x0086}, {0x85b8, 0x0007}, {0x85b9, 0x00b7},
2843{0x85ba, 0x0012}, {0x85bb, 0x0006}, {0x85bc, 0x005f},
2844{0x85bd, 0x00f7}, {0x85be, 0x008f}, {0x85bf, 0x0082},
2845{0x85c0, 0x005f}, {0x85c1, 0x00f7}, {0x85c2, 0x008f},
2846{0x85c3, 0x007f}, {0x85c4, 0x00f7}, {0x85c5, 0x008f},
2847{0x85c6, 0x0070}, {0x85c7, 0x00f7}, {0x85c8, 0x008f},
2848{0x85c9, 0x0071}, {0x85ca, 0x00f7}, {0x85cb, 0x008f},
2849{0x85cc, 0x0072}, {0x85cd, 0x00f7}, {0x85ce, 0x008f},
2850{0x85cf, 0x0073}, {0x85d0, 0x00f7}, {0x85d1, 0x008f},
2851{0x85d2, 0x0074}, {0x85d3, 0x00f7}, {0x85d4, 0x008f},
2852{0x85d5, 0x0075}, {0x85d6, 0x00f7}, {0x85d7, 0x008f},
2853{0x85d8, 0x0076}, {0x85d9, 0x00f7}, {0x85da, 0x008f},
2854{0x85db, 0x0077}, {0x85dc, 0x00f7}, {0x85dd, 0x008f},
2855{0x85de, 0x0078}, {0x85df, 0x00f7}, {0x85e0, 0x008f},
2856{0x85e1, 0x0079}, {0x85e2, 0x00f7}, {0x85e3, 0x008f},
2857{0x85e4, 0x007a}, {0x85e5, 0x00f7}, {0x85e6, 0x008f},
2858{0x85e7, 0x007b}, {0x85e8, 0x00b6}, {0x85e9, 0x0012},
2859{0x85ea, 0x0004}, {0x85eb, 0x008a}, {0x85ec, 0x0010},
2860{0x85ed, 0x00b7}, {0x85ee, 0x0012}, {0x85ef, 0x0004},
2861{0x85f0, 0x0086}, {0x85f1, 0x00e4}, {0x85f2, 0x00b7},
2862{0x85f3, 0x0012}, {0x85f4, 0x0070}, {0x85f5, 0x00b7},
2863{0x85f6, 0x0012}, {0x85f7, 0x0007}, {0x85f8, 0x00f7},
2864{0x85f9, 0x0012}, {0x85fa, 0x0005}, {0x85fb, 0x00f7},
2865{0x85fc, 0x0012}, {0x85fd, 0x0009}, {0x85fe, 0x0086},
2866{0x85ff, 0x0008}, {0x8600, 0x00ba}, {0x8601, 0x0012},
2867{0x8602, 0x0004}, {0x8603, 0x00b7}, {0x8604, 0x0012},
2868{0x8605, 0x0004}, {0x8606, 0x0086}, {0x8607, 0x00f7},
2869{0x8608, 0x00b4}, {0x8609, 0x0012}, {0x860a, 0x0004},
2870{0x860b, 0x00b7}, {0x860c, 0x0012}, {0x860d, 0x0004},
2871{0x860e, 0x0001}, {0x860f, 0x0001}, {0x8610, 0x0001},
2872{0x8611, 0x0001}, {0x8612, 0x0001}, {0x8613, 0x0001},
2873{0x8614, 0x00b6}, {0x8615, 0x0012}, {0x8616, 0x0008},
2874{0x8617, 0x0027}, {0x8618, 0x007f}, {0x8619, 0x0081},
2875{0x861a, 0x0080}, {0x861b, 0x0026}, {0x861c, 0x000b},
2876{0x861d, 0x0086}, {0x861e, 0x0008}, {0x861f, 0x00ce},
2877{0x8620, 0x008f}, {0x8621, 0x0079}, {0x8622, 0x00bd},
2878{0x8623, 0x0089}, {0x8624, 0x007b}, {0x8625, 0x007e},
2879{0x8626, 0x0086}, {0x8627, 0x008e}, {0x8628, 0x0081},
2880{0x8629, 0x0040}, {0x862a, 0x0026}, {0x862b, 0x000b},
2881{0x862c, 0x0086}, {0x862d, 0x0004}, {0x862e, 0x00ce},
2882{0x862f, 0x008f}, {0x8630, 0x0076}, {0x8631, 0x00bd},
2883{0x8632, 0x0089}, {0x8633, 0x007b}, {0x8634, 0x007e},
2884{0x8635, 0x0086}, {0x8636, 0x008e}, {0x8637, 0x0081},
2885{0x8638, 0x0020}, {0x8639, 0x0026}, {0x863a, 0x000b},
2886{0x863b, 0x0086}, {0x863c, 0x0002}, {0x863d, 0x00ce},
2887{0x863e, 0x008f}, {0x863f, 0x0073}, {0x8640, 0x00bd},
2888{0x8641, 0x0089}, {0x8642, 0x007b}, {0x8643, 0x007e},
2889{0x8644, 0x0086}, {0x8645, 0x008e}, {0x8646, 0x0081},
2890{0x8647, 0x0010}, {0x8648, 0x0026}, {0x8649, 0x000b},
2891{0x864a, 0x0086}, {0x864b, 0x0001}, {0x864c, 0x00ce},
2892{0x864d, 0x008f}, {0x864e, 0x0070}, {0x864f, 0x00bd},
2893{0x8650, 0x0089}, {0x8651, 0x007b}, {0x8652, 0x007e},
2894{0x8653, 0x0086}, {0x8654, 0x008e}, {0x8655, 0x0081},
2895{0x8656, 0x0008}, {0x8657, 0x0026}, {0x8658, 0x000b},
2896{0x8659, 0x0086}, {0x865a, 0x0008}, {0x865b, 0x00ce},
2897{0x865c, 0x008f}, {0x865d, 0x0079}, {0x865e, 0x00bd},
2898{0x865f, 0x0089}, {0x8660, 0x007f}, {0x8661, 0x007e},
2899{0x8662, 0x0086}, {0x8663, 0x008e}, {0x8664, 0x0081},
2900{0x8665, 0x0004}, {0x8666, 0x0026}, {0x8667, 0x000b},
2901{0x8668, 0x0086}, {0x8669, 0x0004}, {0x866a, 0x00ce},
2902{0x866b, 0x008f}, {0x866c, 0x0076}, {0x866d, 0x00bd},
2903{0x866e, 0x0089}, {0x866f, 0x007f}, {0x8670, 0x007e},
2904{0x8671, 0x0086}, {0x8672, 0x008e}, {0x8673, 0x0081},
2905{0x8674, 0x0002}, {0x8675, 0x0026}, {0x8676, 0x000b},
2906{0x8677, 0x008a}, {0x8678, 0x0002}, {0x8679, 0x00ce},
2907{0x867a, 0x008f}, {0x867b, 0x0073}, {0x867c, 0x00bd},
2908{0x867d, 0x0089}, {0x867e, 0x007f}, {0x867f, 0x007e},
2909{0x8680, 0x0086}, {0x8681, 0x008e}, {0x8682, 0x0081},
2910{0x8683, 0x0001}, {0x8684, 0x0026}, {0x8685, 0x0008},
2911{0x8686, 0x0086}, {0x8687, 0x0001}, {0x8688, 0x00ce},
2912{0x8689, 0x008f}, {0x868a, 0x0070}, {0x868b, 0x00bd},
2913{0x868c, 0x0089}, {0x868d, 0x007f}, {0x868e, 0x00b6},
2914{0x868f, 0x008f}, {0x8690, 0x007f}, {0x8691, 0x0081},
2915{0x8692, 0x000f}, {0x8693, 0x0026}, {0x8694, 0x0003},
2916{0x8695, 0x007e}, {0x8696, 0x0087}, {0x8697, 0x0047},
2917{0x8698, 0x00b6}, {0x8699, 0x0012}, {0x869a, 0x0009},
2918{0x869b, 0x0084}, {0x869c, 0x0003}, {0x869d, 0x0081},
2919{0x869e, 0x0003}, {0x869f, 0x0027}, {0x86a0, 0x0006},
2920{0x86a1, 0x007c}, {0x86a2, 0x0012}, {0x86a3, 0x0009},
2921{0x86a4, 0x007e}, {0x86a5, 0x0085}, {0x86a6, 0x00fe},
2922{0x86a7, 0x00b6}, {0x86a8, 0x0012}, {0x86a9, 0x0006},
2923{0x86aa, 0x0084}, {0x86ab, 0x0007}, {0x86ac, 0x0081},
2924{0x86ad, 0x0007}, {0x86ae, 0x0027}, {0x86af, 0x0008},
2925{0x86b0, 0x008b}, {0x86b1, 0x0001}, {0x86b2, 0x00b7},
2926{0x86b3, 0x0012}, {0x86b4, 0x0006}, {0x86b5, 0x007e},
2927{0x86b6, 0x0086}, {0x86b7, 0x00d5}, {0x86b8, 0x00b6},
2928{0x86b9, 0x008f}, {0x86ba, 0x0082}, {0x86bb, 0x0026},
2929{0x86bc, 0x000a}, {0x86bd, 0x007c}, {0x86be, 0x008f},
2930{0x86bf, 0x0082}, {0x86c0, 0x004f}, {0x86c1, 0x00b7},
2931{0x86c2, 0x0012}, {0x86c3, 0x0006}, {0x86c4, 0x007e},
2932{0x86c5, 0x0085}, {0x86c6, 0x00c0}, {0x86c7, 0x00b6},
2933{0x86c8, 0x0012}, {0x86c9, 0x0006}, {0x86ca, 0x0084},
2934{0x86cb, 0x003f}, {0x86cc, 0x0081}, {0x86cd, 0x003f},
2935{0x86ce, 0x0027}, {0x86cf, 0x0010}, {0x86d0, 0x008b},
2936{0x86d1, 0x0008}, {0x86d2, 0x00b7}, {0x86d3, 0x0012},
2937{0x86d4, 0x0006}, {0x86d5, 0x00b6}, {0x86d6, 0x0012},
2938{0x86d7, 0x0009}, {0x86d8, 0x0084}, {0x86d9, 0x00fc},
2939{0x86da, 0x00b7}, {0x86db, 0x0012}, {0x86dc, 0x0009},
2940{0x86dd, 0x007e}, {0x86de, 0x0085}, {0x86df, 0x00fe},
2941{0x86e0, 0x00ce}, {0x86e1, 0x008f}, {0x86e2, 0x0070},
2942{0x86e3, 0x0018}, {0x86e4, 0x00ce}, {0x86e5, 0x008f},
2943{0x86e6, 0x0084}, {0x86e7, 0x00c6}, {0x86e8, 0x000c},
2944{0x86e9, 0x00bd}, {0x86ea, 0x0089}, {0x86eb, 0x006f},
2945{0x86ec, 0x00ce}, {0x86ed, 0x008f}, {0x86ee, 0x0084},
2946{0x86ef, 0x0018}, {0x86f0, 0x00ce}, {0x86f1, 0x008f},
2947{0x86f2, 0x0070}, {0x86f3, 0x00c6}, {0x86f4, 0x000c},
2948{0x86f5, 0x00bd}, {0x86f6, 0x0089}, {0x86f7, 0x006f},
2949{0x86f8, 0x00d6}, {0x86f9, 0x0083}, {0x86fa, 0x00c1},
2950{0x86fb, 0x004f}, {0x86fc, 0x002d}, {0x86fd, 0x0003},
2951{0x86fe, 0x007e}, {0x86ff, 0x0087}, {0x8700, 0x0040},
2952{0x8701, 0x00b6}, {0x8702, 0x008f}, {0x8703, 0x007f},
2953{0x8704, 0x0081}, {0x8705, 0x0007}, {0x8706, 0x0027},
2954{0x8707, 0x000f}, {0x8708, 0x0081}, {0x8709, 0x000b},
2955{0x870a, 0x0027}, {0x870b, 0x0015}, {0x870c, 0x0081},
2956{0x870d, 0x000d}, {0x870e, 0x0027}, {0x870f, 0x001b},
2957{0x8710, 0x0081}, {0x8711, 0x000e}, {0x8712, 0x0027},
2958{0x8713, 0x0021}, {0x8714, 0x007e}, {0x8715, 0x0087},
2959{0x8716, 0x0040}, {0x8717, 0x00f7}, {0x8718, 0x008f},
2960{0x8719, 0x007b}, {0x871a, 0x0086}, {0x871b, 0x0002},
2961{0x871c, 0x00b7}, {0x871d, 0x008f}, {0x871e, 0x007a},
2962{0x871f, 0x0020}, {0x8720, 0x001c}, {0x8721, 0x00f7},
2963{0x8722, 0x008f}, {0x8723, 0x0078}, {0x8724, 0x0086},
2964{0x8725, 0x0002}, {0x8726, 0x00b7}, {0x8727, 0x008f},
2965{0x8728, 0x0077}, {0x8729, 0x0020}, {0x872a, 0x0012},
2966{0x872b, 0x00f7}, {0x872c, 0x008f}, {0x872d, 0x0075},
2967{0x872e, 0x0086}, {0x872f, 0x0002}, {0x8730, 0x00b7},
2968{0x8731, 0x008f}, {0x8732, 0x0074}, {0x8733, 0x0020},
2969{0x8734, 0x0008}, {0x8735, 0x00f7}, {0x8736, 0x008f},
2970{0x8737, 0x0072}, {0x8738, 0x0086}, {0x8739, 0x0002},
2971{0x873a, 0x00b7}, {0x873b, 0x008f}, {0x873c, 0x0071},
2972{0x873d, 0x007e}, {0x873e, 0x0087}, {0x873f, 0x0047},
2973{0x8740, 0x0086}, {0x8741, 0x0004}, {0x8742, 0x0097},
2974{0x8743, 0x0040}, {0x8744, 0x007e}, {0x8745, 0x0089},
2975{0x8746, 0x006e}, {0x8747, 0x00ce}, {0x8748, 0x008f},
2976{0x8749, 0x0072}, {0x874a, 0x00bd}, {0x874b, 0x0089},
2977{0x874c, 0x00f7}, {0x874d, 0x00ce}, {0x874e, 0x008f},
2978{0x874f, 0x0075}, {0x8750, 0x00bd}, {0x8751, 0x0089},
2979{0x8752, 0x00f7}, {0x8753, 0x00ce}, {0x8754, 0x008f},
2980{0x8755, 0x0078}, {0x8756, 0x00bd}, {0x8757, 0x0089},
2981{0x8758, 0x00f7}, {0x8759, 0x00ce}, {0x875a, 0x008f},
2982{0x875b, 0x007b}, {0x875c, 0x00bd}, {0x875d, 0x0089},
2983{0x875e, 0x00f7}, {0x875f, 0x004f}, {0x8760, 0x00b7},
2984{0x8761, 0x008f}, {0x8762, 0x007d}, {0x8763, 0x00b7},
2985{0x8764, 0x008f}, {0x8765, 0x0081}, {0x8766, 0x00b6},
2986{0x8767, 0x008f}, {0x8768, 0x0072}, {0x8769, 0x0027},
2987{0x876a, 0x0047}, {0x876b, 0x007c}, {0x876c, 0x008f},
2988{0x876d, 0x007d}, {0x876e, 0x00b6}, {0x876f, 0x008f},
2989{0x8770, 0x0075}, {0x8771, 0x0027}, {0x8772, 0x003f},
2990{0x8773, 0x007c}, {0x8774, 0x008f}, {0x8775, 0x007d},
2991{0x8776, 0x00b6}, {0x8777, 0x008f}, {0x8778, 0x0078},
2992{0x8779, 0x0027}, {0x877a, 0x0037}, {0x877b, 0x007c},
2993{0x877c, 0x008f}, {0x877d, 0x007d}, {0x877e, 0x00b6},
2994{0x877f, 0x008f}, {0x8780, 0x007b}, {0x8781, 0x0027},
2995{0x8782, 0x002f}, {0x8783, 0x007f}, {0x8784, 0x008f},
2996{0x8785, 0x007d}, {0x8786, 0x007c}, {0x8787, 0x008f},
2997{0x8788, 0x0081}, {0x8789, 0x007a}, {0x878a, 0x008f},
2998{0x878b, 0x0072}, {0x878c, 0x0027}, {0x878d, 0x001b},
2999{0x878e, 0x007c}, {0x878f, 0x008f}, {0x8790, 0x007d},
3000{0x8791, 0x007a}, {0x8792, 0x008f}, {0x8793, 0x0075},
3001{0x8794, 0x0027}, {0x8795, 0x0016}, {0x8796, 0x007c},
3002{0x8797, 0x008f}, {0x8798, 0x007d}, {0x8799, 0x007a},
3003{0x879a, 0x008f}, {0x879b, 0x0078}, {0x879c, 0x0027},
3004{0x879d, 0x0011}, {0x879e, 0x007c}, {0x879f, 0x008f},
3005{0x87a0, 0x007d}, {0x87a1, 0x007a}, {0x87a2, 0x008f},
3006{0x87a3, 0x007b}, {0x87a4, 0x0027}, {0x87a5, 0x000c},
3007{0x87a6, 0x007e}, {0x87a7, 0x0087}, {0x87a8, 0x0083},
3008{0x87a9, 0x007a}, {0x87aa, 0x008f}, {0x87ab, 0x0075},
3009{0x87ac, 0x007a}, {0x87ad, 0x008f}, {0x87ae, 0x0078},
3010{0x87af, 0x007a}, {0x87b0, 0x008f}, {0x87b1, 0x007b},
3011{0x87b2, 0x00ce}, {0x87b3, 0x00c1}, {0x87b4, 0x00fc},
3012{0x87b5, 0x00f6}, {0x87b6, 0x008f}, {0x87b7, 0x007d},
3013{0x87b8, 0x003a}, {0x87b9, 0x00a6}, {0x87ba, 0x0000},
3014{0x87bb, 0x00b7}, {0x87bc, 0x0012}, {0x87bd, 0x0070},
3015{0x87be, 0x00b6}, {0x87bf, 0x008f}, {0x87c0, 0x0072},
3016{0x87c1, 0x0026}, {0x87c2, 0x0003}, {0x87c3, 0x007e},
3017{0x87c4, 0x0087}, {0x87c5, 0x00fa}, {0x87c6, 0x00b6},
3018{0x87c7, 0x008f}, {0x87c8, 0x0075}, {0x87c9, 0x0026},
3019{0x87ca, 0x000a}, {0x87cb, 0x0018}, {0x87cc, 0x00ce},
3020{0x87cd, 0x008f}, {0x87ce, 0x0073}, {0x87cf, 0x00bd},
3021{0x87d0, 0x0089}, {0x87d1, 0x00d5}, {0x87d2, 0x007e},
3022{0x87d3, 0x0087}, {0x87d4, 0x00fa}, {0x87d5, 0x00b6},
3023{0x87d6, 0x008f}, {0x87d7, 0x0078}, {0x87d8, 0x0026},
3024{0x87d9, 0x000a}, {0x87da, 0x0018}, {0x87db, 0x00ce},
3025{0x87dc, 0x008f}, {0x87dd, 0x0076}, {0x87de, 0x00bd},
3026{0x87df, 0x0089}, {0x87e0, 0x00d5}, {0x87e1, 0x007e},
3027{0x87e2, 0x0087}, {0x87e3, 0x00fa}, {0x87e4, 0x00b6},
3028{0x87e5, 0x008f}, {0x87e6, 0x007b}, {0x87e7, 0x0026},
3029{0x87e8, 0x000a}, {0x87e9, 0x0018}, {0x87ea, 0x00ce},
3030{0x87eb, 0x008f}, {0x87ec, 0x0079}, {0x87ed, 0x00bd},
3031{0x87ee, 0x0089}, {0x87ef, 0x00d5}, {0x87f0, 0x007e},
3032{0x87f1, 0x0087}, {0x87f2, 0x00fa}, {0x87f3, 0x0086},
3033{0x87f4, 0x0005}, {0x87f5, 0x0097}, {0x87f6, 0x0040},
3034{0x87f7, 0x007e}, {0x87f8, 0x0089}, {0x87f9, 0x0000},
3035{0x87fa, 0x00b6}, {0x87fb, 0x008f}, {0x87fc, 0x0075},
3036{0x87fd, 0x0081}, {0x87fe, 0x0007}, {0x87ff, 0x002e},
3037{0x8800, 0x00f2}, {0x8801, 0x00f6}, {0x8802, 0x0012},
3038{0x8803, 0x0006}, {0x8804, 0x00c4}, {0x8805, 0x00f8},
3039{0x8806, 0x001b}, {0x8807, 0x00b7}, {0x8808, 0x0012},
3040{0x8809, 0x0006}, {0x880a, 0x00b6}, {0x880b, 0x008f},
3041{0x880c, 0x0078}, {0x880d, 0x0081}, {0x880e, 0x0007},
3042{0x880f, 0x002e}, {0x8810, 0x00e2}, {0x8811, 0x0048},
3043{0x8812, 0x0048}, {0x8813, 0x0048}, {0x8814, 0x00f6},
3044{0x8815, 0x0012}, {0x8816, 0x0006}, {0x8817, 0x00c4},
3045{0x8818, 0x00c7}, {0x8819, 0x001b}, {0x881a, 0x00b7},
3046{0x881b, 0x0012}, {0x881c, 0x0006}, {0x881d, 0x00b6},
3047{0x881e, 0x008f}, {0x881f, 0x007b}, {0x8820, 0x0081},
3048{0x8821, 0x0007}, {0x8822, 0x002e}, {0x8823, 0x00cf},
3049{0x8824, 0x00f6}, {0x8825, 0x0012}, {0x8826, 0x0005},
3050{0x8827, 0x00c4}, {0x8828, 0x00f8}, {0x8829, 0x001b},
3051{0x882a, 0x00b7}, {0x882b, 0x0012}, {0x882c, 0x0005},
3052{0x882d, 0x0086}, {0x882e, 0x0000}, {0x882f, 0x00f6},
3053{0x8830, 0x008f}, {0x8831, 0x0071}, {0x8832, 0x00bd},
3054{0x8833, 0x0089}, {0x8834, 0x0094}, {0x8835, 0x0086},
3055{0x8836, 0x0001}, {0x8837, 0x00f6}, {0x8838, 0x008f},
3056{0x8839, 0x0074}, {0x883a, 0x00bd}, {0x883b, 0x0089},
3057{0x883c, 0x0094}, {0x883d, 0x0086}, {0x883e, 0x0002},
3058{0x883f, 0x00f6}, {0x8840, 0x008f}, {0x8841, 0x0077},
3059{0x8842, 0x00bd}, {0x8843, 0x0089}, {0x8844, 0x0094},
3060{0x8845, 0x0086}, {0x8846, 0x0003}, {0x8847, 0x00f6},
3061{0x8848, 0x008f}, {0x8849, 0x007a}, {0x884a, 0x00bd},
3062{0x884b, 0x0089}, {0x884c, 0x0094}, {0x884d, 0x00ce},
3063{0x884e, 0x008f}, {0x884f, 0x0070}, {0x8850, 0x00a6},
3064{0x8851, 0x0001}, {0x8852, 0x0081}, {0x8853, 0x0001},
3065{0x8854, 0x0027}, {0x8855, 0x0007}, {0x8856, 0x0081},
3066{0x8857, 0x0003}, {0x8858, 0x0027}, {0x8859, 0x0003},
3067{0x885a, 0x007e}, {0x885b, 0x0088}, {0x885c, 0x0066},
3068{0x885d, 0x00a6}, {0x885e, 0x0000}, {0x885f, 0x00b8},
3069{0x8860, 0x008f}, {0x8861, 0x0081}, {0x8862, 0x0084},
3070{0x8863, 0x0001}, {0x8864, 0x0026}, {0x8865, 0x000b},
3071{0x8866, 0x008c}, {0x8867, 0x008f}, {0x8868, 0x0079},
3072{0x8869, 0x002c}, {0x886a, 0x000e}, {0x886b, 0x0008},
3073{0x886c, 0x0008}, {0x886d, 0x0008}, {0x886e, 0x007e},
3074{0x886f, 0x0088}, {0x8870, 0x0050}, {0x8871, 0x00b6},
3075{0x8872, 0x0012}, {0x8873, 0x0004}, {0x8874, 0x008a},
3076{0x8875, 0x0040}, {0x8876, 0x00b7}, {0x8877, 0x0012},
3077{0x8878, 0x0004}, {0x8879, 0x00b6}, {0x887a, 0x0012},
3078{0x887b, 0x0004}, {0x887c, 0x0084}, {0x887d, 0x00fb},
3079{0x887e, 0x0084}, {0x887f, 0x00ef}, {0x8880, 0x00b7},
3080{0x8881, 0x0012}, {0x8882, 0x0004}, {0x8883, 0x00b6},
3081{0x8884, 0x0012}, {0x8885, 0x0007}, {0x8886, 0x0036},
3082{0x8887, 0x00b6}, {0x8888, 0x008f}, {0x8889, 0x007c},
3083{0x888a, 0x0048}, {0x888b, 0x0048}, {0x888c, 0x00b7},
3084{0x888d, 0x0012}, {0x888e, 0x0007}, {0x888f, 0x0086},
3085{0x8890, 0x0001}, {0x8891, 0x00ba}, {0x8892, 0x0012},
3086{0x8893, 0x0004}, {0x8894, 0x00b7}, {0x8895, 0x0012},
3087{0x8896, 0x0004}, {0x8897, 0x0001}, {0x8898, 0x0001},
3088{0x8899, 0x0001}, {0x889a, 0x0001}, {0x889b, 0x0001},
3089{0x889c, 0x0001}, {0x889d, 0x0086}, {0x889e, 0x00fe},
3090{0x889f, 0x00b4}, {0x88a0, 0x0012}, {0x88a1, 0x0004},
3091{0x88a2, 0x00b7}, {0x88a3, 0x0012}, {0x88a4, 0x0004},
3092{0x88a5, 0x0086}, {0x88a6, 0x0002}, {0x88a7, 0x00ba},
3093{0x88a8, 0x0012}, {0x88a9, 0x0004}, {0x88aa, 0x00b7},
3094{0x88ab, 0x0012}, {0x88ac, 0x0004}, {0x88ad, 0x0086},
3095{0x88ae, 0x00fd}, {0x88af, 0x00b4}, {0x88b0, 0x0012},
3096{0x88b1, 0x0004}, {0x88b2, 0x00b7}, {0x88b3, 0x0012},
3097{0x88b4, 0x0004}, {0x88b5, 0x0032}, {0x88b6, 0x00b7},
3098{0x88b7, 0x0012}, {0x88b8, 0x0007}, {0x88b9, 0x00b6},
3099{0x88ba, 0x0012}, {0x88bb, 0x0000}, {0x88bc, 0x0084},
3100{0x88bd, 0x0008}, {0x88be, 0x0081}, {0x88bf, 0x0008},
3101{0x88c0, 0x0027}, {0x88c1, 0x000f}, {0x88c2, 0x007c},
3102{0x88c3, 0x0082}, {0x88c4, 0x0008}, {0x88c5, 0x0026},
3103{0x88c6, 0x0007}, {0x88c7, 0x0086}, {0x88c8, 0x0076},
3104{0x88c9, 0x0097}, {0x88ca, 0x0040}, {0x88cb, 0x007e},
3105{0x88cc, 0x0089}, {0x88cd, 0x006e}, {0x88ce, 0x007e},
3106{0x88cf, 0x0086}, {0x88d0, 0x00ec}, {0x88d1, 0x00b6},
3107{0x88d2, 0x008f}, {0x88d3, 0x007f}, {0x88d4, 0x0081},
3108{0x88d5, 0x000f}, {0x88d6, 0x0027}, {0x88d7, 0x003c},
3109{0x88d8, 0x00bd}, {0x88d9, 0x00e6}, {0x88da, 0x00c7},
3110{0x88db, 0x00b7}, {0x88dc, 0x0012}, {0x88dd, 0x000d},
3111{0x88de, 0x00bd}, {0x88df, 0x00e6}, {0x88e0, 0x00cb},
3112{0x88e1, 0x00b6}, {0x88e2, 0x0012}, {0x88e3, 0x0004},
3113{0x88e4, 0x008a}, {0x88e5, 0x0020}, {0x88e6, 0x00b7},
3114{0x88e7, 0x0012}, {0x88e8, 0x0004}, {0x88e9, 0x00ce},
3115{0x88ea, 0x00ff}, {0x88eb, 0x00ff}, {0x88ec, 0x00b6},
3116{0x88ed, 0x0012}, {0x88ee, 0x0000}, {0x88ef, 0x0081},
3117{0x88f0, 0x000c}, {0x88f1, 0x0026}, {0x88f2, 0x0005},
3118{0x88f3, 0x0009}, {0x88f4, 0x0026}, {0x88f5, 0x00f6},
3119{0x88f6, 0x0027}, {0x88f7, 0x001c}, {0x88f8, 0x00b6},
3120{0x88f9, 0x0012}, {0x88fa, 0x0004}, {0x88fb, 0x0084},
3121{0x88fc, 0x00df}, {0x88fd, 0x00b7}, {0x88fe, 0x0012},
3122{0x88ff, 0x0004}, {0x8900, 0x0096}, {0x8901, 0x0083},
3123{0x8902, 0x0081}, {0x8903, 0x0007}, {0x8904, 0x002c},
3124{0x8905, 0x0005}, {0x8906, 0x007c}, {0x8907, 0x0000},
3125{0x8908, 0x0083}, {0x8909, 0x0020}, {0x890a, 0x0006},
3126{0x890b, 0x0096}, {0x890c, 0x0083}, {0x890d, 0x008b},
3127{0x890e, 0x0008}, {0x890f, 0x0097}, {0x8910, 0x0083},
3128{0x8911, 0x007e}, {0x8912, 0x0085}, {0x8913, 0x0041},
3129{0x8914, 0x007f}, {0x8915, 0x008f}, {0x8916, 0x007e},
3130{0x8917, 0x0086}, {0x8918, 0x0080}, {0x8919, 0x00b7},
3131{0x891a, 0x0012}, {0x891b, 0x000c}, {0x891c, 0x0086},
3132{0x891d, 0x0001}, {0x891e, 0x00b7}, {0x891f, 0x008f},
3133{0x8920, 0x007d}, {0x8921, 0x00b6}, {0x8922, 0x0012},
3134{0x8923, 0x000c}, {0x8924, 0x0084}, {0x8925, 0x007f},
3135{0x8926, 0x00b7}, {0x8927, 0x0012}, {0x8928, 0x000c},
3136{0x8929, 0x008a}, {0x892a, 0x0080}, {0x892b, 0x00b7},
3137{0x892c, 0x0012}, {0x892d, 0x000c}, {0x892e, 0x0086},
3138{0x892f, 0x000a}, {0x8930, 0x00bd}, {0x8931, 0x008a},
3139{0x8932, 0x0006}, {0x8933, 0x00b6}, {0x8934, 0x0012},
3140{0x8935, 0x000a}, {0x8936, 0x002a}, {0x8937, 0x0009},
3141{0x8938, 0x00b6}, {0x8939, 0x0012}, {0x893a, 0x000c},
3142{0x893b, 0x00ba}, {0x893c, 0x008f}, {0x893d, 0x007d},
3143{0x893e, 0x00b7}, {0x893f, 0x0012}, {0x8940, 0x000c},
3144{0x8941, 0x00b6}, {0x8942, 0x008f}, {0x8943, 0x007e},
3145{0x8944, 0x0081}, {0x8945, 0x0060}, {0x8946, 0x0027},
3146{0x8947, 0x001a}, {0x8948, 0x008b}, {0x8949, 0x0020},
3147{0x894a, 0x00b7}, {0x894b, 0x008f}, {0x894c, 0x007e},
3148{0x894d, 0x00b6}, {0x894e, 0x0012}, {0x894f, 0x000c},
3149{0x8950, 0x0084}, {0x8951, 0x009f}, {0x8952, 0x00ba},
3150{0x8953, 0x008f}, {0x8954, 0x007e}, {0x8955, 0x00b7},
3151{0x8956, 0x0012}, {0x8957, 0x000c}, {0x8958, 0x00b6},
3152{0x8959, 0x008f}, {0x895a, 0x007d}, {0x895b, 0x0048},
3153{0x895c, 0x00b7}, {0x895d, 0x008f}, {0x895e, 0x007d},
3154{0x895f, 0x007e}, {0x8960, 0x0089}, {0x8961, 0x0021},
3155{0x8962, 0x00b6}, {0x8963, 0x0012}, {0x8964, 0x0004},
3156{0x8965, 0x008a}, {0x8966, 0x0020}, {0x8967, 0x00b7},
3157{0x8968, 0x0012}, {0x8969, 0x0004}, {0x896a, 0x00bd},
3158{0x896b, 0x008a}, {0x896c, 0x000a}, {0x896d, 0x004f},
3159{0x896e, 0x0039}, {0x896f, 0x00a6}, {0x8970, 0x0000},
3160{0x8971, 0x0018}, {0x8972, 0x00a7}, {0x8973, 0x0000},
3161{0x8974, 0x0008}, {0x8975, 0x0018}, {0x8976, 0x0008},
3162{0x8977, 0x005a}, {0x8978, 0x0026}, {0x8979, 0x00f5},
3163{0x897a, 0x0039}, {0x897b, 0x0036}, {0x897c, 0x006c},
3164{0x897d, 0x0000}, {0x897e, 0x0032}, {0x897f, 0x00ba},
3165{0x8980, 0x008f}, {0x8981, 0x007f}, {0x8982, 0x00b7},
3166{0x8983, 0x008f}, {0x8984, 0x007f}, {0x8985, 0x00b6},
3167{0x8986, 0x0012}, {0x8987, 0x0009}, {0x8988, 0x0084},
3168{0x8989, 0x0003}, {0x898a, 0x00a7}, {0x898b, 0x0001},
3169{0x898c, 0x00b6}, {0x898d, 0x0012}, {0x898e, 0x0006},
3170{0x898f, 0x0084}, {0x8990, 0x003f}, {0x8991, 0x00a7},
3171{0x8992, 0x0002}, {0x8993, 0x0039}, {0x8994, 0x0036},
3172{0x8995, 0x0086}, {0x8996, 0x0003}, {0x8997, 0x00b7},
3173{0x8998, 0x008f}, {0x8999, 0x0080}, {0x899a, 0x0032},
3174{0x899b, 0x00c1}, {0x899c, 0x0000}, {0x899d, 0x0026},
3175{0x899e, 0x0006}, {0x899f, 0x00b7}, {0x89a0, 0x008f},
3176{0x89a1, 0x007c}, {0x89a2, 0x007e}, {0x89a3, 0x0089},
3177{0x89a4, 0x00c9}, {0x89a5, 0x00c1}, {0x89a6, 0x0001},
3178{0x89a7, 0x0027}, {0x89a8, 0x0018}, {0x89a9, 0x00c1},
3179{0x89aa, 0x0002}, {0x89ab, 0x0027}, {0x89ac, 0x000c},
3180{0x89ad, 0x00c1}, {0x89ae, 0x0003}, {0x89af, 0x0027},
3181{0x89b0, 0x0000}, {0x89b1, 0x00f6}, {0x89b2, 0x008f},
3182{0x89b3, 0x0080}, {0x89b4, 0x0005}, {0x89b5, 0x0005},
3183{0x89b6, 0x00f7}, {0x89b7, 0x008f}, {0x89b8, 0x0080},
3184{0x89b9, 0x00f6}, {0x89ba, 0x008f}, {0x89bb, 0x0080},
3185{0x89bc, 0x0005}, {0x89bd, 0x0005}, {0x89be, 0x00f7},
3186{0x89bf, 0x008f}, {0x89c0, 0x0080}, {0x89c1, 0x00f6},
3187{0x89c2, 0x008f}, {0x89c3, 0x0080}, {0x89c4, 0x0005},
3188{0x89c5, 0x0005}, {0x89c6, 0x00f7}, {0x89c7, 0x008f},
3189{0x89c8, 0x0080}, {0x89c9, 0x00f6}, {0x89ca, 0x008f},
3190{0x89cb, 0x0080}, {0x89cc, 0x0053}, {0x89cd, 0x00f4},
3191{0x89ce, 0x0012}, {0x89cf, 0x0007}, {0x89d0, 0x001b},
3192{0x89d1, 0x00b7}, {0x89d2, 0x0012}, {0x89d3, 0x0007},
3193{0x89d4, 0x0039}, {0x89d5, 0x00ce}, {0x89d6, 0x008f},
3194{0x89d7, 0x0070}, {0x89d8, 0x00a6}, {0x89d9, 0x0000},
3195{0x89da, 0x0018}, {0x89db, 0x00e6}, {0x89dc, 0x0000},
3196{0x89dd, 0x0018}, {0x89de, 0x00a7}, {0x89df, 0x0000},
3197{0x89e0, 0x00e7}, {0x89e1, 0x0000}, {0x89e2, 0x00a6},
3198{0x89e3, 0x0001}, {0x89e4, 0x0018}, {0x89e5, 0x00e6},
3199{0x89e6, 0x0001}, {0x89e7, 0x0018}, {0x89e8, 0x00a7},
3200{0x89e9, 0x0001}, {0x89ea, 0x00e7}, {0x89eb, 0x0001},
3201{0x89ec, 0x00a6}, {0x89ed, 0x0002}, {0x89ee, 0x0018},
3202{0x89ef, 0x00e6}, {0x89f0, 0x0002}, {0x89f1, 0x0018},
3203{0x89f2, 0x00a7}, {0x89f3, 0x0002}, {0x89f4, 0x00e7},
3204{0x89f5, 0x0002}, {0x89f6, 0x0039}, {0x89f7, 0x00a6},
3205{0x89f8, 0x0000}, {0x89f9, 0x0084}, {0x89fa, 0x0007},
3206{0x89fb, 0x00e6}, {0x89fc, 0x0000}, {0x89fd, 0x00c4},
3207{0x89fe, 0x0038}, {0x89ff, 0x0054}, {0x8a00, 0x0054},
3208{0x8a01, 0x0054}, {0x8a02, 0x001b}, {0x8a03, 0x00a7},
3209{0x8a04, 0x0000}, {0x8a05, 0x0039}, {0x8a06, 0x004a},
3210{0x8a07, 0x0026}, {0x8a08, 0x00fd}, {0x8a09, 0x0039},
3211{0x8a0a, 0x0096}, {0x8a0b, 0x0022}, {0x8a0c, 0x0084},
3212{0x8a0d, 0x000f}, {0x8a0e, 0x0097}, {0x8a0f, 0x0022},
3213{0x8a10, 0x0086}, {0x8a11, 0x0001}, {0x8a12, 0x00b7},
3214{0x8a13, 0x008f}, {0x8a14, 0x0070}, {0x8a15, 0x00b6},
3215{0x8a16, 0x0012}, {0x8a17, 0x0007}, {0x8a18, 0x00b7},
3216{0x8a19, 0x008f}, {0x8a1a, 0x0071}, {0x8a1b, 0x00f6},
3217{0x8a1c, 0x0012}, {0x8a1d, 0x000c}, {0x8a1e, 0x00c4},
3218{0x8a1f, 0x000f}, {0x8a20, 0x00c8}, {0x8a21, 0x000f},
3219{0x8a22, 0x00f7}, {0x8a23, 0x008f}, {0x8a24, 0x0072},
3220{0x8a25, 0x00f6}, {0x8a26, 0x008f}, {0x8a27, 0x0072},
3221{0x8a28, 0x00b6}, {0x8a29, 0x008f}, {0x8a2a, 0x0071},
3222{0x8a2b, 0x0084}, {0x8a2c, 0x0003}, {0x8a2d, 0x0027},
3223{0x8a2e, 0x0014}, {0x8a2f, 0x0081}, {0x8a30, 0x0001},
3224{0x8a31, 0x0027}, {0x8a32, 0x001c}, {0x8a33, 0x0081},
3225{0x8a34, 0x0002}, {0x8a35, 0x0027}, {0x8a36, 0x0024},
3226{0x8a37, 0x00f4}, {0x8a38, 0x008f}, {0x8a39, 0x0070},
3227{0x8a3a, 0x0027}, {0x8a3b, 0x002a}, {0x8a3c, 0x0096},
3228{0x8a3d, 0x0022}, {0x8a3e, 0x008a}, {0x8a3f, 0x0080},
3229{0x8a40, 0x007e}, {0x8a41, 0x008a}, {0x8a42, 0x0064},
3230{0x8a43, 0x00f4}, {0x8a44, 0x008f}, {0x8a45, 0x0070},
3231{0x8a46, 0x0027}, {0x8a47, 0x001e}, {0x8a48, 0x0096},
3232{0x8a49, 0x0022}, {0x8a4a, 0x008a}, {0x8a4b, 0x0010},
3233{0x8a4c, 0x007e}, {0x8a4d, 0x008a}, {0x8a4e, 0x0064},
3234{0x8a4f, 0x00f4}, {0x8a50, 0x008f}, {0x8a51, 0x0070},
3235{0x8a52, 0x0027}, {0x8a53, 0x0012}, {0x8a54, 0x0096},
3236{0x8a55, 0x0022}, {0x8a56, 0x008a}, {0x8a57, 0x0020},
3237{0x8a58, 0x007e}, {0x8a59, 0x008a}, {0x8a5a, 0x0064},
3238{0x8a5b, 0x00f4}, {0x8a5c, 0x008f}, {0x8a5d, 0x0070},
3239{0x8a5e, 0x0027}, {0x8a5f, 0x0006}, {0x8a60, 0x0096},
3240{0x8a61, 0x0022}, {0x8a62, 0x008a}, {0x8a63, 0x0040},
3241{0x8a64, 0x0097}, {0x8a65, 0x0022}, {0x8a66, 0x0074},
3242{0x8a67, 0x008f}, {0x8a68, 0x0071}, {0x8a69, 0x0074},
3243{0x8a6a, 0x008f}, {0x8a6b, 0x0071}, {0x8a6c, 0x0078},
3244{0x8a6d, 0x008f}, {0x8a6e, 0x0070}, {0x8a6f, 0x00b6},
3245{0x8a70, 0x008f}, {0x8a71, 0x0070}, {0x8a72, 0x0085},
3246{0x8a73, 0x0010}, {0x8a74, 0x0027}, {0x8a75, 0x00af},
3247{0x8a76, 0x00d6}, {0x8a77, 0x0022}, {0x8a78, 0x00c4},
3248{0x8a79, 0x0010}, {0x8a7a, 0x0058}, {0x8a7b, 0x00b6},
3249{0x8a7c, 0x0012}, {0x8a7d, 0x0070}, {0x8a7e, 0x0081},
3250{0x8a7f, 0x00e4}, {0x8a80, 0x0027}, {0x8a81, 0x0036},
3251{0x8a82, 0x0081}, {0x8a83, 0x00e1}, {0x8a84, 0x0026},
3252{0x8a85, 0x000c}, {0x8a86, 0x0096}, {0x8a87, 0x0022},
3253{0x8a88, 0x0084}, {0x8a89, 0x0020}, {0x8a8a, 0x0044},
3254{0x8a8b, 0x001b}, {0x8a8c, 0x00d6}, {0x8a8d, 0x0022},
3255{0x8a8e, 0x00c4}, {0x8a8f, 0x00cf}, {0x8a90, 0x0020},
3256{0x8a91, 0x0023}, {0x8a92, 0x0058}, {0x8a93, 0x0081},
3257{0x8a94, 0x00c6}, {0x8a95, 0x0026}, {0x8a96, 0x000d},
3258{0x8a97, 0x0096}, {0x8a98, 0x0022}, {0x8a99, 0x0084},
3259{0x8a9a, 0x0040}, {0x8a9b, 0x0044}, {0x8a9c, 0x0044},
3260{0x8a9d, 0x001b}, {0x8a9e, 0x00d6}, {0x8a9f, 0x0022},
3261{0x8aa0, 0x00c4}, {0x8aa1, 0x00af}, {0x8aa2, 0x0020},
3262{0x8aa3, 0x0011}, {0x8aa4, 0x0058}, {0x8aa5, 0x0081},
3263{0x8aa6, 0x0027}, {0x8aa7, 0x0026}, {0x8aa8, 0x000f},
3264{0x8aa9, 0x0096}, {0x8aaa, 0x0022}, {0x8aab, 0x0084},
3265{0x8aac, 0x0080}, {0x8aad, 0x0044}, {0x8aae, 0x0044},
3266{0x8aaf, 0x0044}, {0x8ab0, 0x001b}, {0x8ab1, 0x00d6},
3267{0x8ab2, 0x0022}, {0x8ab3, 0x00c4}, {0x8ab4, 0x006f},
3268{0x8ab5, 0x001b}, {0x8ab6, 0x0097}, {0x8ab7, 0x0022},
3269{0x8ab8, 0x0039}, {0x8ab9, 0x0027}, {0x8aba, 0x000c},
3270{0x8abb, 0x007c}, {0x8abc, 0x0082}, {0x8abd, 0x0006},
3271{0x8abe, 0x00bd}, {0x8abf, 0x00d9}, {0x8ac0, 0x00ed},
3272{0x8ac1, 0x00b6}, {0x8ac2, 0x0082}, {0x8ac3, 0x0007},
3273{0x8ac4, 0x007e}, {0x8ac5, 0x008a}, {0x8ac6, 0x00b9},
3274{0x8ac7, 0x007f}, {0x8ac8, 0x0082}, {0x8ac9, 0x0006},
3275{0x8aca, 0x0039}, { 0x0, 0x0 }
3276};
3277#else
3278cas_saturn_patch_t cas_saturn_patch[] = {
3279{0x8200, 0x007e}, {0x8201, 0x0082}, {0x8202, 0x0009},
3280{0x8203, 0x0000}, {0x8204, 0x0000}, {0x8205, 0x0000},
3281{0x8206, 0x0000}, {0x8207, 0x0000}, {0x8208, 0x0000},
3282{0x8209, 0x008e}, {0x820a, 0x008e}, {0x820b, 0x00ff},
3283{0x820c, 0x00ce}, {0x820d, 0x0082}, {0x820e, 0x0025},
3284{0x820f, 0x00ff}, {0x8210, 0x0001}, {0x8211, 0x000f},
3285{0x8212, 0x00ce}, {0x8213, 0x0084}, {0x8214, 0x0026},
3286{0x8215, 0x00ff}, {0x8216, 0x0001}, {0x8217, 0x0011},
3287{0x8218, 0x00ce}, {0x8219, 0x0085}, {0x821a, 0x003d},
3288{0x821b, 0x00df}, {0x821c, 0x00e5}, {0x821d, 0x0086},
3289{0x821e, 0x0039}, {0x821f, 0x00b7}, {0x8220, 0x008f},
3290{0x8221, 0x00f8}, {0x8222, 0x007e}, {0x8223, 0x00c3},
3291{0x8224, 0x00c2}, {0x8225, 0x0096}, {0x8226, 0x0047},
3292{0x8227, 0x0084}, {0x8228, 0x00f3}, {0x8229, 0x008a},
3293{0x822a, 0x0000}, {0x822b, 0x0097}, {0x822c, 0x0047},
3294{0x822d, 0x00ce}, {0x822e, 0x0082}, {0x822f, 0x0033},
3295{0x8230, 0x00ff}, {0x8231, 0x0001}, {0x8232, 0x000f},
3296{0x8233, 0x0096}, {0x8234, 0x0046}, {0x8235, 0x0084},
3297{0x8236, 0x000c}, {0x8237, 0x0081}, {0x8238, 0x0004},
3298{0x8239, 0x0027}, {0x823a, 0x000b}, {0x823b, 0x0096},
3299{0x823c, 0x0046}, {0x823d, 0x0084}, {0x823e, 0x000c},
3300{0x823f, 0x0081}, {0x8240, 0x0008}, {0x8241, 0x0027},
3301{0x8242, 0x0057}, {0x8243, 0x007e}, {0x8244, 0x0084},
3302{0x8245, 0x0025}, {0x8246, 0x0096}, {0x8247, 0x0047},
3303{0x8248, 0x0084}, {0x8249, 0x00f3}, {0x824a, 0x008a},
3304{0x824b, 0x0004}, {0x824c, 0x0097}, {0x824d, 0x0047},
3305{0x824e, 0x00ce}, {0x824f, 0x0082}, {0x8250, 0x0054},
3306{0x8251, 0x00ff}, {0x8252, 0x0001}, {0x8253, 0x000f},
3307{0x8254, 0x0096}, {0x8255, 0x0046}, {0x8256, 0x0084},
3308{0x8257, 0x000c}, {0x8258, 0x0081}, {0x8259, 0x0004},
3309{0x825a, 0x0026}, {0x825b, 0x0038}, {0x825c, 0x00b6},
3310{0x825d, 0x0012}, {0x825e, 0x0020}, {0x825f, 0x0084},
3311{0x8260, 0x0020}, {0x8261, 0x0026}, {0x8262, 0x0003},
3312{0x8263, 0x007e}, {0x8264, 0x0084}, {0x8265, 0x0025},
3313{0x8266, 0x0096}, {0x8267, 0x007b}, {0x8268, 0x00d6},
3314{0x8269, 0x007c}, {0x826a, 0x00fe}, {0x826b, 0x008f},
3315{0x826c, 0x0056}, {0x826d, 0x00bd}, {0x826e, 0x00f7},
3316{0x826f, 0x00b6}, {0x8270, 0x00fe}, {0x8271, 0x008f},
3317{0x8272, 0x004e}, {0x8273, 0x00bd}, {0x8274, 0x00ec},
3318{0x8275, 0x008e}, {0x8276, 0x00bd}, {0x8277, 0x00fa},
3319{0x8278, 0x00f7}, {0x8279, 0x00bd}, {0x827a, 0x00f7},
3320{0x827b, 0x0028}, {0x827c, 0x00ce}, {0x827d, 0x0082},
3321{0x827e, 0x0082}, {0x827f, 0x00ff}, {0x8280, 0x0001},
3322{0x8281, 0x000f}, {0x8282, 0x0096}, {0x8283, 0x0046},
3323{0x8284, 0x0084}, {0x8285, 0x000c}, {0x8286, 0x0081},
3324{0x8287, 0x0004}, {0x8288, 0x0026}, {0x8289, 0x000a},
3325{0x828a, 0x00b6}, {0x828b, 0x0012}, {0x828c, 0x0020},
3326{0x828d, 0x0084}, {0x828e, 0x0020}, {0x828f, 0x0027},
3327{0x8290, 0x00b5}, {0x8291, 0x007e}, {0x8292, 0x0084},
3328{0x8293, 0x0025}, {0x8294, 0x00bd}, {0x8295, 0x00f7},
3329{0x8296, 0x001f}, {0x8297, 0x007e}, {0x8298, 0x0084},
3330{0x8299, 0x001f}, {0x829a, 0x0096}, {0x829b, 0x0047},
3331{0x829c, 0x0084}, {0x829d, 0x00f3}, {0x829e, 0x008a},
3332{0x829f, 0x0008}, {0x82a0, 0x0097}, {0x82a1, 0x0047},
3333{0x82a2, 0x00de}, {0x82a3, 0x00e1}, {0x82a4, 0x00ad},
3334{0x82a5, 0x0000}, {0x82a6, 0x00ce}, {0x82a7, 0x0082},
3335{0x82a8, 0x00af}, {0x82a9, 0x00ff}, {0x82aa, 0x0001},
3336{0x82ab, 0x000f}, {0x82ac, 0x007e}, {0x82ad, 0x0084},
3337{0x82ae, 0x0025}, {0x82af, 0x0096}, {0x82b0, 0x0041},
3338{0x82b1, 0x0085}, {0x82b2, 0x0010}, {0x82b3, 0x0026},
3339{0x82b4, 0x0006}, {0x82b5, 0x0096}, {0x82b6, 0x0023},
3340{0x82b7, 0x0085}, {0x82b8, 0x0040}, {0x82b9, 0x0027},
3341{0x82ba, 0x0006}, {0x82bb, 0x00bd}, {0x82bc, 0x00ed},
3342{0x82bd, 0x0000}, {0x82be, 0x007e}, {0x82bf, 0x0083},
3343{0x82c0, 0x00a2}, {0x82c1, 0x00de}, {0x82c2, 0x0042},
3344{0x82c3, 0x00bd}, {0x82c4, 0x00eb}, {0x82c5, 0x008e},
3345{0x82c6, 0x0096}, {0x82c7, 0x0024}, {0x82c8, 0x0084},
3346{0x82c9, 0x0008}, {0x82ca, 0x0027}, {0x82cb, 0x0003},
3347{0x82cc, 0x007e}, {0x82cd, 0x0083}, {0x82ce, 0x00df},
3348{0x82cf, 0x0096}, {0x82d0, 0x007b}, {0x82d1, 0x00d6},
3349{0x82d2, 0x007c}, {0x82d3, 0x00fe}, {0x82d4, 0x008f},
3350{0x82d5, 0x0056}, {0x82d6, 0x00bd}, {0x82d7, 0x00f7},
3351{0x82d8, 0x00b6}, {0x82d9, 0x00fe}, {0x82da, 0x008f},
3352{0x82db, 0x0050}, {0x82dc, 0x00bd}, {0x82dd, 0x00ec},
3353{0x82de, 0x008e}, {0x82df, 0x00bd}, {0x82e0, 0x00fa},
3354{0x82e1, 0x00f7}, {0x82e2, 0x0086}, {0x82e3, 0x0011},
3355{0x82e4, 0x00c6}, {0x82e5, 0x0049}, {0x82e6, 0x00bd},
3356{0x82e7, 0x00e4}, {0x82e8, 0x0012}, {0x82e9, 0x00ce},
3357{0x82ea, 0x0082}, {0x82eb, 0x00ef}, {0x82ec, 0x00ff},
3358{0x82ed, 0x0001}, {0x82ee, 0x000f}, {0x82ef, 0x0096},
3359{0x82f0, 0x0046}, {0x82f1, 0x0084}, {0x82f2, 0x000c},
3360{0x82f3, 0x0081}, {0x82f4, 0x0000}, {0x82f5, 0x0027},
3361{0x82f6, 0x0017}, {0x82f7, 0x00c6}, {0x82f8, 0x0049},
3362{0x82f9, 0x00bd}, {0x82fa, 0x00e4}, {0x82fb, 0x0091},
3363{0x82fc, 0x0024}, {0x82fd, 0x000d}, {0x82fe, 0x00b6},
3364{0x82ff, 0x0012}, {0x8300, 0x0020}, {0x8301, 0x0085},
3365{0x8302, 0x0020}, {0x8303, 0x0026}, {0x8304, 0x000c},
3366{0x8305, 0x00ce}, {0x8306, 0x0082}, {0x8307, 0x00c1},
3367{0x8308, 0x00ff}, {0x8309, 0x0001}, {0x830a, 0x000f},
3368{0x830b, 0x007e}, {0x830c, 0x0084}, {0x830d, 0x0025},
3369{0x830e, 0x007e}, {0x830f, 0x0084}, {0x8310, 0x0016},
3370{0x8311, 0x00fe}, {0x8312, 0x008f}, {0x8313, 0x0052},
3371{0x8314, 0x00bd}, {0x8315, 0x00ec}, {0x8316, 0x008e},
3372{0x8317, 0x00bd}, {0x8318, 0x00fa}, {0x8319, 0x00f7},
3373{0x831a, 0x0086}, {0x831b, 0x006a}, {0x831c, 0x00c6},
3374{0x831d, 0x0049}, {0x831e, 0x00bd}, {0x831f, 0x00e4},
3375{0x8320, 0x0012}, {0x8321, 0x00ce}, {0x8322, 0x0083},
3376{0x8323, 0x0027}, {0x8324, 0x00ff}, {0x8325, 0x0001},
3377{0x8326, 0x000f}, {0x8327, 0x0096}, {0x8328, 0x0046},
3378{0x8329, 0x0084}, {0x832a, 0x000c}, {0x832b, 0x0081},
3379{0x832c, 0x0000}, {0x832d, 0x0027}, {0x832e, 0x000a},
3380{0x832f, 0x00c6}, {0x8330, 0x0049}, {0x8331, 0x00bd},
3381{0x8332, 0x00e4}, {0x8333, 0x0091}, {0x8334, 0x0025},
3382{0x8335, 0x0006}, {0x8336, 0x007e}, {0x8337, 0x0084},
3383{0x8338, 0x0025}, {0x8339, 0x007e}, {0x833a, 0x0084},
3384{0x833b, 0x0016}, {0x833c, 0x00b6}, {0x833d, 0x0018},
3385{0x833e, 0x0070}, {0x833f, 0x00bb}, {0x8340, 0x0019},
3386{0x8341, 0x0070}, {0x8342, 0x002a}, {0x8343, 0x0004},
3387{0x8344, 0x0081}, {0x8345, 0x00af}, {0x8346, 0x002e},
3388{0x8347, 0x0019}, {0x8348, 0x0096}, {0x8349, 0x007b},
3389{0x834a, 0x00f6}, {0x834b, 0x0020}, {0x834c, 0x0007},
3390{0x834d, 0x00fa}, {0x834e, 0x0020}, {0x834f, 0x0027},
3391{0x8350, 0x00c4}, {0x8351, 0x0038}, {0x8352, 0x0081},
3392{0x8353, 0x0038}, {0x8354, 0x0027}, {0x8355, 0x000b},
3393{0x8356, 0x00f6}, {0x8357, 0x0020}, {0x8358, 0x0007},
3394{0x8359, 0x00fa}, {0x835a, 0x0020}, {0x835b, 0x0027},
3395{0x835c, 0x00cb}, {0x835d, 0x0008}, {0x835e, 0x007e},
3396{0x835f, 0x0082}, {0x8360, 0x00d3}, {0x8361, 0x00bd},
3397{0x8362, 0x00f7}, {0x8363, 0x0066}, {0x8364, 0x0086},
3398{0x8365, 0x0074}, {0x8366, 0x00c6}, {0x8367, 0x0049},
3399{0x8368, 0x00bd}, {0x8369, 0x00e4}, {0x836a, 0x0012},
3400{0x836b, 0x00ce}, {0x836c, 0x0083}, {0x836d, 0x0071},
3401{0x836e, 0x00ff}, {0x836f, 0x0001}, {0x8370, 0x000f},
3402{0x8371, 0x0096}, {0x8372, 0x0046}, {0x8373, 0x0084},
3403{0x8374, 0x000c}, {0x8375, 0x0081}, {0x8376, 0x0008},
3404{0x8377, 0x0026}, {0x8378, 0x000a}, {0x8379, 0x00c6},
3405{0x837a, 0x0049}, {0x837b, 0x00bd}, {0x837c, 0x00e4},
3406{0x837d, 0x0091}, {0x837e, 0x0025}, {0x837f, 0x0006},
3407{0x8380, 0x007e}, {0x8381, 0x0084}, {0x8382, 0x0025},
3408{0x8383, 0x007e}, {0x8384, 0x0084}, {0x8385, 0x0016},
3409{0x8386, 0x00bd}, {0x8387, 0x00f7}, {0x8388, 0x003e},
3410{0x8389, 0x0026}, {0x838a, 0x000e}, {0x838b, 0x00bd},
3411{0x838c, 0x00e5}, {0x838d, 0x0009}, {0x838e, 0x0026},
3412{0x838f, 0x0006}, {0x8390, 0x00ce}, {0x8391, 0x0082},
3413{0x8392, 0x00c1}, {0x8393, 0x00ff}, {0x8394, 0x0001},
3414{0x8395, 0x000f}, {0x8396, 0x007e}, {0x8397, 0x0084},
3415{0x8398, 0x0025}, {0x8399, 0x00fe}, {0x839a, 0x008f},
3416{0x839b, 0x0054}, {0x839c, 0x00bd}, {0x839d, 0x00ec},
3417{0x839e, 0x008e}, {0x839f, 0x00bd}, {0x83a0, 0x00fa},
3418{0x83a1, 0x00f7}, {0x83a2, 0x00bd}, {0x83a3, 0x00f7},
3419{0x83a4, 0x0033}, {0x83a5, 0x0086}, {0x83a6, 0x000f},
3420{0x83a7, 0x00c6}, {0x83a8, 0x0051}, {0x83a9, 0x00bd},
3421{0x83aa, 0x00e4}, {0x83ab, 0x0012}, {0x83ac, 0x00ce},
3422{0x83ad, 0x0083}, {0x83ae, 0x00b2}, {0x83af, 0x00ff},
3423{0x83b0, 0x0001}, {0x83b1, 0x000f}, {0x83b2, 0x0096},
3424{0x83b3, 0x0046}, {0x83b4, 0x0084}, {0x83b5, 0x000c},
3425{0x83b6, 0x0081}, {0x83b7, 0x0008}, {0x83b8, 0x0026},
3426{0x83b9, 0x005c}, {0x83ba, 0x00b6}, {0x83bb, 0x0012},
3427{0x83bc, 0x0020}, {0x83bd, 0x0084}, {0x83be, 0x003f},
3428{0x83bf, 0x0081}, {0x83c0, 0x003a}, {0x83c1, 0x0027},
3429{0x83c2, 0x001c}, {0x83c3, 0x0096}, {0x83c4, 0x0023},
3430{0x83c5, 0x0085}, {0x83c6, 0x0040}, {0x83c7, 0x0027},
3431{0x83c8, 0x0003}, {0x83c9, 0x007e}, {0x83ca, 0x0084},
3432{0x83cb, 0x0025}, {0x83cc, 0x00c6}, {0x83cd, 0x0051},
3433{0x83ce, 0x00bd}, {0x83cf, 0x00e4}, {0x83d0, 0x0091},
3434{0x83d1, 0x0025}, {0x83d2, 0x0003}, {0x83d3, 0x007e},
3435{0x83d4, 0x0084}, {0x83d5, 0x0025}, {0x83d6, 0x00ce},
3436{0x83d7, 0x0082}, {0x83d8, 0x00c1}, {0x83d9, 0x00ff},
3437{0x83da, 0x0001}, {0x83db, 0x000f}, {0x83dc, 0x007e},
3438{0x83dd, 0x0084}, {0x83de, 0x0025}, {0x83df, 0x00bd},
3439{0x83e0, 0x00f8}, {0x83e1, 0x0037}, {0x83e2, 0x007c},
3440{0x83e3, 0x0000}, {0x83e4, 0x007a}, {0x83e5, 0x00ce},
3441{0x83e6, 0x0083}, {0x83e7, 0x00ee}, {0x83e8, 0x00ff},
3442{0x83e9, 0x0001}, {0x83ea, 0x000f}, {0x83eb, 0x007e},
3443{0x83ec, 0x0084}, {0x83ed, 0x0025}, {0x83ee, 0x0096},
3444{0x83ef, 0x0046}, {0x83f0, 0x0084}, {0x83f1, 0x000c},
3445{0x83f2, 0x0081}, {0x83f3, 0x0008}, {0x83f4, 0x0026},
3446{0x83f5, 0x0020}, {0x83f6, 0x0096}, {0x83f7, 0x0024},
3447{0x83f8, 0x0084}, {0x83f9, 0x0008}, {0x83fa, 0x0026},
3448{0x83fb, 0x0029}, {0x83fc, 0x00b6}, {0x83fd, 0x0018},
3449{0x83fe, 0x0082}, {0x83ff, 0x00bb}, {0x8400, 0x0019},
3450{0x8401, 0x0082}, {0x8402, 0x00b1}, {0x8403, 0x0001},
3451{0x8404, 0x003b}, {0x8405, 0x0022}, {0x8406, 0x0009},
3452{0x8407, 0x00b6}, {0x8408, 0x0012}, {0x8409, 0x0020},
3453{0x840a, 0x0084}, {0x840b, 0x0037}, {0x840c, 0x0081},
3454{0x840d, 0x0032}, {0x840e, 0x0027}, {0x840f, 0x0015},
3455{0x8410, 0x00bd}, {0x8411, 0x00f8}, {0x8412, 0x0044},
3456{0x8413, 0x007e}, {0x8414, 0x0082}, {0x8415, 0x00c1},
3457{0x8416, 0x00bd}, {0x8417, 0x00f7}, {0x8418, 0x001f},
3458{0x8419, 0x00bd}, {0x841a, 0x00f8}, {0x841b, 0x0044},
3459{0x841c, 0x00bd}, {0x841d, 0x00fc}, {0x841e, 0x0029},
3460{0x841f, 0x00ce}, {0x8420, 0x0082}, {0x8421, 0x0025},
3461{0x8422, 0x00ff}, {0x8423, 0x0001}, {0x8424, 0x000f},
3462{0x8425, 0x0039}, {0x8426, 0x0096}, {0x8427, 0x0047},
3463{0x8428, 0x0084}, {0x8429, 0x00fc}, {0x842a, 0x008a},
3464{0x842b, 0x0000}, {0x842c, 0x0097}, {0x842d, 0x0047},
3465{0x842e, 0x00ce}, {0x842f, 0x0084}, {0x8430, 0x0034},
3466{0x8431, 0x00ff}, {0x8432, 0x0001}, {0x8433, 0x0011},
3467{0x8434, 0x0096}, {0x8435, 0x0046}, {0x8436, 0x0084},
3468{0x8437, 0x0003}, {0x8438, 0x0081}, {0x8439, 0x0002},
3469{0x843a, 0x0027}, {0x843b, 0x0003}, {0x843c, 0x007e},
3470{0x843d, 0x0085}, {0x843e, 0x001e}, {0x843f, 0x0096},
3471{0x8440, 0x0047}, {0x8441, 0x0084}, {0x8442, 0x00fc},
3472{0x8443, 0x008a}, {0x8444, 0x0002}, {0x8445, 0x0097},
3473{0x8446, 0x0047}, {0x8447, 0x00de}, {0x8448, 0x00e1},
3474{0x8449, 0x00ad}, {0x844a, 0x0000}, {0x844b, 0x0086},
3475{0x844c, 0x0001}, {0x844d, 0x00b7}, {0x844e, 0x0012},
3476{0x844f, 0x0051}, {0x8450, 0x00bd}, {0x8451, 0x00f7},
3477{0x8452, 0x0014}, {0x8453, 0x00b6}, {0x8454, 0x0010},
3478{0x8455, 0x0031}, {0x8456, 0x0084}, {0x8457, 0x00fd},
3479{0x8458, 0x00b7}, {0x8459, 0x0010}, {0x845a, 0x0031},
3480{0x845b, 0x00bd}, {0x845c, 0x00f8}, {0x845d, 0x001e},
3481{0x845e, 0x0096}, {0x845f, 0x0081}, {0x8460, 0x00d6},
3482{0x8461, 0x0082}, {0x8462, 0x00fe}, {0x8463, 0x008f},
3483{0x8464, 0x005a}, {0x8465, 0x00bd}, {0x8466, 0x00f7},
3484{0x8467, 0x00b6}, {0x8468, 0x00fe}, {0x8469, 0x008f},
3485{0x846a, 0x005c}, {0x846b, 0x00bd}, {0x846c, 0x00ec},
3486{0x846d, 0x008e}, {0x846e, 0x00bd}, {0x846f, 0x00fa},
3487{0x8470, 0x00f7}, {0x8471, 0x0086}, {0x8472, 0x0008},
3488{0x8473, 0x00d6}, {0x8474, 0x0000}, {0x8475, 0x00c5},
3489{0x8476, 0x0010}, {0x8477, 0x0026}, {0x8478, 0x0002},
3490{0x8479, 0x008b}, {0x847a, 0x0020}, {0x847b, 0x00c6},
3491{0x847c, 0x0051}, {0x847d, 0x00bd}, {0x847e, 0x00e4},
3492{0x847f, 0x0012}, {0x8480, 0x00ce}, {0x8481, 0x0084},
3493{0x8482, 0x0086}, {0x8483, 0x00ff}, {0x8484, 0x0001},
3494{0x8485, 0x0011}, {0x8486, 0x0096}, {0x8487, 0x0046},
3495{0x8488, 0x0084}, {0x8489, 0x0003}, {0x848a, 0x0081},
3496{0x848b, 0x0002}, {0x848c, 0x0027}, {0x848d, 0x0003},
3497{0x848e, 0x007e}, {0x848f, 0x0085}, {0x8490, 0x000f},
3498{0x8491, 0x00c6}, {0x8492, 0x0051}, {0x8493, 0x00bd},
3499{0x8494, 0x00e4}, {0x8495, 0x0091}, {0x8496, 0x0025},
3500{0x8497, 0x0003}, {0x8498, 0x007e}, {0x8499, 0x0085},
3501{0x849a, 0x001e}, {0x849b, 0x0096}, {0x849c, 0x0044},
3502{0x849d, 0x0085}, {0x849e, 0x0010}, {0x849f, 0x0026},
3503{0x84a0, 0x000a}, {0x84a1, 0x00b6}, {0x84a2, 0x0012},
3504{0x84a3, 0x0050}, {0x84a4, 0x00ba}, {0x84a5, 0x0001},
3505{0x84a6, 0x003c}, {0x84a7, 0x0085}, {0x84a8, 0x0010},
3506{0x84a9, 0x0027}, {0x84aa, 0x00a8}, {0x84ab, 0x00bd},
3507{0x84ac, 0x00f7}, {0x84ad, 0x0066}, {0x84ae, 0x00ce},
3508{0x84af, 0x0084}, {0x84b0, 0x00b7}, {0x84b1, 0x00ff},
3509{0x84b2, 0x0001}, {0x84b3, 0x0011}, {0x84b4, 0x007e},
3510{0x84b5, 0x0085}, {0x84b6, 0x001e}, {0x84b7, 0x0096},
3511{0x84b8, 0x0046}, {0x84b9, 0x0084}, {0x84ba, 0x0003},
3512{0x84bb, 0x0081}, {0x84bc, 0x0002}, {0x84bd, 0x0026},
3513{0x84be, 0x0050}, {0x84bf, 0x00b6}, {0x84c0, 0x0012},
3514{0x84c1, 0x0030}, {0x84c2, 0x0084}, {0x84c3, 0x0003},
3515{0x84c4, 0x0081}, {0x84c5, 0x0001}, {0x84c6, 0x0027},
3516{0x84c7, 0x0003}, {0x84c8, 0x007e}, {0x84c9, 0x0085},
3517{0x84ca, 0x001e}, {0x84cb, 0x0096}, {0x84cc, 0x0044},
3518{0x84cd, 0x0085}, {0x84ce, 0x0010}, {0x84cf, 0x0026},
3519{0x84d0, 0x0013}, {0x84d1, 0x00b6}, {0x84d2, 0x0012},
3520{0x84d3, 0x0050}, {0x84d4, 0x00ba}, {0x84d5, 0x0001},
3521{0x84d6, 0x003c}, {0x84d7, 0x0085}, {0x84d8, 0x0010},
3522{0x84d9, 0x0026}, {0x84da, 0x0009}, {0x84db, 0x00ce},
3523{0x84dc, 0x0084}, {0x84dd, 0x0053}, {0x84de, 0x00ff},
3524{0x84df, 0x0001}, {0x84e0, 0x0011}, {0x84e1, 0x007e},
3525{0x84e2, 0x0085}, {0x84e3, 0x001e}, {0x84e4, 0x00b6},
3526{0x84e5, 0x0010}, {0x84e6, 0x0031}, {0x84e7, 0x008a},
3527{0x84e8, 0x0002}, {0x84e9, 0x00b7}, {0x84ea, 0x0010},
3528{0x84eb, 0x0031}, {0x84ec, 0x00bd}, {0x84ed, 0x0085},
3529{0x84ee, 0x001f}, {0x84ef, 0x00bd}, {0x84f0, 0x00f8},
3530{0x84f1, 0x0037}, {0x84f2, 0x007c}, {0x84f3, 0x0000},
3531{0x84f4, 0x0080}, {0x84f5, 0x00ce}, {0x84f6, 0x0084},
3532{0x84f7, 0x00fe}, {0x84f8, 0x00ff}, {0x84f9, 0x0001},
3533{0x84fa, 0x0011}, {0x84fb, 0x007e}, {0x84fc, 0x0085},
3534{0x84fd, 0x001e}, {0x84fe, 0x0096}, {0x84ff, 0x0046},
3535{0x8500, 0x0084}, {0x8501, 0x0003}, {0x8502, 0x0081},
3536{0x8503, 0x0002}, {0x8504, 0x0026}, {0x8505, 0x0009},
3537{0x8506, 0x00b6}, {0x8507, 0x0012}, {0x8508, 0x0030},
3538{0x8509, 0x0084}, {0x850a, 0x0003}, {0x850b, 0x0081},
3539{0x850c, 0x0001}, {0x850d, 0x0027}, {0x850e, 0x000f},
3540{0x850f, 0x00bd}, {0x8510, 0x00f8}, {0x8511, 0x0044},
3541{0x8512, 0x00bd}, {0x8513, 0x00f7}, {0x8514, 0x000b},
3542{0x8515, 0x00bd}, {0x8516, 0x00fc}, {0x8517, 0x0029},
3543{0x8518, 0x00ce}, {0x8519, 0x0084}, {0x851a, 0x0026},
3544{0x851b, 0x00ff}, {0x851c, 0x0001}, {0x851d, 0x0011},
3545{0x851e, 0x0039}, {0x851f, 0x00d6}, {0x8520, 0x0022},
3546{0x8521, 0x00c4}, {0x8522, 0x000f}, {0x8523, 0x00b6},
3547{0x8524, 0x0012}, {0x8525, 0x0030}, {0x8526, 0x00ba},
3548{0x8527, 0x0012}, {0x8528, 0x0032}, {0x8529, 0x0084},
3549{0x852a, 0x0004}, {0x852b, 0x0027}, {0x852c, 0x000d},
3550{0x852d, 0x0096}, {0x852e, 0x0022}, {0x852f, 0x0085},
3551{0x8530, 0x0004}, {0x8531, 0x0027}, {0x8532, 0x0005},
3552{0x8533, 0x00ca}, {0x8534, 0x0010}, {0x8535, 0x007e},
3553{0x8536, 0x0085}, {0x8537, 0x003a}, {0x8538, 0x00ca},
3554{0x8539, 0x0020}, {0x853a, 0x00d7}, {0x853b, 0x0022},
3555{0x853c, 0x0039}, {0x853d, 0x0086}, {0x853e, 0x0000},
3556{0x853f, 0x0097}, {0x8540, 0x0083}, {0x8541, 0x0018},
3557{0x8542, 0x00ce}, {0x8543, 0x001c}, {0x8544, 0x0000},
3558{0x8545, 0x00bd}, {0x8546, 0x00eb}, {0x8547, 0x0046},
3559{0x8548, 0x0096}, {0x8549, 0x0057}, {0x854a, 0x0085},
3560{0x854b, 0x0001}, {0x854c, 0x0027}, {0x854d, 0x0002},
3561{0x854e, 0x004f}, {0x854f, 0x0039}, {0x8550, 0x0085},
3562{0x8551, 0x0002}, {0x8552, 0x0027}, {0x8553, 0x0001},
3563{0x8554, 0x0039}, {0x8555, 0x007f}, {0x8556, 0x008f},
3564{0x8557, 0x007d}, {0x8558, 0x0086}, {0x8559, 0x0004},
3565{0x855a, 0x00b7}, {0x855b, 0x0012}, {0x855c, 0x0004},
3566{0x855d, 0x0086}, {0x855e, 0x0008}, {0x855f, 0x00b7},
3567{0x8560, 0x0012}, {0x8561, 0x0007}, {0x8562, 0x0086},
3568{0x8563, 0x0010}, {0x8564, 0x00b7}, {0x8565, 0x0012},
3569{0x8566, 0x000c}, {0x8567, 0x0086}, {0x8568, 0x0007},
3570{0x8569, 0x00b7}, {0x856a, 0x0012}, {0x856b, 0x0006},
3571{0x856c, 0x00b6}, {0x856d, 0x008f}, {0x856e, 0x007d},
3572{0x856f, 0x00b7}, {0x8570, 0x0012}, {0x8571, 0x0070},
3573{0x8572, 0x0086}, {0x8573, 0x0001}, {0x8574, 0x00ba},
3574{0x8575, 0x0012}, {0x8576, 0x0004}, {0x8577, 0x00b7},
3575{0x8578, 0x0012}, {0x8579, 0x0004}, {0x857a, 0x0001},
3576{0x857b, 0x0001}, {0x857c, 0x0001}, {0x857d, 0x0001},
3577{0x857e, 0x0001}, {0x857f, 0x0001}, {0x8580, 0x00b6},
3578{0x8581, 0x0012}, {0x8582, 0x0004}, {0x8583, 0x0084},
3579{0x8584, 0x00fe}, {0x8585, 0x008a}, {0x8586, 0x0002},
3580{0x8587, 0x00b7}, {0x8588, 0x0012}, {0x8589, 0x0004},
3581{0x858a, 0x0001}, {0x858b, 0x0001}, {0x858c, 0x0001},
3582{0x858d, 0x0001}, {0x858e, 0x0001}, {0x858f, 0x0001},
3583{0x8590, 0x0086}, {0x8591, 0x00fd}, {0x8592, 0x00b4},
3584{0x8593, 0x0012}, {0x8594, 0x0004}, {0x8595, 0x00b7},
3585{0x8596, 0x0012}, {0x8597, 0x0004}, {0x8598, 0x00b6},
3586{0x8599, 0x0012}, {0x859a, 0x0000}, {0x859b, 0x0084},
3587{0x859c, 0x0008}, {0x859d, 0x0081}, {0x859e, 0x0008},
3588{0x859f, 0x0027}, {0x85a0, 0x0016}, {0x85a1, 0x00b6},
3589{0x85a2, 0x008f}, {0x85a3, 0x007d}, {0x85a4, 0x0081},
3590{0x85a5, 0x000c}, {0x85a6, 0x0027}, {0x85a7, 0x0008},
3591{0x85a8, 0x008b}, {0x85a9, 0x0004}, {0x85aa, 0x00b7},
3592{0x85ab, 0x008f}, {0x85ac, 0x007d}, {0x85ad, 0x007e},
3593{0x85ae, 0x0085}, {0x85af, 0x006c}, {0x85b0, 0x0086},
3594{0x85b1, 0x0003}, {0x85b2, 0x0097}, {0x85b3, 0x0040},
3595{0x85b4, 0x007e}, {0x85b5, 0x0089}, {0x85b6, 0x006e},
3596{0x85b7, 0x0086}, {0x85b8, 0x0007}, {0x85b9, 0x00b7},
3597{0x85ba, 0x0012}, {0x85bb, 0x0006}, {0x85bc, 0x005f},
3598{0x85bd, 0x00f7}, {0x85be, 0x008f}, {0x85bf, 0x0082},
3599{0x85c0, 0x005f}, {0x85c1, 0x00f7}, {0x85c2, 0x008f},
3600{0x85c3, 0x007f}, {0x85c4, 0x00f7}, {0x85c5, 0x008f},
3601{0x85c6, 0x0070}, {0x85c7, 0x00f7}, {0x85c8, 0x008f},
3602{0x85c9, 0x0071}, {0x85ca, 0x00f7}, {0x85cb, 0x008f},
3603{0x85cc, 0x0072}, {0x85cd, 0x00f7}, {0x85ce, 0x008f},
3604{0x85cf, 0x0073}, {0x85d0, 0x00f7}, {0x85d1, 0x008f},
3605{0x85d2, 0x0074}, {0x85d3, 0x00f7}, {0x85d4, 0x008f},
3606{0x85d5, 0x0075}, {0x85d6, 0x00f7}, {0x85d7, 0x008f},
3607{0x85d8, 0x0076}, {0x85d9, 0x00f7}, {0x85da, 0x008f},
3608{0x85db, 0x0077}, {0x85dc, 0x00f7}, {0x85dd, 0x008f},
3609{0x85de, 0x0078}, {0x85df, 0x00f7}, {0x85e0, 0x008f},
3610{0x85e1, 0x0079}, {0x85e2, 0x00f7}, {0x85e3, 0x008f},
3611{0x85e4, 0x007a}, {0x85e5, 0x00f7}, {0x85e6, 0x008f},
3612{0x85e7, 0x007b}, {0x85e8, 0x00b6}, {0x85e9, 0x0012},
3613{0x85ea, 0x0004}, {0x85eb, 0x008a}, {0x85ec, 0x0010},
3614{0x85ed, 0x00b7}, {0x85ee, 0x0012}, {0x85ef, 0x0004},
3615{0x85f0, 0x0086}, {0x85f1, 0x00e4}, {0x85f2, 0x00b7},
3616{0x85f3, 0x0012}, {0x85f4, 0x0070}, {0x85f5, 0x00b7},
3617{0x85f6, 0x0012}, {0x85f7, 0x0007}, {0x85f8, 0x00f7},
3618{0x85f9, 0x0012}, {0x85fa, 0x0005}, {0x85fb, 0x00f7},
3619{0x85fc, 0x0012}, {0x85fd, 0x0009}, {0x85fe, 0x0086},
3620{0x85ff, 0x0008}, {0x8600, 0x00ba}, {0x8601, 0x0012},
3621{0x8602, 0x0004}, {0x8603, 0x00b7}, {0x8604, 0x0012},
3622{0x8605, 0x0004}, {0x8606, 0x0086}, {0x8607, 0x00f7},
3623{0x8608, 0x00b4}, {0x8609, 0x0012}, {0x860a, 0x0004},
3624{0x860b, 0x00b7}, {0x860c, 0x0012}, {0x860d, 0x0004},
3625{0x860e, 0x0001}, {0x860f, 0x0001}, {0x8610, 0x0001},
3626{0x8611, 0x0001}, {0x8612, 0x0001}, {0x8613, 0x0001},
3627{0x8614, 0x00b6}, {0x8615, 0x0012}, {0x8616, 0x0008},
3628{0x8617, 0x0027}, {0x8618, 0x007f}, {0x8619, 0x0081},
3629{0x861a, 0x0080}, {0x861b, 0x0026}, {0x861c, 0x000b},
3630{0x861d, 0x0086}, {0x861e, 0x0008}, {0x861f, 0x00ce},
3631{0x8620, 0x008f}, {0x8621, 0x0079}, {0x8622, 0x00bd},
3632{0x8623, 0x0089}, {0x8624, 0x007b}, {0x8625, 0x007e},
3633{0x8626, 0x0086}, {0x8627, 0x008e}, {0x8628, 0x0081},
3634{0x8629, 0x0040}, {0x862a, 0x0026}, {0x862b, 0x000b},
3635{0x862c, 0x0086}, {0x862d, 0x0004}, {0x862e, 0x00ce},
3636{0x862f, 0x008f}, {0x8630, 0x0076}, {0x8631, 0x00bd},
3637{0x8632, 0x0089}, {0x8633, 0x007b}, {0x8634, 0x007e},
3638{0x8635, 0x0086}, {0x8636, 0x008e}, {0x8637, 0x0081},
3639{0x8638, 0x0020}, {0x8639, 0x0026}, {0x863a, 0x000b},
3640{0x863b, 0x0086}, {0x863c, 0x0002}, {0x863d, 0x00ce},
3641{0x863e, 0x008f}, {0x863f, 0x0073}, {0x8640, 0x00bd},
3642{0x8641, 0x0089}, {0x8642, 0x007b}, {0x8643, 0x007e},
3643{0x8644, 0x0086}, {0x8645, 0x008e}, {0x8646, 0x0081},
3644{0x8647, 0x0010}, {0x8648, 0x0026}, {0x8649, 0x000b},
3645{0x864a, 0x0086}, {0x864b, 0x0001}, {0x864c, 0x00ce},
3646{0x864d, 0x008f}, {0x864e, 0x0070}, {0x864f, 0x00bd},
3647{0x8650, 0x0089}, {0x8651, 0x007b}, {0x8652, 0x007e},
3648{0x8653, 0x0086}, {0x8654, 0x008e}, {0x8655, 0x0081},
3649{0x8656, 0x0008}, {0x8657, 0x0026}, {0x8658, 0x000b},
3650{0x8659, 0x0086}, {0x865a, 0x0008}, {0x865b, 0x00ce},
3651{0x865c, 0x008f}, {0x865d, 0x0079}, {0x865e, 0x00bd},
3652{0x865f, 0x0089}, {0x8660, 0x007f}, {0x8661, 0x007e},
3653{0x8662, 0x0086}, {0x8663, 0x008e}, {0x8664, 0x0081},
3654{0x8665, 0x0004}, {0x8666, 0x0026}, {0x8667, 0x000b},
3655{0x8668, 0x0086}, {0x8669, 0x0004}, {0x866a, 0x00ce},
3656{0x866b, 0x008f}, {0x866c, 0x0076}, {0x866d, 0x00bd},
3657{0x866e, 0x0089}, {0x866f, 0x007f}, {0x8670, 0x007e},
3658{0x8671, 0x0086}, {0x8672, 0x008e}, {0x8673, 0x0081},
3659{0x8674, 0x0002}, {0x8675, 0x0026}, {0x8676, 0x000b},
3660{0x8677, 0x008a}, {0x8678, 0x0002}, {0x8679, 0x00ce},
3661{0x867a, 0x008f}, {0x867b, 0x0073}, {0x867c, 0x00bd},
3662{0x867d, 0x0089}, {0x867e, 0x007f}, {0x867f, 0x007e},
3663{0x8680, 0x0086}, {0x8681, 0x008e}, {0x8682, 0x0081},
3664{0x8683, 0x0001}, {0x8684, 0x0026}, {0x8685, 0x0008},
3665{0x8686, 0x0086}, {0x8687, 0x0001}, {0x8688, 0x00ce},
3666{0x8689, 0x008f}, {0x868a, 0x0070}, {0x868b, 0x00bd},
3667{0x868c, 0x0089}, {0x868d, 0x007f}, {0x868e, 0x00b6},
3668{0x868f, 0x008f}, {0x8690, 0x007f}, {0x8691, 0x0081},
3669{0x8692, 0x000f}, {0x8693, 0x0026}, {0x8694, 0x0003},
3670{0x8695, 0x007e}, {0x8696, 0x0087}, {0x8697, 0x0047},
3671{0x8698, 0x00b6}, {0x8699, 0x0012}, {0x869a, 0x0009},
3672{0x869b, 0x0084}, {0x869c, 0x0003}, {0x869d, 0x0081},
3673{0x869e, 0x0003}, {0x869f, 0x0027}, {0x86a0, 0x0006},
3674{0x86a1, 0x007c}, {0x86a2, 0x0012}, {0x86a3, 0x0009},
3675{0x86a4, 0x007e}, {0x86a5, 0x0085}, {0x86a6, 0x00fe},
3676{0x86a7, 0x00b6}, {0x86a8, 0x0012}, {0x86a9, 0x0006},
3677{0x86aa, 0x0084}, {0x86ab, 0x0007}, {0x86ac, 0x0081},
3678{0x86ad, 0x0007}, {0x86ae, 0x0027}, {0x86af, 0x0008},
3679{0x86b0, 0x008b}, {0x86b1, 0x0001}, {0x86b2, 0x00b7},
3680{0x86b3, 0x0012}, {0x86b4, 0x0006}, {0x86b5, 0x007e},
3681{0x86b6, 0x0086}, {0x86b7, 0x00d5}, {0x86b8, 0x00b6},
3682{0x86b9, 0x008f}, {0x86ba, 0x0082}, {0x86bb, 0x0026},
3683{0x86bc, 0x000a}, {0x86bd, 0x007c}, {0x86be, 0x008f},
3684{0x86bf, 0x0082}, {0x86c0, 0x004f}, {0x86c1, 0x00b7},
3685{0x86c2, 0x0012}, {0x86c3, 0x0006}, {0x86c4, 0x007e},
3686{0x86c5, 0x0085}, {0x86c6, 0x00c0}, {0x86c7, 0x00b6},
3687{0x86c8, 0x0012}, {0x86c9, 0x0006}, {0x86ca, 0x0084},
3688{0x86cb, 0x003f}, {0x86cc, 0x0081}, {0x86cd, 0x003f},
3689{0x86ce, 0x0027}, {0x86cf, 0x0010}, {0x86d0, 0x008b},
3690{0x86d1, 0x0008}, {0x86d2, 0x00b7}, {0x86d3, 0x0012},
3691{0x86d4, 0x0006}, {0x86d5, 0x00b6}, {0x86d6, 0x0012},
3692{0x86d7, 0x0009}, {0x86d8, 0x0084}, {0x86d9, 0x00fc},
3693{0x86da, 0x00b7}, {0x86db, 0x0012}, {0x86dc, 0x0009},
3694{0x86dd, 0x007e}, {0x86de, 0x0085}, {0x86df, 0x00fe},
3695{0x86e0, 0x00ce}, {0x86e1, 0x008f}, {0x86e2, 0x0070},
3696{0x86e3, 0x0018}, {0x86e4, 0x00ce}, {0x86e5, 0x008f},
3697{0x86e6, 0x0084}, {0x86e7, 0x00c6}, {0x86e8, 0x000c},
3698{0x86e9, 0x00bd}, {0x86ea, 0x0089}, {0x86eb, 0x006f},
3699{0x86ec, 0x00ce}, {0x86ed, 0x008f}, {0x86ee, 0x0084},
3700{0x86ef, 0x0018}, {0x86f0, 0x00ce}, {0x86f1, 0x008f},
3701{0x86f2, 0x0070}, {0x86f3, 0x00c6}, {0x86f4, 0x000c},
3702{0x86f5, 0x00bd}, {0x86f6, 0x0089}, {0x86f7, 0x006f},
3703{0x86f8, 0x00d6}, {0x86f9, 0x0083}, {0x86fa, 0x00c1},
3704{0x86fb, 0x004f}, {0x86fc, 0x002d}, {0x86fd, 0x0003},
3705{0x86fe, 0x007e}, {0x86ff, 0x0087}, {0x8700, 0x0040},
3706{0x8701, 0x00b6}, {0x8702, 0x008f}, {0x8703, 0x007f},
3707{0x8704, 0x0081}, {0x8705, 0x0007}, {0x8706, 0x0027},
3708{0x8707, 0x000f}, {0x8708, 0x0081}, {0x8709, 0x000b},
3709{0x870a, 0x0027}, {0x870b, 0x0015}, {0x870c, 0x0081},
3710{0x870d, 0x000d}, {0x870e, 0x0027}, {0x870f, 0x001b},
3711{0x8710, 0x0081}, {0x8711, 0x000e}, {0x8712, 0x0027},
3712{0x8713, 0x0021}, {0x8714, 0x007e}, {0x8715, 0x0087},
3713{0x8716, 0x0040}, {0x8717, 0x00f7}, {0x8718, 0x008f},
3714{0x8719, 0x007b}, {0x871a, 0x0086}, {0x871b, 0x0002},
3715{0x871c, 0x00b7}, {0x871d, 0x008f}, {0x871e, 0x007a},
3716{0x871f, 0x0020}, {0x8720, 0x001c}, {0x8721, 0x00f7},
3717{0x8722, 0x008f}, {0x8723, 0x0078}, {0x8724, 0x0086},
3718{0x8725, 0x0002}, {0x8726, 0x00b7}, {0x8727, 0x008f},
3719{0x8728, 0x0077}, {0x8729, 0x0020}, {0x872a, 0x0012},
3720{0x872b, 0x00f7}, {0x872c, 0x008f}, {0x872d, 0x0075},
3721{0x872e, 0x0086}, {0x872f, 0x0002}, {0x8730, 0x00b7},
3722{0x8731, 0x008f}, {0x8732, 0x0074}, {0x8733, 0x0020},
3723{0x8734, 0x0008}, {0x8735, 0x00f7}, {0x8736, 0x008f},
3724{0x8737, 0x0072}, {0x8738, 0x0086}, {0x8739, 0x0002},
3725{0x873a, 0x00b7}, {0x873b, 0x008f}, {0x873c, 0x0071},
3726{0x873d, 0x007e}, {0x873e, 0x0087}, {0x873f, 0x0047},
3727{0x8740, 0x0086}, {0x8741, 0x0004}, {0x8742, 0x0097},
3728{0x8743, 0x0040}, {0x8744, 0x007e}, {0x8745, 0x0089},
3729{0x8746, 0x006e}, {0x8747, 0x00ce}, {0x8748, 0x008f},
3730{0x8749, 0x0072}, {0x874a, 0x00bd}, {0x874b, 0x0089},
3731{0x874c, 0x00f7}, {0x874d, 0x00ce}, {0x874e, 0x008f},
3732{0x874f, 0x0075}, {0x8750, 0x00bd}, {0x8751, 0x0089},
3733{0x8752, 0x00f7}, {0x8753, 0x00ce}, {0x8754, 0x008f},
3734{0x8755, 0x0078}, {0x8756, 0x00bd}, {0x8757, 0x0089},
3735{0x8758, 0x00f7}, {0x8759, 0x00ce}, {0x875a, 0x008f},
3736{0x875b, 0x007b}, {0x875c, 0x00bd}, {0x875d, 0x0089},
3737{0x875e, 0x00f7}, {0x875f, 0x004f}, {0x8760, 0x00b7},
3738{0x8761, 0x008f}, {0x8762, 0x007d}, {0x8763, 0x00b7},
3739{0x8764, 0x008f}, {0x8765, 0x0081}, {0x8766, 0x00b6},
3740{0x8767, 0x008f}, {0x8768, 0x0072}, {0x8769, 0x0027},
3741{0x876a, 0x0047}, {0x876b, 0x007c}, {0x876c, 0x008f},
3742{0x876d, 0x007d}, {0x876e, 0x00b6}, {0x876f, 0x008f},
3743{0x8770, 0x0075}, {0x8771, 0x0027}, {0x8772, 0x003f},
3744{0x8773, 0x007c}, {0x8774, 0x008f}, {0x8775, 0x007d},
3745{0x8776, 0x00b6}, {0x8777, 0x008f}, {0x8778, 0x0078},
3746{0x8779, 0x0027}, {0x877a, 0x0037}, {0x877b, 0x007c},
3747{0x877c, 0x008f}, {0x877d, 0x007d}, {0x877e, 0x00b6},
3748{0x877f, 0x008f}, {0x8780, 0x007b}, {0x8781, 0x0027},
3749{0x8782, 0x002f}, {0x8783, 0x007f}, {0x8784, 0x008f},
3750{0x8785, 0x007d}, {0x8786, 0x007c}, {0x8787, 0x008f},
3751{0x8788, 0x0081}, {0x8789, 0x007a}, {0x878a, 0x008f},
3752{0x878b, 0x0072}, {0x878c, 0x0027}, {0x878d, 0x001b},
3753{0x878e, 0x007c}, {0x878f, 0x008f}, {0x8790, 0x007d},
3754{0x8791, 0x007a}, {0x8792, 0x008f}, {0x8793, 0x0075},
3755{0x8794, 0x0027}, {0x8795, 0x0016}, {0x8796, 0x007c},
3756{0x8797, 0x008f}, {0x8798, 0x007d}, {0x8799, 0x007a},
3757{0x879a, 0x008f}, {0x879b, 0x0078}, {0x879c, 0x0027},
3758{0x879d, 0x0011}, {0x879e, 0x007c}, {0x879f, 0x008f},
3759{0x87a0, 0x007d}, {0x87a1, 0x007a}, {0x87a2, 0x008f},
3760{0x87a3, 0x007b}, {0x87a4, 0x0027}, {0x87a5, 0x000c},
3761{0x87a6, 0x007e}, {0x87a7, 0x0087}, {0x87a8, 0x0083},
3762{0x87a9, 0x007a}, {0x87aa, 0x008f}, {0x87ab, 0x0075},
3763{0x87ac, 0x007a}, {0x87ad, 0x008f}, {0x87ae, 0x0078},
3764{0x87af, 0x007a}, {0x87b0, 0x008f}, {0x87b1, 0x007b},
3765{0x87b2, 0x00ce}, {0x87b3, 0x00c1}, {0x87b4, 0x00fc},
3766{0x87b5, 0x00f6}, {0x87b6, 0x008f}, {0x87b7, 0x007d},
3767{0x87b8, 0x003a}, {0x87b9, 0x00a6}, {0x87ba, 0x0000},
3768{0x87bb, 0x00b7}, {0x87bc, 0x0012}, {0x87bd, 0x0070},
3769{0x87be, 0x00b6}, {0x87bf, 0x008f}, {0x87c0, 0x0072},
3770{0x87c1, 0x0026}, {0x87c2, 0x0003}, {0x87c3, 0x007e},
3771{0x87c4, 0x0087}, {0x87c5, 0x00fa}, {0x87c6, 0x00b6},
3772{0x87c7, 0x008f}, {0x87c8, 0x0075}, {0x87c9, 0x0026},
3773{0x87ca, 0x000a}, {0x87cb, 0x0018}, {0x87cc, 0x00ce},
3774{0x87cd, 0x008f}, {0x87ce, 0x0073}, {0x87cf, 0x00bd},
3775{0x87d0, 0x0089}, {0x87d1, 0x00d5}, {0x87d2, 0x007e},
3776{0x87d3, 0x0087}, {0x87d4, 0x00fa}, {0x87d5, 0x00b6},
3777{0x87d6, 0x008f}, {0x87d7, 0x0078}, {0x87d8, 0x0026},
3778{0x87d9, 0x000a}, {0x87da, 0x0018}, {0x87db, 0x00ce},
3779{0x87dc, 0x008f}, {0x87dd, 0x0076}, {0x87de, 0x00bd},
3780{0x87df, 0x0089}, {0x87e0, 0x00d5}, {0x87e1, 0x007e},
3781{0x87e2, 0x0087}, {0x87e3, 0x00fa}, {0x87e4, 0x00b6},
3782{0x87e5, 0x008f}, {0x87e6, 0x007b}, {0x87e7, 0x0026},
3783{0x87e8, 0x000a}, {0x87e9, 0x0018}, {0x87ea, 0x00ce},
3784{0x87eb, 0x008f}, {0x87ec, 0x0079}, {0x87ed, 0x00bd},
3785{0x87ee, 0x0089}, {0x87ef, 0x00d5}, {0x87f0, 0x007e},
3786{0x87f1, 0x0087}, {0x87f2, 0x00fa}, {0x87f3, 0x0086},
3787{0x87f4, 0x0005}, {0x87f5, 0x0097}, {0x87f6, 0x0040},
3788{0x87f7, 0x007e}, {0x87f8, 0x0089}, {0x87f9, 0x006e},
3789{0x87fa, 0x00b6}, {0x87fb, 0x008f}, {0x87fc, 0x0075},
3790{0x87fd, 0x0081}, {0x87fe, 0x0007}, {0x87ff, 0x002e},
3791{0x8800, 0x00f2}, {0x8801, 0x00f6}, {0x8802, 0x0012},
3792{0x8803, 0x0006}, {0x8804, 0x00c4}, {0x8805, 0x00f8},
3793{0x8806, 0x001b}, {0x8807, 0x00b7}, {0x8808, 0x0012},
3794{0x8809, 0x0006}, {0x880a, 0x00b6}, {0x880b, 0x008f},
3795{0x880c, 0x0078}, {0x880d, 0x0081}, {0x880e, 0x0007},
3796{0x880f, 0x002e}, {0x8810, 0x00e2}, {0x8811, 0x0048},
3797{0x8812, 0x0048}, {0x8813, 0x0048}, {0x8814, 0x00f6},
3798{0x8815, 0x0012}, {0x8816, 0x0006}, {0x8817, 0x00c4},
3799{0x8818, 0x00c7}, {0x8819, 0x001b}, {0x881a, 0x00b7},
3800{0x881b, 0x0012}, {0x881c, 0x0006}, {0x881d, 0x00b6},
3801{0x881e, 0x008f}, {0x881f, 0x007b}, {0x8820, 0x0081},
3802{0x8821, 0x0007}, {0x8822, 0x002e}, {0x8823, 0x00cf},
3803{0x8824, 0x00f6}, {0x8825, 0x0012}, {0x8826, 0x0005},
3804{0x8827, 0x00c4}, {0x8828, 0x00f8}, {0x8829, 0x001b},
3805{0x882a, 0x00b7}, {0x882b, 0x0012}, {0x882c, 0x0005},
3806{0x882d, 0x0086}, {0x882e, 0x0000}, {0x882f, 0x00f6},
3807{0x8830, 0x008f}, {0x8831, 0x0071}, {0x8832, 0x00bd},
3808{0x8833, 0x0089}, {0x8834, 0x0094}, {0x8835, 0x0086},
3809{0x8836, 0x0001}, {0x8837, 0x00f6}, {0x8838, 0x008f},
3810{0x8839, 0x0074}, {0x883a, 0x00bd}, {0x883b, 0x0089},
3811{0x883c, 0x0094}, {0x883d, 0x0086}, {0x883e, 0x0002},
3812{0x883f, 0x00f6}, {0x8840, 0x008f}, {0x8841, 0x0077},
3813{0x8842, 0x00bd}, {0x8843, 0x0089}, {0x8844, 0x0094},
3814{0x8845, 0x0086}, {0x8846, 0x0003}, {0x8847, 0x00f6},
3815{0x8848, 0x008f}, {0x8849, 0x007a}, {0x884a, 0x00bd},
3816{0x884b, 0x0089}, {0x884c, 0x0094}, {0x884d, 0x00ce},
3817{0x884e, 0x008f}, {0x884f, 0x0070}, {0x8850, 0x00a6},
3818{0x8851, 0x0001}, {0x8852, 0x0081}, {0x8853, 0x0001},
3819{0x8854, 0x0027}, {0x8855, 0x0007}, {0x8856, 0x0081},
3820{0x8857, 0x0003}, {0x8858, 0x0027}, {0x8859, 0x0003},
3821{0x885a, 0x007e}, {0x885b, 0x0088}, {0x885c, 0x0066},
3822{0x885d, 0x00a6}, {0x885e, 0x0000}, {0x885f, 0x00b8},
3823{0x8860, 0x008f}, {0x8861, 0x0081}, {0x8862, 0x0084},
3824{0x8863, 0x0001}, {0x8864, 0x0026}, {0x8865, 0x000b},
3825{0x8866, 0x008c}, {0x8867, 0x008f}, {0x8868, 0x0079},
3826{0x8869, 0x002c}, {0x886a, 0x000e}, {0x886b, 0x0008},
3827{0x886c, 0x0008}, {0x886d, 0x0008}, {0x886e, 0x007e},
3828{0x886f, 0x0088}, {0x8870, 0x0050}, {0x8871, 0x00b6},
3829{0x8872, 0x0012}, {0x8873, 0x0004}, {0x8874, 0x008a},
3830{0x8875, 0x0040}, {0x8876, 0x00b7}, {0x8877, 0x0012},
3831{0x8878, 0x0004}, {0x8879, 0x00b6}, {0x887a, 0x0012},
3832{0x887b, 0x0004}, {0x887c, 0x0084}, {0x887d, 0x00fb},
3833{0x887e, 0x0084}, {0x887f, 0x00ef}, {0x8880, 0x00b7},
3834{0x8881, 0x0012}, {0x8882, 0x0004}, {0x8883, 0x00b6},
3835{0x8884, 0x0012}, {0x8885, 0x0007}, {0x8886, 0x0036},
3836{0x8887, 0x00b6}, {0x8888, 0x008f}, {0x8889, 0x007c},
3837{0x888a, 0x0048}, {0x888b, 0x0048}, {0x888c, 0x00b7},
3838{0x888d, 0x0012}, {0x888e, 0x0007}, {0x888f, 0x0086},
3839{0x8890, 0x0001}, {0x8891, 0x00ba}, {0x8892, 0x0012},
3840{0x8893, 0x0004}, {0x8894, 0x00b7}, {0x8895, 0x0012},
3841{0x8896, 0x0004}, {0x8897, 0x0001}, {0x8898, 0x0001},
3842{0x8899, 0x0001}, {0x889a, 0x0001}, {0x889b, 0x0001},
3843{0x889c, 0x0001}, {0x889d, 0x0086}, {0x889e, 0x00fe},
3844{0x889f, 0x00b4}, {0x88a0, 0x0012}, {0x88a1, 0x0004},
3845{0x88a2, 0x00b7}, {0x88a3, 0x0012}, {0x88a4, 0x0004},
3846{0x88a5, 0x0086}, {0x88a6, 0x0002}, {0x88a7, 0x00ba},
3847{0x88a8, 0x0012}, {0x88a9, 0x0004}, {0x88aa, 0x00b7},
3848{0x88ab, 0x0012}, {0x88ac, 0x0004}, {0x88ad, 0x0086},
3849{0x88ae, 0x00fd}, {0x88af, 0x00b4}, {0x88b0, 0x0012},
3850{0x88b1, 0x0004}, {0x88b2, 0x00b7}, {0x88b3, 0x0012},
3851{0x88b4, 0x0004}, {0x88b5, 0x0032}, {0x88b6, 0x00b7},
3852{0x88b7, 0x0012}, {0x88b8, 0x0007}, {0x88b9, 0x00b6},
3853{0x88ba, 0x0012}, {0x88bb, 0x0000}, {0x88bc, 0x0084},
3854{0x88bd, 0x0008}, {0x88be, 0x0081}, {0x88bf, 0x0008},
3855{0x88c0, 0x0027}, {0x88c1, 0x000f}, {0x88c2, 0x007c},
3856{0x88c3, 0x0082}, {0x88c4, 0x0008}, {0x88c5, 0x0026},
3857{0x88c6, 0x0007}, {0x88c7, 0x0086}, {0x88c8, 0x0076},
3858{0x88c9, 0x0097}, {0x88ca, 0x0040}, {0x88cb, 0x007e},
3859{0x88cc, 0x0089}, {0x88cd, 0x006e}, {0x88ce, 0x007e},
3860{0x88cf, 0x0086}, {0x88d0, 0x00ec}, {0x88d1, 0x00b6},
3861{0x88d2, 0x008f}, {0x88d3, 0x007f}, {0x88d4, 0x0081},
3862{0x88d5, 0x000f}, {0x88d6, 0x0027}, {0x88d7, 0x003c},
3863{0x88d8, 0x00bd}, {0x88d9, 0x00e6}, {0x88da, 0x00c7},
3864{0x88db, 0x00b7}, {0x88dc, 0x0012}, {0x88dd, 0x000d},
3865{0x88de, 0x00bd}, {0x88df, 0x00e6}, {0x88e0, 0x00cb},
3866{0x88e1, 0x00b6}, {0x88e2, 0x0012}, {0x88e3, 0x0004},
3867{0x88e4, 0x008a}, {0x88e5, 0x0020}, {0x88e6, 0x00b7},
3868{0x88e7, 0x0012}, {0x88e8, 0x0004}, {0x88e9, 0x00ce},
3869{0x88ea, 0x00ff}, {0x88eb, 0x00ff}, {0x88ec, 0x00b6},
3870{0x88ed, 0x0012}, {0x88ee, 0x0000}, {0x88ef, 0x0081},
3871{0x88f0, 0x000c}, {0x88f1, 0x0026}, {0x88f2, 0x0005},
3872{0x88f3, 0x0009}, {0x88f4, 0x0026}, {0x88f5, 0x00f6},
3873{0x88f6, 0x0027}, {0x88f7, 0x001c}, {0x88f8, 0x00b6},
3874{0x88f9, 0x0012}, {0x88fa, 0x0004}, {0x88fb, 0x0084},
3875{0x88fc, 0x00df}, {0x88fd, 0x00b7}, {0x88fe, 0x0012},
3876{0x88ff, 0x0004}, {0x8900, 0x0096}, {0x8901, 0x0083},
3877{0x8902, 0x0081}, {0x8903, 0x0007}, {0x8904, 0x002c},
3878{0x8905, 0x0005}, {0x8906, 0x007c}, {0x8907, 0x0000},
3879{0x8908, 0x0083}, {0x8909, 0x0020}, {0x890a, 0x0006},
3880{0x890b, 0x0096}, {0x890c, 0x0083}, {0x890d, 0x008b},
3881{0x890e, 0x0008}, {0x890f, 0x0097}, {0x8910, 0x0083},
3882{0x8911, 0x007e}, {0x8912, 0x0085}, {0x8913, 0x0041},
3883{0x8914, 0x007f}, {0x8915, 0x008f}, {0x8916, 0x007e},
3884{0x8917, 0x0086}, {0x8918, 0x0080}, {0x8919, 0x00b7},
3885{0x891a, 0x0012}, {0x891b, 0x000c}, {0x891c, 0x0086},
3886{0x891d, 0x0001}, {0x891e, 0x00b7}, {0x891f, 0x008f},
3887{0x8920, 0x007d}, {0x8921, 0x00b6}, {0x8922, 0x0012},
3888{0x8923, 0x000c}, {0x8924, 0x0084}, {0x8925, 0x007f},
3889{0x8926, 0x00b7}, {0x8927, 0x0012}, {0x8928, 0x000c},
3890{0x8929, 0x008a}, {0x892a, 0x0080}, {0x892b, 0x00b7},
3891{0x892c, 0x0012}, {0x892d, 0x000c}, {0x892e, 0x0086},
3892{0x892f, 0x000a}, {0x8930, 0x00bd}, {0x8931, 0x008a},
3893{0x8932, 0x0006}, {0x8933, 0x00b6}, {0x8934, 0x0012},
3894{0x8935, 0x000a}, {0x8936, 0x002a}, {0x8937, 0x0009},
3895{0x8938, 0x00b6}, {0x8939, 0x0012}, {0x893a, 0x000c},
3896{0x893b, 0x00ba}, {0x893c, 0x008f}, {0x893d, 0x007d},
3897{0x893e, 0x00b7}, {0x893f, 0x0012}, {0x8940, 0x000c},
3898{0x8941, 0x00b6}, {0x8942, 0x008f}, {0x8943, 0x007e},
3899{0x8944, 0x0081}, {0x8945, 0x0060}, {0x8946, 0x0027},
3900{0x8947, 0x001a}, {0x8948, 0x008b}, {0x8949, 0x0020},
3901{0x894a, 0x00b7}, {0x894b, 0x008f}, {0x894c, 0x007e},
3902{0x894d, 0x00b6}, {0x894e, 0x0012}, {0x894f, 0x000c},
3903{0x8950, 0x0084}, {0x8951, 0x009f}, {0x8952, 0x00ba},
3904{0x8953, 0x008f}, {0x8954, 0x007e}, {0x8955, 0x00b7},
3905{0x8956, 0x0012}, {0x8957, 0x000c}, {0x8958, 0x00b6},
3906{0x8959, 0x008f}, {0x895a, 0x007d}, {0x895b, 0x0048},
3907{0x895c, 0x00b7}, {0x895d, 0x008f}, {0x895e, 0x007d},
3908{0x895f, 0x007e}, {0x8960, 0x0089}, {0x8961, 0x0021},
3909{0x8962, 0x00b6}, {0x8963, 0x0012}, {0x8964, 0x0004},
3910{0x8965, 0x008a}, {0x8966, 0x0020}, {0x8967, 0x00b7},
3911{0x8968, 0x0012}, {0x8969, 0x0004}, {0x896a, 0x00bd},
3912{0x896b, 0x008a}, {0x896c, 0x000a}, {0x896d, 0x004f},
3913{0x896e, 0x0039}, {0x896f, 0x00a6}, {0x8970, 0x0000},
3914{0x8971, 0x0018}, {0x8972, 0x00a7}, {0x8973, 0x0000},
3915{0x8974, 0x0008}, {0x8975, 0x0018}, {0x8976, 0x0008},
3916{0x8977, 0x005a}, {0x8978, 0x0026}, {0x8979, 0x00f5},
3917{0x897a, 0x0039}, {0x897b, 0x0036}, {0x897c, 0x006c},
3918{0x897d, 0x0000}, {0x897e, 0x0032}, {0x897f, 0x00ba},
3919{0x8980, 0x008f}, {0x8981, 0x007f}, {0x8982, 0x00b7},
3920{0x8983, 0x008f}, {0x8984, 0x007f}, {0x8985, 0x00b6},
3921{0x8986, 0x0012}, {0x8987, 0x0009}, {0x8988, 0x0084},
3922{0x8989, 0x0003}, {0x898a, 0x00a7}, {0x898b, 0x0001},
3923{0x898c, 0x00b6}, {0x898d, 0x0012}, {0x898e, 0x0006},
3924{0x898f, 0x0084}, {0x8990, 0x003f}, {0x8991, 0x00a7},
3925{0x8992, 0x0002}, {0x8993, 0x0039}, {0x8994, 0x0036},
3926{0x8995, 0x0086}, {0x8996, 0x0003}, {0x8997, 0x00b7},
3927{0x8998, 0x008f}, {0x8999, 0x0080}, {0x899a, 0x0032},
3928{0x899b, 0x00c1}, {0x899c, 0x0000}, {0x899d, 0x0026},
3929{0x899e, 0x0006}, {0x899f, 0x00b7}, {0x89a0, 0x008f},
3930{0x89a1, 0x007c}, {0x89a2, 0x007e}, {0x89a3, 0x0089},
3931{0x89a4, 0x00c9}, {0x89a5, 0x00c1}, {0x89a6, 0x0001},
3932{0x89a7, 0x0027}, {0x89a8, 0x0018}, {0x89a9, 0x00c1},
3933{0x89aa, 0x0002}, {0x89ab, 0x0027}, {0x89ac, 0x000c},
3934{0x89ad, 0x00c1}, {0x89ae, 0x0003}, {0x89af, 0x0027},
3935{0x89b0, 0x0000}, {0x89b1, 0x00f6}, {0x89b2, 0x008f},
3936{0x89b3, 0x0080}, {0x89b4, 0x0005}, {0x89b5, 0x0005},
3937{0x89b6, 0x00f7}, {0x89b7, 0x008f}, {0x89b8, 0x0080},
3938{0x89b9, 0x00f6}, {0x89ba, 0x008f}, {0x89bb, 0x0080},
3939{0x89bc, 0x0005}, {0x89bd, 0x0005}, {0x89be, 0x00f7},
3940{0x89bf, 0x008f}, {0x89c0, 0x0080}, {0x89c1, 0x00f6},
3941{0x89c2, 0x008f}, {0x89c3, 0x0080}, {0x89c4, 0x0005},
3942{0x89c5, 0x0005}, {0x89c6, 0x00f7}, {0x89c7, 0x008f},
3943{0x89c8, 0x0080}, {0x89c9, 0x00f6}, {0x89ca, 0x008f},
3944{0x89cb, 0x0080}, {0x89cc, 0x0053}, {0x89cd, 0x00f4},
3945{0x89ce, 0x0012}, {0x89cf, 0x0007}, {0x89d0, 0x001b},
3946{0x89d1, 0x00b7}, {0x89d2, 0x0012}, {0x89d3, 0x0007},
3947{0x89d4, 0x0039}, {0x89d5, 0x00ce}, {0x89d6, 0x008f},
3948{0x89d7, 0x0070}, {0x89d8, 0x00a6}, {0x89d9, 0x0000},
3949{0x89da, 0x0018}, {0x89db, 0x00e6}, {0x89dc, 0x0000},
3950{0x89dd, 0x0018}, {0x89de, 0x00a7}, {0x89df, 0x0000},
3951{0x89e0, 0x00e7}, {0x89e1, 0x0000}, {0x89e2, 0x00a6},
3952{0x89e3, 0x0001}, {0x89e4, 0x0018}, {0x89e5, 0x00e6},
3953{0x89e6, 0x0001}, {0x89e7, 0x0018}, {0x89e8, 0x00a7},
3954{0x89e9, 0x0001}, {0x89ea, 0x00e7}, {0x89eb, 0x0001},
3955{0x89ec, 0x00a6}, {0x89ed, 0x0002}, {0x89ee, 0x0018},
3956{0x89ef, 0x00e6}, {0x89f0, 0x0002}, {0x89f1, 0x0018},
3957{0x89f2, 0x00a7}, {0x89f3, 0x0002}, {0x89f4, 0x00e7},
3958{0x89f5, 0x0002}, {0x89f6, 0x0039}, {0x89f7, 0x00a6},
3959{0x89f8, 0x0000}, {0x89f9, 0x0084}, {0x89fa, 0x0007},
3960{0x89fb, 0x00e6}, {0x89fc, 0x0000}, {0x89fd, 0x00c4},
3961{0x89fe, 0x0038}, {0x89ff, 0x0054}, {0x8a00, 0x0054},
3962{0x8a01, 0x0054}, {0x8a02, 0x001b}, {0x8a03, 0x00a7},
3963{0x8a04, 0x0000}, {0x8a05, 0x0039}, {0x8a06, 0x004a},
3964{0x8a07, 0x0026}, {0x8a08, 0x00fd}, {0x8a09, 0x0039},
3965{0x8a0a, 0x0096}, {0x8a0b, 0x0022}, {0x8a0c, 0x0084},
3966{0x8a0d, 0x000f}, {0x8a0e, 0x0097}, {0x8a0f, 0x0022},
3967{0x8a10, 0x0086}, {0x8a11, 0x0001}, {0x8a12, 0x00b7},
3968{0x8a13, 0x008f}, {0x8a14, 0x0070}, {0x8a15, 0x00b6},
3969{0x8a16, 0x0012}, {0x8a17, 0x0007}, {0x8a18, 0x00b7},
3970{0x8a19, 0x008f}, {0x8a1a, 0x0071}, {0x8a1b, 0x00f6},
3971{0x8a1c, 0x0012}, {0x8a1d, 0x000c}, {0x8a1e, 0x00c4},
3972{0x8a1f, 0x000f}, {0x8a20, 0x00c8}, {0x8a21, 0x000f},
3973{0x8a22, 0x00f7}, {0x8a23, 0x008f}, {0x8a24, 0x0072},
3974{0x8a25, 0x00f6}, {0x8a26, 0x008f}, {0x8a27, 0x0072},
3975{0x8a28, 0x00b6}, {0x8a29, 0x008f}, {0x8a2a, 0x0071},
3976{0x8a2b, 0x0084}, {0x8a2c, 0x0003}, {0x8a2d, 0x0027},
3977{0x8a2e, 0x0014}, {0x8a2f, 0x0081}, {0x8a30, 0x0001},
3978{0x8a31, 0x0027}, {0x8a32, 0x001c}, {0x8a33, 0x0081},
3979{0x8a34, 0x0002}, {0x8a35, 0x0027}, {0x8a36, 0x0024},
3980{0x8a37, 0x00f4}, {0x8a38, 0x008f}, {0x8a39, 0x0070},
3981{0x8a3a, 0x0027}, {0x8a3b, 0x002a}, {0x8a3c, 0x0096},
3982{0x8a3d, 0x0022}, {0x8a3e, 0x008a}, {0x8a3f, 0x0080},
3983{0x8a40, 0x007e}, {0x8a41, 0x008a}, {0x8a42, 0x0064},
3984{0x8a43, 0x00f4}, {0x8a44, 0x008f}, {0x8a45, 0x0070},
3985{0x8a46, 0x0027}, {0x8a47, 0x001e}, {0x8a48, 0x0096},
3986{0x8a49, 0x0022}, {0x8a4a, 0x008a}, {0x8a4b, 0x0010},
3987{0x8a4c, 0x007e}, {0x8a4d, 0x008a}, {0x8a4e, 0x0064},
3988{0x8a4f, 0x00f4}, {0x8a50, 0x008f}, {0x8a51, 0x0070},
3989{0x8a52, 0x0027}, {0x8a53, 0x0012}, {0x8a54, 0x0096},
3990{0x8a55, 0x0022}, {0x8a56, 0x008a}, {0x8a57, 0x0020},
3991{0x8a58, 0x007e}, {0x8a59, 0x008a}, {0x8a5a, 0x0064},
3992{0x8a5b, 0x00f4}, {0x8a5c, 0x008f}, {0x8a5d, 0x0070},
3993{0x8a5e, 0x0027}, {0x8a5f, 0x0006}, {0x8a60, 0x0096},
3994{0x8a61, 0x0022}, {0x8a62, 0x008a}, {0x8a63, 0x0040},
3995{0x8a64, 0x0097}, {0x8a65, 0x0022}, {0x8a66, 0x0074},
3996{0x8a67, 0x008f}, {0x8a68, 0x0071}, {0x8a69, 0x0074},
3997{0x8a6a, 0x008f}, {0x8a6b, 0x0071}, {0x8a6c, 0x0078},
3998{0x8a6d, 0x008f}, {0x8a6e, 0x0070}, {0x8a6f, 0x00b6},
3999{0x8a70, 0x008f}, {0x8a71, 0x0070}, {0x8a72, 0x0085},
4000{0x8a73, 0x0010}, {0x8a74, 0x0027}, {0x8a75, 0x00af},
4001{0x8a76, 0x00d6}, {0x8a77, 0x0022}, {0x8a78, 0x00c4},
4002{0x8a79, 0x0010}, {0x8a7a, 0x0058}, {0x8a7b, 0x00b6},
4003{0x8a7c, 0x0012}, {0x8a7d, 0x0070}, {0x8a7e, 0x0081},
4004{0x8a7f, 0x00e4}, {0x8a80, 0x0027}, {0x8a81, 0x0036},
4005{0x8a82, 0x0081}, {0x8a83, 0x00e1}, {0x8a84, 0x0026},
4006{0x8a85, 0x000c}, {0x8a86, 0x0096}, {0x8a87, 0x0022},
4007{0x8a88, 0x0084}, {0x8a89, 0x0020}, {0x8a8a, 0x0044},
4008{0x8a8b, 0x001b}, {0x8a8c, 0x00d6}, {0x8a8d, 0x0022},
4009{0x8a8e, 0x00c4}, {0x8a8f, 0x00cf}, {0x8a90, 0x0020},
4010{0x8a91, 0x0023}, {0x8a92, 0x0058}, {0x8a93, 0x0081},
4011{0x8a94, 0x00c6}, {0x8a95, 0x0026}, {0x8a96, 0x000d},
4012{0x8a97, 0x0096}, {0x8a98, 0x0022}, {0x8a99, 0x0084},
4013{0x8a9a, 0x0040}, {0x8a9b, 0x0044}, {0x8a9c, 0x0044},
4014{0x8a9d, 0x001b}, {0x8a9e, 0x00d6}, {0x8a9f, 0x0022},
4015{0x8aa0, 0x00c4}, {0x8aa1, 0x00af}, {0x8aa2, 0x0020},
4016{0x8aa3, 0x0011}, {0x8aa4, 0x0058}, {0x8aa5, 0x0081},
4017{0x8aa6, 0x0027}, {0x8aa7, 0x0026}, {0x8aa8, 0x000f},
4018{0x8aa9, 0x0096}, {0x8aaa, 0x0022}, {0x8aab, 0x0084},
4019{0x8aac, 0x0080}, {0x8aad, 0x0044}, {0x8aae, 0x0044},
4020{0x8aaf, 0x0044}, {0x8ab0, 0x001b}, {0x8ab1, 0x00d6},
4021{0x8ab2, 0x0022}, {0x8ab3, 0x00c4}, {0x8ab4, 0x006f},
4022{0x8ab5, 0x001b}, {0x8ab6, 0x0097}, {0x8ab7, 0x0022},
4023{0x8ab8, 0x0039}, {0x8ab9, 0x0027}, {0x8aba, 0x000c},
4024{0x8abb, 0x007c}, {0x8abc, 0x0082}, {0x8abd, 0x0006},
4025{0x8abe, 0x00bd}, {0x8abf, 0x00d9}, {0x8ac0, 0x00ed},
4026{0x8ac1, 0x00b6}, {0x8ac2, 0x0082}, {0x8ac3, 0x0007},
4027{0x8ac4, 0x007e}, {0x8ac5, 0x008a}, {0x8ac6, 0x00b9},
4028{0x8ac7, 0x007f}, {0x8ac8, 0x0082}, {0x8ac9, 0x0006},
4029{0x8aca, 0x0039}, { 0x0, 0x0 }
4030};
#endif


/* phy types */
#define CAS_PHY_UNKNOWN      0x00
#define CAS_PHY_SERDES       0x01
#define CAS_PHY_MII_MDIO0    0x02
#define CAS_PHY_MII_MDIO1    0x04
#define CAS_PHY_MII(x)       ((x) & (CAS_PHY_MII_MDIO0 | CAS_PHY_MII_MDIO1))
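/* Illustrative note (not part of the original header): CAS_PHY_MII() is
 * nonzero whenever either external MII/MDIO interface is selected, e.g.
 * CAS_PHY_MII(CAS_PHY_MII_MDIO1) != 0, while CAS_PHY_MII(CAS_PHY_SERDES) == 0.
 */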

/* _RING_INDEX is the index for the ring sizes to be used.  _RING_SIZE
 * is the actual size.  The default index for the various rings is
 * 8.  NOTE: there are a number of alignment constraints on the rings;
 * to deal with them, rings are simply allocated so that the desired
 * alignment can be obtained.  The constraints are:
 *  RX DESC and COMP rings must be 8KB aligned
 *  TX DESC rings must be 2KB aligned.
 * If you change the numbers, be cognizant of how the alignment will
 * change in INIT_BLOCK as well.
 */
4051
4052#define DESC_RING_I_TO_S(x) (32*(1 << (x)))
4053#define COMP_RING_I_TO_S(x) (128*(1 << (x)))
4054#define TX_DESC_RING_INDEX 4 /* 512 = 8k */
4055#define RX_DESC_RING_INDEX 4 /* 512 = 8k */
4056#define RX_COMP_RING_INDEX 4 /* 2048 = 64k: should be 4x rx ring size */
4057
4058#if (TX_DESC_RING_INDEX > 8) || (TX_DESC_RING_INDEX < 0)
4059#error TX_DESC_RING_INDEX must be between 0 and 8
4060#endif
4061
4062#if (RX_DESC_RING_INDEX > 8) || (RX_DESC_RING_INDEX < 0)
4063#error RX_DESC_RING_INDEX must be between 0 and 8
4064#endif
4065
4066#if (RX_COMP_RING_INDEX > 8) || (RX_COMP_RING_INDEX < 0)
4067#error RX_COMP_RING_INDEX must be between 0 and 8
4068#endif
4069
4070#define N_TX_RINGS MAX_TX_RINGS /* for QoS */
4071#define N_TX_RINGS_MASK MAX_TX_RINGS_MASK
4072#define N_RX_DESC_RINGS MAX_RX_DESC_RINGS /* 1 for ipsec */
4073#define N_RX_COMP_RINGS 0x1 /* for mult. PCI interrupts */
4074
4075/* number of flows that can go through re-assembly */
4076#define N_RX_FLOWS 64
4077
4078#define TX_DESC_RING_SIZE DESC_RING_I_TO_S(TX_DESC_RING_INDEX)
4079#define RX_DESC_RING_SIZE DESC_RING_I_TO_S(RX_DESC_RING_INDEX)
4080#define RX_COMP_RING_SIZE COMP_RING_I_TO_S(RX_COMP_RING_INDEX)
4081#define TX_DESC_RINGN_INDEX(x) TX_DESC_RING_INDEX
4082#define RX_DESC_RINGN_INDEX(x) RX_DESC_RING_INDEX
4083#define RX_COMP_RINGN_INDEX(x) RX_COMP_RING_INDEX
4084#define TX_DESC_RINGN_SIZE(x) TX_DESC_RING_SIZE
4085#define RX_DESC_RINGN_SIZE(x) RX_DESC_RING_SIZE
4086#define RX_COMP_RINGN_SIZE(x) RX_COMP_RING_SIZE
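
/* Editorial note, not part of the original header: with the default index of
 * 4 the macros above expand to
 *
 *	TX_DESC_RING_SIZE = DESC_RING_I_TO_S(4) =  32 * (1 << 4) =  512 entries
 *	RX_DESC_RING_SIZE = DESC_RING_I_TO_S(4) =  32 * (1 << 4) =  512 entries
 *	RX_COMP_RING_SIZE = COMP_RING_I_TO_S(4) = 128 * (1 << 4) = 2048 entries
 *
 * i.e. 8KB per descriptor ring (16-byte entries) and 64KB for the completion
 * ring (32-byte entries), matching the "512 = 8k" / "2048 = 64k" annotations
 * on the index defines above.
 */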
4087
4088/* convert values */
4089#define CAS_BASE(x, y) (((y) << (x ## _SHIFT)) & (x ## _MASK))
4090#define CAS_VAL(x, y) (((y) & (x ## _MASK)) >> (x ## _SHIFT))
4091#define CAS_TX_RINGN_BASE(y) ((TX_DESC_RINGN_INDEX(y) << \
4092 TX_CFG_DESC_RINGN_SHIFT(y)) & \
4093 TX_CFG_DESC_RINGN_MASK(y))
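
/* Editorial note, not part of the original header: CAS_BASE()/CAS_VAL() work
 * purely by token-pasting the _SHIFT/_MASK suffixes, so any field that
 * follows the FOO_MASK + FOO_SHIFT convention in this file can be packed and
 * unpacked the same way (FOO is a placeholder, not a real field name):
 *
 *	reg |= CAS_BASE(FOO, val);	becomes  ((val) << FOO_SHIFT) & FOO_MASK
 *	val  = CAS_VAL(FOO, reg);	becomes  ((reg) & FOO_MASK) >> FOO_SHIFT
 */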
4094
4095/* min is 2k, but we can't do jumbo frames unless it's at least 8k */
4096#define CAS_MIN_PAGE_SHIFT 11 /* 2048 */
4097#define CAS_JUMBO_PAGE_SHIFT 13 /* 8192 */
4098#define CAS_MAX_PAGE_SHIFT 14 /* 16384 */
4099
4100#define TX_DESC_BUFLEN_MASK 0x0000000000003FFFULL /* buffer length in
4101 bytes. 0 - 9256 */
4102#define TX_DESC_BUFLEN_SHIFT 0
4103#define TX_DESC_CSUM_START_MASK 0x00000000001F8000ULL /* checksum start. #
4104 of bytes to be
4105 skipped before
4106 csum calc begins.
4107 value must be
4108 even */
4109#define TX_DESC_CSUM_START_SHIFT 15
4110#define TX_DESC_CSUM_STUFF_MASK 0x000000001FE00000ULL /* checksum stuff.
4111 byte offset w/in
4112 the pkt for the
4113 1st csum byte.
4114 must be > 8 */
4115#define TX_DESC_CSUM_STUFF_SHIFT 21
4116#define TX_DESC_CSUM_EN 0x0000000020000000ULL /* enable checksum */
4117#define TX_DESC_EOF 0x0000000040000000ULL /* end of frame */
4118#define TX_DESC_SOF 0x0000000080000000ULL /* start of frame */
4119#define TX_DESC_INTME 0x0000000100000000ULL /* interrupt me */
4120#define TX_DESC_NO_CRC 0x0000000200000000ULL /* debugging only.
4121 CRC will not be
4122 inserted into
4123 outgoing frame. */
4124struct cas_tx_desc {
4125 u64 control;
4126 u64 buffer;
4127};
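
/* Editorial sketch, not part of the original header: one way a driver could
 * fill a cas_tx_desc for a single-fragment frame with hardware checksumming,
 * using the control-word fields defined above.  The helper name and its
 * arguments are hypothetical, and the cpu_to_le64() conversions assume the
 * hardware expects little-endian descriptor words.
 */
static inline void cas_tx_desc_fill_example(struct cas_tx_desc *txd,
					    dma_addr_t mapping, u32 len,
					    u32 csum_start, u32 csum_stuff)
{
	u64 ctrl = CAS_BASE(TX_DESC_BUFLEN, len) |	/* buffer length in bytes */
		   CAS_BASE(TX_DESC_CSUM_START, csum_start) |
		   CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff) |
		   TX_DESC_CSUM_EN | TX_DESC_SOF | TX_DESC_EOF;

	txd->control = cpu_to_le64(ctrl);
	txd->buffer  = cpu_to_le64(mapping);
}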
4128
4129/* descriptor ring for free buffers contains page-sized buffers. the index
4130 * value is not used by the hw in any way. it's just stored and returned in
4131 * the completion ring.
4132 */
4133struct cas_rx_desc {
4134 u64 index;
4135 u64 buffer;
4136};
4137
4138/* received packets are put on the completion ring. */
4139/* word 1 */
4140#define RX_COMP1_DATA_SIZE_MASK 0x0000000007FFE000ULL
4141#define RX_COMP1_DATA_SIZE_SHIFT 13
4142#define RX_COMP1_DATA_OFF_MASK 0x000001FFF8000000ULL
4143#define RX_COMP1_DATA_OFF_SHIFT 27
4144#define RX_COMP1_DATA_INDEX_MASK 0x007FFE0000000000ULL
4145#define RX_COMP1_DATA_INDEX_SHIFT 41
4146#define RX_COMP1_SKIP_MASK 0x0180000000000000ULL
4147#define RX_COMP1_SKIP_SHIFT 55
4148#define RX_COMP1_RELEASE_NEXT 0x0200000000000000ULL
4149#define RX_COMP1_SPLIT_PKT 0x0400000000000000ULL
4150#define RX_COMP1_RELEASE_FLOW 0x0800000000000000ULL
4151#define RX_COMP1_RELEASE_DATA 0x1000000000000000ULL
4152#define RX_COMP1_RELEASE_HDR 0x2000000000000000ULL
4153#define RX_COMP1_TYPE_MASK 0xC000000000000000ULL
4154#define RX_COMP1_TYPE_SHIFT 62
4155
4156/* word 2 */
4157#define RX_COMP2_NEXT_INDEX_MASK 0x00000007FFE00000ULL
4158#define RX_COMP2_NEXT_INDEX_SHIFT 21
4159#define RX_COMP2_HDR_SIZE_MASK 0x00000FF800000000ULL
4160#define RX_COMP2_HDR_SIZE_SHIFT 35
4161#define RX_COMP2_HDR_OFF_MASK 0x0003F00000000000ULL
4162#define RX_COMP2_HDR_OFF_SHIFT 44
4163#define RX_COMP2_HDR_INDEX_MASK 0xFFFC000000000000ULL
4164#define RX_COMP2_HDR_INDEX_SHIFT 50
4165
4166/* word 3 */
4167#define RX_COMP3_SMALL_PKT 0x0000000000000001ULL
4168#define RX_COMP3_JUMBO_PKT 0x0000000000000002ULL
4169#define RX_COMP3_JUMBO_HDR_SPLIT_EN 0x0000000000000004ULL
4170#define RX_COMP3_CSUM_START_MASK 0x000000000007F000ULL
4171#define RX_COMP3_CSUM_START_SHIFT 12
4172#define RX_COMP3_FLOWID_MASK 0x0000000001F80000ULL
4173#define RX_COMP3_FLOWID_SHIFT 19
4174#define RX_COMP3_OPCODE_MASK 0x000000000E000000ULL
4175#define RX_COMP3_OPCODE_SHIFT 25
4176#define RX_COMP3_FORCE_FLAG 0x0000000010000000ULL
4177#define RX_COMP3_NO_ASSIST 0x0000000020000000ULL
4178#define RX_COMP3_LOAD_BAL_MASK 0x000001F800000000ULL
4179#define RX_COMP3_LOAD_BAL_SHIFT 35
4180#define RX_PLUS_COMP3_ENC_PKT 0x0000020000000000ULL /* cas+ */
4181#define RX_COMP3_L3_HEAD_OFF_MASK 0x0000FE0000000000ULL /* cas */
4182#define RX_COMP3_L3_HEAD_OFF_SHIFT 41
4183#define RX_PLUS_COMP_L3_HEAD_OFF_MASK 0x0000FC0000000000ULL /* cas+ */
4184#define RX_PLUS_COMP_L3_HEAD_OFF_SHIFT 42
4185#define RX_COMP3_SAP_MASK 0xFFFF000000000000ULL
4186#define RX_COMP3_SAP_SHIFT 48
4187
4188/* word 4 */
4189#define RX_COMP4_TCP_CSUM_MASK 0x000000000000FFFFULL
4190#define RX_COMP4_TCP_CSUM_SHIFT 0
4191#define RX_COMP4_PKT_LEN_MASK 0x000000003FFF0000ULL
4192#define RX_COMP4_PKT_LEN_SHIFT 16
4193#define RX_COMP4_PERFECT_MATCH_MASK 0x00000003C0000000ULL
4194#define RX_COMP4_PERFECT_MATCH_SHIFT 30
4195#define RX_COMP4_ZERO 0x0000080000000000ULL
4196#define RX_COMP4_HASH_VAL_MASK 0x0FFFF00000000000ULL
4197#define RX_COMP4_HASH_VAL_SHIFT 44
4198#define RX_COMP4_HASH_PASS 0x1000000000000000ULL
4199#define RX_COMP4_BAD 0x4000000000000000ULL
4200#define RX_COMP4_LEN_MISMATCH 0x8000000000000000ULL
4201
4202/* we encode the following: ring/index/release. only 14 bits
4203 * are usable.
4204 * NOTE: the encoding is dependent upon RX_DESC_RING_SIZE and
4205 * MAX_RX_DESC_RINGS. */
4206#define RX_INDEX_NUM_MASK 0x0000000000000FFFULL
4207#define RX_INDEX_NUM_SHIFT 0
4208#define RX_INDEX_RING_MASK 0x0000000000001000ULL
4209#define RX_INDEX_RING_SHIFT 12
4210#define RX_INDEX_RELEASE 0x0000000000002000ULL
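
/* Editorial note, not part of the original header: the 14 usable bits of
 * cas_rx_desc.index would be assembled with CAS_BASE() from the fields
 * above, e.g.
 *
 *	index = CAS_BASE(RX_INDEX_NUM, buf_nr) | CAS_BASE(RX_INDEX_RING, ring_nr);
 *
 * (buf_nr and ring_nr are placeholders).  The hardware returns the value
 * untouched in the completion entry, which is how the driver locates the
 * page a completion refers to.
 */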
4211
4212struct cas_rx_comp {
4213 u64 word1;
4214 u64 word2;
4215 u64 word3;
4216 u64 word4;
4217};
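
/* Editorial sketch, not part of the original header: how the CAS_VAL()
 * extractions pull the data-buffer coordinates out of a completion entry.
 * The helper name is hypothetical and the le64_to_cpu() conversions assume
 * little-endian completion words.
 */
static inline void cas_rx_comp_decode_example(const struct cas_rx_comp *rxc,
					      u32 *dindex, u32 *doff,
					      u32 *dlen, u32 *plen)
{
	u64 word1 = le64_to_cpu(rxc->word1);
	u64 word4 = le64_to_cpu(rxc->word4);

	*dindex = CAS_VAL(RX_COMP1_DATA_INDEX, word1); /* rx descriptor/page index */
	*doff   = CAS_VAL(RX_COMP1_DATA_OFF, word1);   /* byte offset into that page */
	*dlen   = CAS_VAL(RX_COMP1_DATA_SIZE, word1);  /* bytes of data at that offset */
	*plen   = CAS_VAL(RX_COMP4_PKT_LEN, word4);    /* total frame length */
}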
4218
4219enum link_state {
4220 link_down = 0, /* No link, will retry */
4221 link_aneg, /* Autoneg in progress */
4222 link_force_try, /* Try Forced link speed */
4223 link_force_ret, /* Forced mode worked, retrying autoneg */
4224 link_force_ok, /* Stay in forced mode */
4225 link_up /* Link is up */
4226};
4227
4228typedef struct cas_page {
4229 struct list_head list;
4230 struct page *buffer;
4231 dma_addr_t dma_addr;
4232 int used;
4233} cas_page_t;
4234
4235
4236/* some alignment constraints:
4237 * TX DESC, RX DESC, and RX COMP must each be 8K aligned.
4238 * TX COMPWB must be 8-byte aligned.
4239 * to accomplish this, here's what we do:
4240 *
4241 * INIT_BLOCK_RX_COMP = 64k (already aligned)
4242 * INIT_BLOCK_RX_DESC = 8k
4243 * INIT_BLOCK_TX = 8k
4244 * INIT_BLOCK_RX1_DESC = 8k
4245 * TX COMPWB
4246 */
4247#define INIT_BLOCK_TX (TX_DESC_RING_SIZE)
4248#define INIT_BLOCK_RX_DESC (RX_DESC_RING_SIZE)
4249#define INIT_BLOCK_RX_COMP (RX_COMP_RING_SIZE)
4250
4251struct cas_init_block {
4252 struct cas_rx_comp rxcs[N_RX_COMP_RINGS][INIT_BLOCK_RX_COMP];
4253 struct cas_rx_desc rxds[N_RX_DESC_RINGS][INIT_BLOCK_RX_DESC];
4254 struct cas_tx_desc txds[N_TX_RINGS][INIT_BLOCK_TX];
4255 u64 tx_compwb;
4256};
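
/* Editorial note, not part of the original header: with the default ring
 * sizes the layout above satisfies the alignment comment by construction --
 * rxcs occupies a whole multiple of 64KB (2048 completion entries of 32
 * bytes per ring), each rxds/txds ring is 512 * 16 bytes = 8KB, so every
 * descriptor ring starts on an 8KB boundary within the block, and tx_compwb
 * ends up 8-byte aligned at the very end.
 */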
4257
4258/* tiny buffers to deal with the target abort issue. we allocate a bit
4259 * over so that we don't have target abort issues with these buffers
4260 * as well.
4261 */
4262#define TX_TINY_BUF_LEN 0x100
4263#define TX_TINY_BUF_BLOCK ((INIT_BLOCK_TX + 1)*TX_TINY_BUF_LEN)
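
/* Editorial note, not part of the original header: with the defaults above
 * this works out to (512 + 1) * 0x100 = 131,328 bytes per tx ring -- one
 * 256-byte slot per descriptor plus one spare, which is the "bit over" the
 * comment above refers to.
 */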
4264
4265struct cas_tiny_count {
4266 int nbufs;
4267 int used;
4268};
4269
4270struct cas {
4271 spinlock_t lock; /* for most bits */
4272 spinlock_t tx_lock[N_TX_RINGS]; /* tx bits */
4273 spinlock_t stat_lock[N_TX_RINGS + 1]; /* for stat gathering */
4274 spinlock_t rx_inuse_lock; /* rx inuse list */
4275 spinlock_t rx_spare_lock; /* rx spare list */
4276
4277 void __iomem *regs;
4278 int tx_new[N_TX_RINGS], tx_old[N_TX_RINGS];
4279 int rx_old[N_RX_DESC_RINGS];
4280 int rx_cur[N_RX_COMP_RINGS], rx_new[N_RX_COMP_RINGS];
4281 int rx_last[N_RX_DESC_RINGS];
4282
4283 /* Set when chip is actually in operational state
4284 * (ie. not power managed) */
4285 int hw_running;
4286 int opened;
4287 struct semaphore pm_sem; /* open/close/suspend/resume */
4288
4289 struct cas_init_block *init_block;
4290 struct cas_tx_desc *init_txds[MAX_TX_RINGS];
4291 struct cas_rx_desc *init_rxds[MAX_RX_DESC_RINGS];
4292 struct cas_rx_comp *init_rxcs[MAX_RX_COMP_RINGS];
4293
4294 /* we use sk_buffs for tx and pages for rx. the rx skbuffs
4295 * are there for flow re-assembly. */
4296 struct sk_buff *tx_skbs[N_TX_RINGS][TX_DESC_RING_SIZE];
4297 struct sk_buff_head rx_flows[N_RX_FLOWS];
4298 cas_page_t *rx_pages[N_RX_DESC_RINGS][RX_DESC_RING_SIZE];
4299 struct list_head rx_spare_list, rx_inuse_list;
4300 int rx_spares_needed;
4301
4302 /* for small packets when copying would be quicker than
4303 mapping */
4304 struct cas_tiny_count tx_tiny_use[N_TX_RINGS][TX_DESC_RING_SIZE];
4305 u8 *tx_tiny_bufs[N_TX_RINGS];
4306
4307 u32 msg_enable;
4308
4309 /* N_TX_RINGS must be >= N_RX_DESC_RINGS */
4310 struct net_device_stats net_stats[N_TX_RINGS + 1];
4311
4312 u32 pci_cfg[64 >> 2];
4313 u8 pci_revision;
4314
4315 int phy_type;
4316 int phy_addr;
4317 u32 phy_id;
4318#define CAS_FLAG_1000MB_CAP 0x00000001
4319#define CAS_FLAG_REG_PLUS 0x00000002
4320#define CAS_FLAG_TARGET_ABORT 0x00000004
4321#define CAS_FLAG_SATURN 0x00000008
4322#define CAS_FLAG_RXD_POST_MASK 0x000000F0
4323#define CAS_FLAG_RXD_POST_SHIFT 4
4324#define CAS_FLAG_RXD_POST(x) ((1 << (CAS_FLAG_RXD_POST_SHIFT + (x))) & \
4325 CAS_FLAG_RXD_POST_MASK)
4326#define CAS_FLAG_ENTROPY_DEV 0x00000100
4327#define CAS_FLAG_NO_HW_CSUM 0x00000200
4328 u32 cas_flags;
4329 int packet_min; /* minimum packet size */
4330 int tx_fifo_size;
4331 int rx_fifo_size;
4332 int rx_pause_off;
4333 int rx_pause_on;
4334 int crc_size; /* 4 if half-duplex */
4335
4336 int pci_irq_INTC;
4337 int min_frame_size; /* for tx fifo workaround */
4338
4339 /* page size allocation */
4340 int page_size;
4341 int page_order;
4342 int mtu_stride;
4343
4344 u32 mac_rx_cfg;
4345
4346 /* Autoneg & PHY control */
4347 int link_cntl;
4348 int link_fcntl;
4349 enum link_state lstate;
4350 struct timer_list link_timer;
4351 int timer_ticks;
4352 struct work_struct reset_task;
4353#if 0
4354 atomic_t reset_task_pending;
4355#else
4356 atomic_t reset_task_pending;
4357 atomic_t reset_task_pending_mtu;
4358 atomic_t reset_task_pending_spare;
4359 atomic_t reset_task_pending_all;
4360#endif
4361
4362#ifdef CONFIG_CASSINI_QGE_DEBUG
4363 atomic_t interrupt_seen; /* 1 if any interrupts are getting through */
4364#endif
4365
4366 /* Link-down problem workaround */
4367#define LINK_TRANSITION_UNKNOWN 0
4368#define LINK_TRANSITION_ON_FAILURE 1
4369#define LINK_TRANSITION_STILL_FAILED 2
4370#define LINK_TRANSITION_LINK_UP 3
4371#define LINK_TRANSITION_LINK_CONFIG 4
4372#define LINK_TRANSITION_LINK_DOWN 5
4373#define LINK_TRANSITION_REQUESTED_RESET 6
4374 int link_transition;
4375 int link_transition_jiffies_valid;
4376 unsigned long link_transition_jiffies;
4377
4378 /* Tuning */
4379 u8 orig_cacheline_size; /* value when loaded */
4380#define CAS_PREF_CACHELINE_SIZE 0x20 /* Minimum desired */
4381
4382 /* Diagnostic counters and state. */
4383 int casreg_len; /* reg-space size for dumping */
4384 u64 pause_entered;
4385 u16 pause_last_time_recvd;
4386
4387 dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS];
4388 struct pci_dev *pdev;
4389 struct net_device *dev;
4390};
4391
4392#define TX_DESC_NEXT(r, x) (((x) + 1) & (TX_DESC_RINGN_SIZE(r) - 1))
4393#define RX_DESC_ENTRY(r, x) ((x) & (RX_DESC_RINGN_SIZE(r) - 1))
4394#define RX_COMP_ENTRY(r, x) ((x) & (RX_COMP_RINGN_SIZE(r) - 1))
4395
4396#define TX_BUFF_COUNT(r, x, y) ((x) <= (y) ? ((y) - (x)) : \
4397 (TX_DESC_RINGN_SIZE(r) - (x) + (y)))
4398
4399#define TX_BUFFS_AVAIL(cp, i) ((cp)->tx_old[(i)] <= (cp)->tx_new[(i)] ? \
4400 (cp)->tx_old[(i)] + (TX_DESC_RINGN_SIZE(i) - 1) - (cp)->tx_new[(i)] : \
4401 (cp)->tx_old[(i)] - (cp)->tx_new[(i)] - 1)
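
/* Editorial note, not part of the original header: TX_BUFFS_AVAIL() is the
 * usual "keep one slot empty" ring arithmetic.  With the default 512-entry
 * ring, for example:
 *
 *	tx_old = 10,  tx_new = 200  ->  10 + 511 - 200 = 321 free descriptors
 *	tx_old = 300, tx_new = 100  ->  300 - 100 - 1  = 199 free descriptors
 *
 * so tx_new == tx_old can only ever mean "ring empty".
 */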
4402
4403#define CAS_ALIGN(addr, align) \
4404 (((unsigned long) (addr) + ((align) - 1UL)) & ~((align) - 1))
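
/* Editorial note, not part of the original header: CAS_ALIGN() is the usual
 * round-up-to-boundary idiom for power-of-two alignments, e.g.
 *	CAS_ALIGN(0x12345, 0x2000) == 0x14000.
 */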
4405
4406#define RX_FIFO_SIZE 16384
4407#define EXPANSION_ROM_SIZE 65536
4408
4409#define CAS_MC_EXACT_MATCH_SIZE 15
4410#define CAS_MC_HASH_SIZE 256
4411#define CAS_MC_HASH_MAX (CAS_MC_EXACT_MATCH_SIZE + \
4412 CAS_MC_HASH_SIZE)
4413
4414#define TX_TARGET_ABORT_LEN 0x20
4415#define RX_SWIVEL_OFF_VAL 0x2
4416#define RX_AE_FREEN_VAL(x) (RX_DESC_RINGN_SIZE(x) >> 1)
4417#define RX_AE_COMP_VAL (RX_COMP_RING_SIZE >> 1)
4418#define RX_BLANK_INTR_PKT_VAL 0x05
4419#define RX_BLANK_INTR_TIME_VAL 0x0F
4420#define HP_TCP_THRESH_VAL 1530 /* reduce to enable reassembly */
4421
4422#define RX_SPARE_COUNT (RX_DESC_RING_SIZE >> 1)
4423#define RX_SPARE_RECOVER_VAL (RX_SPARE_COUNT >> 2)
4424
4425#endif /* _CASSINI_H */
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index cdc07ccd7332..a6078ad9b654 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -140,6 +140,7 @@
140 140
141#include <asm/system.h> 141#include <asm/system.h>
142#include <asm/io.h> 142#include <asm/io.h>
143#include <asm/irq.h>
143#if ALLOW_DMA 144#if ALLOW_DMA
144#include <asm/dma.h> 145#include <asm/dma.h>
145#endif 146#endif
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 25cc20e415da..fbf1c06ec5c1 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1387,13 +1387,13 @@ static void e100_update_stats(struct nic *nic)
1387 ns->collisions += nic->tx_collisions; 1387 ns->collisions += nic->tx_collisions;
1388 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + 1388 ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
1389 le32_to_cpu(s->tx_lost_crs); 1389 le32_to_cpu(s->tx_lost_crs);
1390 ns->rx_dropped += le32_to_cpu(s->rx_resource_errors);
1391 ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) + 1390 ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
1392 nic->rx_over_length_errors; 1391 nic->rx_over_length_errors;
1393 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); 1392 ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
1394 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); 1393 ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
1395 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); 1394 ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
1396 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); 1395 ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
1396 ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
1397 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + 1397 ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
1398 le32_to_cpu(s->rx_alignment_errors) + 1398 le32_to_cpu(s->rx_alignment_errors) +
1399 le32_to_cpu(s->rx_short_frame_errors) + 1399 le32_to_cpu(s->rx_short_frame_errors) +
@@ -1727,12 +1727,10 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
1727 1727
1728 if(unlikely(!(rfd_status & cb_ok))) { 1728 if(unlikely(!(rfd_status & cb_ok))) {
1729 /* Don't indicate if hardware indicates errors */ 1729 /* Don't indicate if hardware indicates errors */
1730 nic->net_stats.rx_dropped++;
1731 dev_kfree_skb_any(skb); 1730 dev_kfree_skb_any(skb);
1732 } else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) { 1731 } else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
1733 /* Don't indicate oversized frames */ 1732 /* Don't indicate oversized frames */
1734 nic->rx_over_length_errors++; 1733 nic->rx_over_length_errors++;
1735 nic->net_stats.rx_dropped++;
1736 dev_kfree_skb_any(skb); 1734 dev_kfree_skb_any(skb);
1737 } else { 1735 } else {
1738 nic->net_stats.rx_packets++; 1736 nic->net_stats.rx_packets++;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 7c8a0a22dcd5..ee687c902a20 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2544,7 +2544,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
2544 adapter->stats.crcerrs + adapter->stats.algnerrc + 2544 adapter->stats.crcerrs + adapter->stats.algnerrc +
2545 adapter->stats.rlec + adapter->stats.mpc + 2545 adapter->stats.rlec + adapter->stats.mpc +
2546 adapter->stats.cexterr; 2546 adapter->stats.cexterr;
2547 adapter->net_stats.rx_dropped = adapter->stats.mpc;
2548 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 2547 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
2549 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 2548 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2550 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 2549 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 32d5fabd4b10..a2c4dd4fb221 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -99,7 +99,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs
99static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*); 99static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*);
100 100
101#ifdef CONFIG_PROC_FS 101#ifdef CONFIG_PROC_FS
102#define IBMVETH_PROC_DIR "ibmveth" 102#define IBMVETH_PROC_DIR "net/ibmveth"
103static struct proc_dir_entry *ibmveth_proc_dir; 103static struct proc_dir_entry *ibmveth_proc_dir;
104#endif 104#endif
105 105
@@ -1010,7 +1010,7 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
1010#ifdef CONFIG_PROC_FS 1010#ifdef CONFIG_PROC_FS
1011static void ibmveth_proc_register_driver(void) 1011static void ibmveth_proc_register_driver(void)
1012{ 1012{
1013 ibmveth_proc_dir = create_proc_entry(IBMVETH_PROC_DIR, S_IFDIR, proc_net); 1013 ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, NULL);
1014 if (ibmveth_proc_dir) { 1014 if (ibmveth_proc_dir) {
1015 SET_MODULE_OWNER(ibmveth_proc_dir); 1015 SET_MODULE_OWNER(ibmveth_proc_dir);
1016 } 1016 }
@@ -1018,7 +1018,7 @@ static void ibmveth_proc_register_driver(void)
1018 1018
1019static void ibmveth_proc_unregister_driver(void) 1019static void ibmveth_proc_unregister_driver(void)
1020{ 1020{
1021 remove_proc_entry(IBMVETH_PROC_DIR, proc_net); 1021 remove_proc_entry(IBMVETH_PROC_DIR, NULL);
1022} 1022}
1023 1023
1024static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos) 1024static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 6d9de626c967..651c5a6578fd 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1875,11 +1875,11 @@ static int __init vlsi_mod_init(void)
1875 1875
1876 sirpulse = !!sirpulse; 1876 sirpulse = !!sirpulse;
1877 1877
1878 /* create_proc_entry returns NULL if !CONFIG_PROC_FS. 1878 /* proc_mkdir returns NULL if !CONFIG_PROC_FS.
1879 * Failure to create the procfs entry is handled like running 1879 * Failure to create the procfs entry is handled like running
1880 * without procfs - it's not required for the driver to work. 1880 * without procfs - it's not required for the driver to work.
1881 */ 1881 */
1882 vlsi_proc_root = create_proc_entry(PROC_DIR, S_IFDIR, NULL); 1882 vlsi_proc_root = proc_mkdir(PROC_DIR, NULL);
1883 if (vlsi_proc_root) { 1883 if (vlsi_proc_root) {
1884 /* protect registered procdir against module removal. 1884 /* protect registered procdir against module removal.
1885 * Because we are in the module init path there's no race 1885 * Because we are in the module init path there's no race
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 5c555373adbe..89d6d69be382 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1616,8 +1616,6 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
1616 adapter->stats.icbc + 1616 adapter->stats.icbc +
1617 adapter->stats.ecbc + adapter->stats.mpc; 1617 adapter->stats.ecbc + adapter->stats.mpc;
1618 1618
1619 adapter->net_stats.rx_dropped = adapter->stats.mpc;
1620
1621 /* see above 1619 /* see above
1622 * adapter->net_stats.rx_length_errors = adapter->stats.rlec; 1620 * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
1623 */ 1621 */
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 82f236cc3b9b..a842ecc60a34 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -1070,7 +1070,7 @@ static int __init pppoe_proc_init(void)
1070{ 1070{
1071 struct proc_dir_entry *p; 1071 struct proc_dir_entry *p;
1072 1072
1073 p = create_proc_entry("pppoe", S_IRUGO, proc_net); 1073 p = create_proc_entry("net/pppoe", S_IRUGO, NULL);
1074 if (!p) 1074 if (!p)
1075 return -ENOMEM; 1075 return -ENOMEM;
1076 1076
@@ -1142,7 +1142,7 @@ static void __exit pppoe_exit(void)
1142 dev_remove_pack(&pppoes_ptype); 1142 dev_remove_pack(&pppoes_ptype);
1143 dev_remove_pack(&pppoed_ptype); 1143 dev_remove_pack(&pppoed_ptype);
1144 unregister_netdevice_notifier(&pppoe_notifier); 1144 unregister_netdevice_notifier(&pppoe_notifier);
1145 remove_proc_entry("pppoe", proc_net); 1145 remove_proc_entry("net/pppoe", NULL);
1146 proto_unregister(&pppoe_sk_proto); 1146 proto_unregister(&pppoe_sk_proto);
1147} 1147}
1148 1148
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index f0471d102e3c..afb3f186b884 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -92,19 +92,18 @@ VERSION 2.2LK <2005/01/25>
92#endif /* RTL8169_DEBUG */ 92#endif /* RTL8169_DEBUG */
93 93
94#define R8169_MSG_DEFAULT \ 94#define R8169_MSG_DEFAULT \
95 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | \ 95 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
96 NETIF_MSG_IFDOWN)
97 96
98#define TX_BUFFS_AVAIL(tp) \ 97#define TX_BUFFS_AVAIL(tp) \
99 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) 98 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
100 99
101#ifdef CONFIG_R8169_NAPI 100#ifdef CONFIG_R8169_NAPI
102#define rtl8169_rx_skb netif_receive_skb 101#define rtl8169_rx_skb netif_receive_skb
103#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx 102#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb
104#define rtl8169_rx_quota(count, quota) min(count, quota) 103#define rtl8169_rx_quota(count, quota) min(count, quota)
105#else 104#else
106#define rtl8169_rx_skb netif_rx 105#define rtl8169_rx_skb netif_rx
107#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb 106#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx
108#define rtl8169_rx_quota(count, quota) count 107#define rtl8169_rx_quota(count, quota) count
109#endif 108#endif
110 109
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index c829e6a2e8a6..dd451e099a4c 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -428,7 +428,7 @@ static int init_shared_mem(struct s2io_nic *nic)
428 DBG_PRINT(INIT_DBG, 428 DBG_PRINT(INIT_DBG,
429 "%s: Zero DMA address for TxDL. ", dev->name); 429 "%s: Zero DMA address for TxDL. ", dev->name);
430 DBG_PRINT(INIT_DBG, 430 DBG_PRINT(INIT_DBG,
431 "Virtual address %llx\n", (u64)tmp_v); 431 "Virtual address %p\n", tmp_v);
432 tmp_v = pci_alloc_consistent(nic->pdev, 432 tmp_v = pci_alloc_consistent(nic->pdev,
433 PAGE_SIZE, &tmp_p); 433 PAGE_SIZE, &tmp_p);
434 if (!tmp_v) { 434 if (!tmp_v) {
@@ -657,9 +657,10 @@ static void free_shared_mem(struct s2io_nic *nic)
657 mac_control->zerodma_virt_addr, 657 mac_control->zerodma_virt_addr,
658 (dma_addr_t)0); 658 (dma_addr_t)0);
659 DBG_PRINT(INIT_DBG, 659 DBG_PRINT(INIT_DBG,
660 "%s: Freeing TxDL with zero DMA addr. ", dev->name); 660 "%s: Freeing TxDL with zero DMA addr. ",
661 DBG_PRINT(INIT_DBG, "Virtual address %llx\n", 661 dev->name);
662 (u64)(mac_control->zerodma_virt_addr)); 662 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
663 mac_control->zerodma_virt_addr);
663 } 664 }
664 kfree(mac_control->fifos[i].list_info); 665 kfree(mac_control->fifos[i].list_info);
665 } 666 }
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index 6ee4771addf1..b18c92cb629e 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -235,7 +235,7 @@ static int SkDrvDeInitAdapter(SK_AC *pAC, int devNbr);
235 * Extern Function Prototypes 235 * Extern Function Prototypes
236 * 236 *
237 ******************************************************************************/ 237 ******************************************************************************/
238static const char SKRootName[] = "sk98lin"; 238static const char SKRootName[] = "net/sk98lin";
239static struct proc_dir_entry *pSkRootDir; 239static struct proc_dir_entry *pSkRootDir;
240extern struct file_operations sk_proc_fops; 240extern struct file_operations sk_proc_fops;
241 241
@@ -5216,17 +5216,15 @@ static struct pci_device_id skge_pci_tbl[] = {
5216 { PCI_VENDOR_ID_3COM, 0x80eb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5216 { PCI_VENDOR_ID_3COM, 0x80eb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5217 { PCI_VENDOR_ID_SYSKONNECT, 0x4300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5217 { PCI_VENDOR_ID_SYSKONNECT, 0x4300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5218 { PCI_VENDOR_ID_SYSKONNECT, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5218 { PCI_VENDOR_ID_SYSKONNECT, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5219 { PCI_VENDOR_ID_DLINK, 0x4c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5219/* DLink card does not have valid VPD so this driver gags
5220 * { PCI_VENDOR_ID_DLINK, 0x4c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5221 */
5220 { PCI_VENDOR_ID_MARVELL, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5222 { PCI_VENDOR_ID_MARVELL, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5221#if 0 /* don't handle Yukon2 cards at the moment -- mlindner@syskonnect.de */
5222 { PCI_VENDOR_ID_MARVELL, 0x4360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5223 { PCI_VENDOR_ID_MARVELL, 0x4361, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5224#endif
5225 { PCI_VENDOR_ID_MARVELL, 0x5005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5223 { PCI_VENDOR_ID_MARVELL, 0x5005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5226 { PCI_VENDOR_ID_CNET, 0x434e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5224 { PCI_VENDOR_ID_CNET, 0x434e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5227 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5225 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, },
5228 { PCI_VENDOR_ID_LINKSYS, 0x1064, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5226 { PCI_VENDOR_ID_LINKSYS, 0x1064, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5229 { 0, } 5227 { 0 }
5230}; 5228};
5231 5229
5232MODULE_DEVICE_TABLE(pci, skge_pci_tbl); 5230MODULE_DEVICE_TABLE(pci, skge_pci_tbl);
@@ -5244,20 +5242,20 @@ static int __init skge_init(void)
5244{ 5242{
5245 int error; 5243 int error;
5246 5244
5247 pSkRootDir = proc_mkdir(SKRootName, proc_net); 5245 pSkRootDir = proc_mkdir(SKRootName, NULL);
5248 if (pSkRootDir) 5246 if (pSkRootDir)
5249 pSkRootDir->owner = THIS_MODULE; 5247 pSkRootDir->owner = THIS_MODULE;
5250 5248
5251 error = pci_register_driver(&skge_driver); 5249 error = pci_register_driver(&skge_driver);
5252 if (error) 5250 if (error)
5253 proc_net_remove(SKRootName); 5251 remove_proc_entry(SKRootName, NULL);
5254 return error; 5252 return error;
5255} 5253}
5256 5254
5257static void __exit skge_exit(void) 5255static void __exit skge_exit(void)
5258{ 5256{
5259 pci_unregister_driver(&skge_driver); 5257 pci_unregister_driver(&skge_driver);
5260 proc_net_remove(SKRootName); 5258 remove_proc_entry(SKRootName, NULL);
5261 5259
5262} 5260}
5263 5261
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index d7c98515fdfd..fd398da4993b 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
42#include "skge.h" 42#include "skge.h"
43 43
44#define DRV_NAME "skge" 44#define DRV_NAME "skge"
45#define DRV_VERSION "0.9" 45#define DRV_VERSION "1.1"
46#define PFX DRV_NAME " " 46#define PFX DRV_NAME " "
47 47
48#define DEFAULT_TX_RING_SIZE 128 48#define DEFAULT_TX_RING_SIZE 128
@@ -105,41 +105,28 @@ static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
105static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; 105static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
106static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 }; 106static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
107 107
108/* Don't need to look at whole 16K.
109 * last interesting register is descriptor poll timer.
110 */
111#define SKGE_REGS_LEN (29*128)
112
113static int skge_get_regs_len(struct net_device *dev) 108static int skge_get_regs_len(struct net_device *dev)
114{ 109{
115 return SKGE_REGS_LEN; 110 return 0x4000;
116} 111}
117 112
118/* 113/*
119 * Returns copy of control register region 114 * Returns copy of whole control register region
120 * I/O region is divided into banks and certain regions are unreadable 115 * Note: skip RAM address register because accessing it will
116 * cause bus hangs!
121 */ 117 */
122static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, 118static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
123 void *p) 119 void *p)
124{ 120{
125 const struct skge_port *skge = netdev_priv(dev); 121 const struct skge_port *skge = netdev_priv(dev);
126 unsigned long offs;
127 const void __iomem *io = skge->hw->regs; 122 const void __iomem *io = skge->hw->regs;
128 static const unsigned long bankmap
129 = (1<<0) | (1<<2) | (1<<8) | (1<<9)
130 | (1<<12) | (1<<13) | (1<<14) | (1<<15) | (1<<16)
131 | (1<<17) | (1<<20) | (1<<21) | (1<<22) | (1<<23)
132 | (1<<24) | (1<<25) | (1<<26) | (1<<27) | (1<<28);
133 123
134 regs->version = 1; 124 regs->version = 1;
135 for (offs = 0; offs < regs->len; offs += 128) { 125 memset(p, 0, regs->len);
136 u32 len = min_t(u32, 128, regs->len - offs); 126 memcpy_fromio(p, io, B3_RAM_ADDR);
137 127
138 if (bankmap & (1<<(offs/128))) 128 memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
139 memcpy_fromio(p + offs, io + offs, len); 129 regs->len - B3_RI_WTO_R1);
140 else
141 memset(p + offs, 0, len);
142 }
143} 130}
144 131
145/* Wake on Lan only supported on Yukon chps with rev 1 or above */ 132/* Wake on Lan only supported on Yukon chps with rev 1 or above */
@@ -669,7 +656,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
669 PHY_M_LED_BLINK_RT(BLINK_84MS) | 656 PHY_M_LED_BLINK_RT(BLINK_84MS) |
670 PHY_M_LEDC_TX_CTRL | 657 PHY_M_LEDC_TX_CTRL |
671 PHY_M_LEDC_DP_CTRL); 658 PHY_M_LEDC_DP_CTRL);
672 659
673 gm_phy_write(hw, port, PHY_MARV_LED_OVER, 660 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
674 PHY_M_LED_MO_RX(MO_LED_OFF) | 661 PHY_M_LED_MO_RX(MO_LED_OFF) |
675 (skge->speed == SPEED_100 ? 662 (skge->speed == SPEED_100 ?
@@ -775,17 +762,6 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
775 return 0; 762 return 0;
776} 763}
777 764
778static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size)
779{
780 struct sk_buff *skb = dev_alloc_skb(size);
781
782 if (likely(skb)) {
783 skb->dev = dev;
784 skb_reserve(skb, NET_IP_ALIGN);
785 }
786 return skb;
787}
788
789/* Allocate and setup a new buffer for receiving */ 765/* Allocate and setup a new buffer for receiving */
790static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, 766static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
791 struct sk_buff *skb, unsigned int bufsize) 767 struct sk_buff *skb, unsigned int bufsize)
@@ -858,16 +834,17 @@ static int skge_rx_fill(struct skge_port *skge)
858{ 834{
859 struct skge_ring *ring = &skge->rx_ring; 835 struct skge_ring *ring = &skge->rx_ring;
860 struct skge_element *e; 836 struct skge_element *e;
861 unsigned int bufsize = skge->rx_buf_size;
862 837
863 e = ring->start; 838 e = ring->start;
864 do { 839 do {
865 struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize); 840 struct sk_buff *skb;
866 841
842 skb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
867 if (!skb) 843 if (!skb)
868 return -ENOMEM; 844 return -ENOMEM;
869 845
870 skge_rx_setup(skge, e, skb, bufsize); 846 skb_reserve(skb, NET_IP_ALIGN);
847 skge_rx_setup(skge, e, skb, skge->rx_buf_size);
871 } while ( (e = e->next) != ring->start); 848 } while ( (e = e->next) != ring->start);
872 849
873 ring->to_clean = ring->start; 850 ring->to_clean = ring->start;
@@ -876,7 +853,7 @@ static int skge_rx_fill(struct skge_port *skge)
876 853
877static void skge_link_up(struct skge_port *skge) 854static void skge_link_up(struct skge_port *skge)
878{ 855{
879 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), 856 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
880 LED_BLK_OFF|LED_SYNC_OFF|LED_ON); 857 LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
881 858
882 netif_carrier_on(skge->netdev); 859 netif_carrier_on(skge->netdev);
@@ -987,6 +964,8 @@ static void genesis_reset(struct skge_hw *hw, int port)
987{ 964{
988 const u8 zero[8] = { 0 }; 965 const u8 zero[8] = { 0 };
989 966
967 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
968
990 /* reset the statistics module */ 969 /* reset the statistics module */
991 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); 970 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
992 xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */ 971 xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */
@@ -1021,8 +1000,6 @@ static void bcom_check_link(struct skge_hw *hw, int port)
1021 (void) xm_phy_read(hw, port, PHY_BCOM_STAT); 1000 (void) xm_phy_read(hw, port, PHY_BCOM_STAT);
1022 status = xm_phy_read(hw, port, PHY_BCOM_STAT); 1001 status = xm_phy_read(hw, port, PHY_BCOM_STAT);
1023 1002
1024 pr_debug("bcom_check_link status=0x%x\n", status);
1025
1026 if ((status & PHY_ST_LSYNC) == 0) { 1003 if ((status & PHY_ST_LSYNC) == 0) {
1027 u16 cmd = xm_read16(hw, port, XM_MMU_CMD); 1004 u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
1028 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); 1005 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
@@ -1106,8 +1083,6 @@ static void bcom_phy_init(struct skge_port *skge, int jumbo)
1106 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 }, 1083 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
1107 }; 1084 };
1108 1085
1109 pr_debug("bcom_phy_init\n");
1110
1111 /* read Id from external PHY (all have the same address) */ 1086 /* read Id from external PHY (all have the same address) */
1112 id1 = xm_phy_read(hw, port, PHY_XMAC_ID1); 1087 id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
1113 1088
@@ -1340,6 +1315,8 @@ static void genesis_stop(struct skge_port *skge)
1340 int port = skge->port; 1315 int port = skge->port;
1341 u32 reg; 1316 u32 reg;
1342 1317
1318 genesis_reset(hw, port);
1319
1343 /* Clear Tx packet arbiter timeout IRQ */ 1320 /* Clear Tx packet arbiter timeout IRQ */
1344 skge_write16(hw, B3_PA_CTRL, 1321 skge_write16(hw, B3_PA_CTRL,
1345 port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2); 1322 port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
@@ -1465,7 +1442,6 @@ static void genesis_link_up(struct skge_port *skge)
1465 u16 cmd; 1442 u16 cmd;
1466 u32 mode, msk; 1443 u32 mode, msk;
1467 1444
1468 pr_debug("genesis_link_up\n");
1469 cmd = xm_read16(hw, port, XM_MMU_CMD); 1445 cmd = xm_read16(hw, port, XM_MMU_CMD);
1470 1446
1471 /* 1447 /*
@@ -1578,7 +1554,6 @@ static void yukon_init(struct skge_hw *hw, int port)
1578 struct skge_port *skge = netdev_priv(hw->dev[port]); 1554 struct skge_port *skge = netdev_priv(hw->dev[port]);
1579 u16 ctrl, ct1000, adv; 1555 u16 ctrl, ct1000, adv;
1580 1556
1581 pr_debug("yukon_init\n");
1582 if (skge->autoneg == AUTONEG_ENABLE) { 1557 if (skge->autoneg == AUTONEG_ENABLE) {
1583 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 1558 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
1584 1559
@@ -1668,6 +1643,22 @@ static void yukon_reset(struct skge_hw *hw, int port)
1668 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 1643 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
1669} 1644}
1670 1645
1646/* Apparently, early versions of Yukon-Lite had wrong chip_id? */
1647static int is_yukon_lite_a0(struct skge_hw *hw)
1648{
1649 u32 reg;
1650 int ret;
1651
1652 if (hw->chip_id != CHIP_ID_YUKON)
1653 return 0;
1654
1655 reg = skge_read32(hw, B2_FAR);
1656 skge_write8(hw, B2_FAR + 3, 0xff);
1657 ret = (skge_read8(hw, B2_FAR + 3) != 0);
1658 skge_write32(hw, B2_FAR, reg);
1659 return ret;
1660}
1661
1671static void yukon_mac_init(struct skge_hw *hw, int port) 1662static void yukon_mac_init(struct skge_hw *hw, int port)
1672{ 1663{
1673 struct skge_port *skge = netdev_priv(hw->dev[port]); 1664 struct skge_port *skge = netdev_priv(hw->dev[port]);
@@ -1677,9 +1668,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1677 1668
1678 /* WA code for COMA mode -- set PHY reset */ 1669 /* WA code for COMA mode -- set PHY reset */
1679 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1670 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1680 hw->chip_rev >= CHIP_REV_YU_LITE_A3) 1671 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1681 skge_write32(hw, B2_GP_IO, 1672 reg = skge_read32(hw, B2_GP_IO);
1682 (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9)); 1673 reg |= GP_DIR_9 | GP_IO_9;
1674 skge_write32(hw, B2_GP_IO, reg);
1675 }
1683 1676
1684 /* hard reset */ 1677 /* hard reset */
1685 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); 1678 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
@@ -1687,10 +1680,12 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1687 1680
1688 /* WA code for COMA mode -- clear PHY reset */ 1681 /* WA code for COMA mode -- clear PHY reset */
1689 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1682 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1690 hw->chip_rev >= CHIP_REV_YU_LITE_A3) 1683 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1691 skge_write32(hw, B2_GP_IO, 1684 reg = skge_read32(hw, B2_GP_IO);
1692 (skge_read32(hw, B2_GP_IO) | GP_DIR_9) 1685 reg |= GP_DIR_9;
1693 & ~GP_IO_9); 1686 reg &= ~GP_IO_9;
1687 skge_write32(hw, B2_GP_IO, reg);
1688 }
1694 1689
1695 /* Set hardware config mode */ 1690 /* Set hardware config mode */
1696 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | 1691 reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
@@ -1729,7 +1724,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1729 } 1724 }
1730 1725
1731 gma_write16(hw, port, GM_GP_CTRL, reg); 1726 gma_write16(hw, port, GM_GP_CTRL, reg);
1732 skge_read16(hw, GMAC_IRQ_SRC); 1727 skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
1733 1728
1734 yukon_init(hw, port); 1729 yukon_init(hw, port);
1735 1730
@@ -1779,9 +1774,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1779 /* Configure Rx MAC FIFO */ 1774 /* Configure Rx MAC FIFO */
1780 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); 1775 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
1781 reg = GMF_OPER_ON | GMF_RX_F_FL_ON; 1776 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
1782 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1777
1783 hw->chip_rev >= CHIP_REV_YU_LITE_A3) 1778 /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
1779 if (is_yukon_lite_a0(hw))
1784 reg &= ~GMF_RX_F_FL_ON; 1780 reg &= ~GMF_RX_F_FL_ON;
1781
1785 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); 1782 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
1786 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); 1783 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
1787 /* 1784 /*
@@ -1801,20 +1798,26 @@ static void yukon_stop(struct skge_port *skge)
1801 struct skge_hw *hw = skge->hw; 1798 struct skge_hw *hw = skge->hw;
1802 int port = skge->port; 1799 int port = skge->port;
1803 1800
1804 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1801 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
1805 hw->chip_rev >= CHIP_REV_YU_LITE_A3) { 1802 yukon_reset(hw, port);
1806 skge_write32(hw, B2_GP_IO,
1807 skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9);
1808 }
1809 1803
1810 gma_write16(hw, port, GM_GP_CTRL, 1804 gma_write16(hw, port, GM_GP_CTRL,
1811 gma_read16(hw, port, GM_GP_CTRL) 1805 gma_read16(hw, port, GM_GP_CTRL)
1812 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA)); 1806 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
1813 gma_read16(hw, port, GM_GP_CTRL); 1807 gma_read16(hw, port, GM_GP_CTRL);
1814 1808
1809 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1810 hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
1811 u32 io = skge_read32(hw, B2_GP_IO);
1812
1813 io |= GP_DIR_9 | GP_IO_9;
1814 skge_write32(hw, B2_GP_IO, io);
1815 skge_read32(hw, B2_GP_IO);
1816 }
1817
1815 /* set GPHY Control reset */ 1818 /* set GPHY Control reset */
1816 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); 1819 skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1817 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); 1820 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1818} 1821}
1819 1822
1820static void yukon_get_stats(struct skge_port *skge, u64 *data) 1823static void yukon_get_stats(struct skge_port *skge, u64 *data)
@@ -1873,10 +1876,8 @@ static void yukon_link_up(struct skge_port *skge)
1873 int port = skge->port; 1876 int port = skge->port;
1874 u16 reg; 1877 u16 reg;
1875 1878
1876 pr_debug("yukon_link_up\n");
1877
1878 /* Enable Transmit FIFO Underrun */ 1879 /* Enable Transmit FIFO Underrun */
1879 skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK); 1880 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
1880 1881
1881 reg = gma_read16(hw, port, GM_GP_CTRL); 1882 reg = gma_read16(hw, port, GM_GP_CTRL);
1882 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) 1883 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
@@ -1896,7 +1897,6 @@ static void yukon_link_down(struct skge_port *skge)
1896 int port = skge->port; 1897 int port = skge->port;
1897 u16 ctrl; 1898 u16 ctrl;
1898 1899
1899 pr_debug("yukon_link_down\n");
1900 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); 1900 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
1901 1901
1902 ctrl = gma_read16(hw, port, GM_GP_CTRL); 1902 ctrl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2112,7 +2112,6 @@ static int skge_up(struct net_device *dev)
2112 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); 2112 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
2113 skge_led(skge, LED_MODE_ON); 2113 skge_led(skge, LED_MODE_ON);
2114 2114
2115 pr_debug("skge_up completed\n");
2116 return 0; 2115 return 0;
2117 2116
2118 free_rx_ring: 2117 free_rx_ring:
@@ -2135,15 +2134,20 @@ static int skge_down(struct net_device *dev)
2135 2134
2136 netif_stop_queue(dev); 2135 netif_stop_queue(dev);
2137 2136
2137 skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
2138 if (hw->chip_id == CHIP_ID_GENESIS)
2139 genesis_stop(skge);
2140 else
2141 yukon_stop(skge);
2142
2143 hw->intr_mask &= ~portirqmask[skge->port];
2144 skge_write32(hw, B0_IMSK, hw->intr_mask);
2145
2138 /* Stop transmitter */ 2146 /* Stop transmitter */
2139 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); 2147 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
2140 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), 2148 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
2141 RB_RST_SET|RB_DIS_OP_MD); 2149 RB_RST_SET|RB_DIS_OP_MD);
2142 2150
2143 if (hw->chip_id == CHIP_ID_GENESIS)
2144 genesis_stop(skge);
2145 else
2146 yukon_stop(skge);
2147 2151
2148 /* Disable Force Sync bit and Enable Alloc bit */ 2152 /* Disable Force Sync bit and Enable Alloc bit */
2149 skge_write8(hw, SK_REG(port, TXA_CTRL), 2153 skge_write8(hw, SK_REG(port, TXA_CTRL),
@@ -2367,8 +2371,6 @@ static void genesis_set_multicast(struct net_device *dev)
2367 u32 mode; 2371 u32 mode;
2368 u8 filter[8]; 2372 u8 filter[8];
2369 2373
2370 pr_debug("genesis_set_multicast flags=%x count=%d\n", dev->flags, dev->mc_count);
2371
2372 mode = xm_read32(hw, port, XM_MODE); 2374 mode = xm_read32(hw, port, XM_MODE);
2373 mode |= XM_MD_ENA_HASH; 2375 mode |= XM_MD_ENA_HASH;
2374 if (dev->flags & IFF_PROMISC) 2376 if (dev->flags & IFF_PROMISC)
@@ -2435,6 +2437,14 @@ static void yukon_set_multicast(struct net_device *dev)
2435 gma_write16(hw, port, GM_RX_CTRL, reg); 2437 gma_write16(hw, port, GM_RX_CTRL, reg);
2436} 2438}
2437 2439
2440static inline u16 phy_length(const struct skge_hw *hw, u32 status)
2441{
2442 if (hw->chip_id == CHIP_ID_GENESIS)
2443 return status >> XMR_FS_LEN_SHIFT;
2444 else
2445 return status >> GMR_FS_LEN_SHIFT;
2446}
2447
2438static inline int bad_phy_status(const struct skge_hw *hw, u32 status) 2448static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
2439{ 2449{
2440 if (hw->chip_id == CHIP_ID_GENESIS) 2450 if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2444,80 +2454,99 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
2444 (status & GMR_FS_RX_OK) == 0; 2454 (status & GMR_FS_RX_OK) == 0;
2445} 2455}
2446 2456
2447static void skge_rx_error(struct skge_port *skge, int slot,
2448 u32 control, u32 status)
2449{
2450 if (netif_msg_rx_err(skge))
2451 printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
2452 skge->netdev->name, slot, control, status);
2453
2454 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
2455 skge->net_stats.rx_length_errors++;
2456 else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
2457 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
2458 skge->net_stats.rx_length_errors++;
2459 if (status & XMR_FS_FRA_ERR)
2460 skge->net_stats.rx_frame_errors++;
2461 if (status & XMR_FS_FCS_ERR)
2462 skge->net_stats.rx_crc_errors++;
2463 } else {
2464 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
2465 skge->net_stats.rx_length_errors++;
2466 if (status & GMR_FS_FRAGMENT)
2467 skge->net_stats.rx_frame_errors++;
2468 if (status & GMR_FS_CRC_ERR)
2469 skge->net_stats.rx_crc_errors++;
2470 }
2471}
2472 2457
2473/* Get receive buffer from descriptor. 2458/* Get receive buffer from descriptor.
2474 * Handles copy of small buffers and reallocation failures 2459 * Handles copy of small buffers and reallocation failures
2475 */ 2460 */
2476static inline struct sk_buff *skge_rx_get(struct skge_port *skge, 2461static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
2477 struct skge_element *e, 2462 struct skge_element *e,
2478 unsigned int len) 2463 u32 control, u32 status, u16 csum)
2479{ 2464{
2480 struct sk_buff *nskb, *skb; 2465 struct sk_buff *skb;
2466 u16 len = control & BMU_BBC;
2467
2468 if (unlikely(netif_msg_rx_status(skge)))
2469 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
2470 skge->netdev->name, e - skge->rx_ring.start,
2471 status, len);
2472
2473 if (len > skge->rx_buf_size)
2474 goto error;
2475
2476 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
2477 goto error;
2478
2479 if (bad_phy_status(skge->hw, status))
2480 goto error;
2481
2482 if (phy_length(skge->hw, status) != len)
2483 goto error;
2481 2484
2482 if (len < RX_COPY_THRESHOLD) { 2485 if (len < RX_COPY_THRESHOLD) {
2483 nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN); 2486 skb = dev_alloc_skb(len + 2);
2484 if (unlikely(!nskb)) 2487 if (!skb)
2485 return NULL; 2488 goto resubmit;
2486 2489
2490 skb_reserve(skb, 2);
2487 pci_dma_sync_single_for_cpu(skge->hw->pdev, 2491 pci_dma_sync_single_for_cpu(skge->hw->pdev,
2488 pci_unmap_addr(e, mapaddr), 2492 pci_unmap_addr(e, mapaddr),
2489 len, PCI_DMA_FROMDEVICE); 2493 len, PCI_DMA_FROMDEVICE);
2490 memcpy(nskb->data, e->skb->data, len); 2494 memcpy(skb->data, e->skb->data, len);
2491 pci_dma_sync_single_for_device(skge->hw->pdev, 2495 pci_dma_sync_single_for_device(skge->hw->pdev,
2492 pci_unmap_addr(e, mapaddr), 2496 pci_unmap_addr(e, mapaddr),
2493 len, PCI_DMA_FROMDEVICE); 2497 len, PCI_DMA_FROMDEVICE);
2494
2495 if (skge->rx_csum) {
2496 struct skge_rx_desc *rd = e->desc;
2497 nskb->csum = le16_to_cpu(rd->csum2);
2498 nskb->ip_summed = CHECKSUM_HW;
2499 }
2500 skge_rx_reuse(e, skge->rx_buf_size); 2498 skge_rx_reuse(e, skge->rx_buf_size);
2501 return nskb;
2502 } else { 2499 } else {
2503 nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size); 2500 struct sk_buff *nskb;
2504 if (unlikely(!nskb)) 2501 nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
2505 return NULL; 2502 if (!nskb)
2503 goto resubmit;
2506 2504
2507 pci_unmap_single(skge->hw->pdev, 2505 pci_unmap_single(skge->hw->pdev,
2508 pci_unmap_addr(e, mapaddr), 2506 pci_unmap_addr(e, mapaddr),
2509 pci_unmap_len(e, maplen), 2507 pci_unmap_len(e, maplen),
2510 PCI_DMA_FROMDEVICE); 2508 PCI_DMA_FROMDEVICE);
2511 skb = e->skb; 2509 skb = e->skb;
2512 if (skge->rx_csum) { 2510 prefetch(skb->data);
2513 struct skge_rx_desc *rd = e->desc;
2514 skb->csum = le16_to_cpu(rd->csum2);
2515 skb->ip_summed = CHECKSUM_HW;
2516 }
2517
2518 skge_rx_setup(skge, e, nskb, skge->rx_buf_size); 2511 skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
2519 return skb;
2520 } 2512 }
2513
2514 skb_put(skb, len);
2515 skb->dev = skge->netdev;
2516 if (skge->rx_csum) {
2517 skb->csum = csum;
2518 skb->ip_summed = CHECKSUM_HW;
2519 }
2520
2521 skb->protocol = eth_type_trans(skb, skge->netdev);
2522
2523 return skb;
2524error:
2525
2526 if (netif_msg_rx_err(skge))
2527 printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
2528 skge->netdev->name, e - skge->rx_ring.start,
2529 control, status);
2530
2531 if (skge->hw->chip_id == CHIP_ID_GENESIS) {
2532 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
2533 skge->net_stats.rx_length_errors++;
2534 if (status & XMR_FS_FRA_ERR)
2535 skge->net_stats.rx_frame_errors++;
2536 if (status & XMR_FS_FCS_ERR)
2537 skge->net_stats.rx_crc_errors++;
2538 } else {
2539 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
2540 skge->net_stats.rx_length_errors++;
2541 if (status & GMR_FS_FRAGMENT)
2542 skge->net_stats.rx_frame_errors++;
2543 if (status & GMR_FS_CRC_ERR)
2544 skge->net_stats.rx_crc_errors++;
2545 }
2546
2547resubmit:
2548 skge_rx_reuse(e, skge->rx_buf_size);
2549 return NULL;
2521} 2550}
2522 2551
2523 2552
@@ -2530,37 +2559,19 @@ static int skge_poll(struct net_device *dev, int *budget)
2530 unsigned int to_do = min(dev->quota, *budget); 2559 unsigned int to_do = min(dev->quota, *budget);
2531 unsigned int work_done = 0; 2560 unsigned int work_done = 0;
2532 2561
2533 pr_debug("skge_poll\n");
2534
2535 for (e = ring->to_clean; work_done < to_do; e = e->next) { 2562 for (e = ring->to_clean; work_done < to_do; e = e->next) {
2536 struct skge_rx_desc *rd = e->desc; 2563 struct skge_rx_desc *rd = e->desc;
2537 struct sk_buff *skb; 2564 struct sk_buff *skb;
2538 u32 control, len, status; 2565 u32 control;
2539 2566
2540 rmb(); 2567 rmb();
2541 control = rd->control; 2568 control = rd->control;
2542 if (control & BMU_OWN) 2569 if (control & BMU_OWN)
2543 break; 2570 break;
2544 2571
2545 len = control & BMU_BBC; 2572 skb = skge_rx_get(skge, e, control, rd->status,
2546 status = rd->status; 2573 le16_to_cpu(rd->csum2));
2547
2548 if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
2549 || bad_phy_status(hw, status))) {
2550 skge_rx_error(skge, e - ring->start, control, status);
2551 skge_rx_reuse(e, skge->rx_buf_size);
2552 continue;
2553 }
2554
2555 if (netif_msg_rx_status(skge))
2556 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
2557 dev->name, e - ring->start, rd->status, len);
2558
2559 skb = skge_rx_get(skge, e, len);
2560 if (likely(skb)) { 2574 if (likely(skb)) {
2561 skb_put(skb, len);
2562 skb->protocol = eth_type_trans(skb, dev);
2563
2564 dev->last_rx = jiffies; 2575 dev->last_rx = jiffies;
2565 netif_receive_skb(skb); 2576 netif_receive_skb(skb);
2566 2577
@@ -2672,9 +2683,9 @@ static void skge_error_irq(struct skge_hw *hw)
2672 if (hw->chip_id == CHIP_ID_GENESIS) { 2683 if (hw->chip_id == CHIP_ID_GENESIS) {
2673 /* clear xmac errors */ 2684 /* clear xmac errors */
2674 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) 2685 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
2675 skge_write16(hw, SK_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT); 2686 skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
2676 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) 2687 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
2677 skge_write16(hw, SK_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT); 2688 skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
2678 } else { 2689 } else {
2679 /* Timestamp (unused) overflow */ 2690 /* Timestamp (unused) overflow */
2680 if (hwstatus & IS_IRQ_TIST_OV) 2691 if (hwstatus & IS_IRQ_TIST_OV)
@@ -3000,9 +3011,6 @@ static int skge_reset(struct skge_hw *hw)
3000 3011
3001 skge_write32(hw, B0_IMSK, hw->intr_mask); 3012 skge_write32(hw, B0_IMSK, hw->intr_mask);
3002 3013
3003 if (hw->chip_id != CHIP_ID_GENESIS)
3004 skge_write8(hw, GMAC_IRQ_MSK, 0);
3005
3006 spin_lock_bh(&hw->phy_lock); 3014 spin_lock_bh(&hw->phy_lock);
3007 for (i = 0; i < hw->ports; i++) { 3015 for (i = 0; i < hw->ports; i++) {
3008 if (hw->chip_id == CHIP_ID_GENESIS) 3016 if (hw->chip_id == CHIP_ID_GENESIS)
@@ -3230,6 +3238,11 @@ static void __devexit skge_remove(struct pci_dev *pdev)
3230 dev0 = hw->dev[0]; 3238 dev0 = hw->dev[0];
3231 unregister_netdev(dev0); 3239 unregister_netdev(dev0);
3232 3240
3241 skge_write32(hw, B0_IMSK, 0);
3242 skge_write16(hw, B0_LED, LED_STAT_OFF);
3243 skge_pci_clear(hw);
3244 skge_write8(hw, B0_CTST, CS_RST_SET);
3245
3233 tasklet_kill(&hw->ext_tasklet); 3246 tasklet_kill(&hw->ext_tasklet);
3234 3247
3235 free_irq(pdev->irq, hw); 3248 free_irq(pdev->irq, hw);
@@ -3238,7 +3251,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
3238 if (dev1) 3251 if (dev1)
3239 free_netdev(dev1); 3252 free_netdev(dev1);
3240 free_netdev(dev0); 3253 free_netdev(dev0);
3241 skge_write16(hw, B0_LED, LED_STAT_OFF); 3254
3242 iounmap(hw->regs); 3255 iounmap(hw->regs);
3243 kfree(hw); 3256 kfree(hw);
3244 pci_set_drvdata(pdev, NULL); 3257 pci_set_drvdata(pdev, NULL);
@@ -3257,7 +3270,10 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
3257 struct skge_port *skge = netdev_priv(dev); 3270 struct skge_port *skge = netdev_priv(dev);
3258 if (netif_running(dev)) { 3271 if (netif_running(dev)) {
3259 netif_carrier_off(dev); 3272 netif_carrier_off(dev);
3260 skge_down(dev); 3273 if (skge->wol)
3274 netif_stop_queue(dev);
3275 else
3276 skge_down(dev);
3261 } 3277 }
3262 netif_device_detach(dev); 3278 netif_device_detach(dev);
3263 wol |= skge->wol; 3279 wol |= skge->wol;
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index f1680beb8e68..72c175b87a5a 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -953,6 +953,7 @@ enum {
953 */ 953 */
954enum { 954enum {
955 XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */ 955 XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */
956 XMR_FS_LEN_SHIFT = 18,
956 XMR_FS_2L_VLAN = 1<<17, /* Bit 17: tagged wh 2Lev VLAN ID*/ 957 XMR_FS_2L_VLAN = 1<<17, /* Bit 17: tagged wh 2Lev VLAN ID*/
957 XMR_FS_1_VLAN = 1<<16, /* Bit 16: tagged wh 1ev VLAN ID*/ 958 XMR_FS_1_VLAN = 1<<16, /* Bit 16: tagged wh 1ev VLAN ID*/
958 XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */ 959 XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */
@@ -1868,6 +1869,7 @@ enum {
1868/* Receive Frame Status Encoding */ 1869/* Receive Frame Status Encoding */
1869enum { 1870enum {
1870 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ 1871 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
1872 GMR_FS_LEN_SHIFT = 16,
1871 GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */ 1873 GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */
1872 GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */ 1874 GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */
1873 GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */ 1875 GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */
@@ -2008,7 +2010,7 @@ enum {
2008 GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ 2010 GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
2009 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ 2011 GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
2010 2012
2011#define GMAC_DEF_MSK (GM_IS_TX_CO_OV | GM_IS_RX_CO_OV | GM_IS_TX_FF_UR) 2013#define GMAC_DEF_MSK (GM_IS_RX_FF_OR | GM_IS_TX_FF_UR)
2012 2014
2013/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ 2015/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
2014 /* Bits 15.. 2: reserved */ 2016 /* Bits 15.. 2: reserved */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 4e19220473d0..c796f41b4a52 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1817,6 +1817,10 @@ spider_net_setup_phy(struct spider_net_card *card)
1817 /* LEDs active in both modes, autosense prio = fiber */ 1817 /* LEDs active in both modes, autosense prio = fiber */
1818 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x945f); 1818 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x945f);
1819 1819
1820 /* switch off fibre autoneg */
1821 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0xfc01);
1822 spider_net_write_phy(card->netdev, 1, 0x0b, 0x0004);
1823
1820 phy->def->ops->read_link(phy); 1824 phy->def->ops->read_link(phy);
1821 pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name, 1825 pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
1822 phy->speed, phy->duplex==1 ? "Full" : "Half"); 1826 phy->speed, phy->duplex==1 ? "Full" : "Half");
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7599f52e15b3..1802c3b48799 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
67 67
68#define DRV_MODULE_NAME "tg3" 68#define DRV_MODULE_NAME "tg3"
69#define PFX DRV_MODULE_NAME ": " 69#define PFX DRV_MODULE_NAME ": "
70#define DRV_MODULE_VERSION "3.39" 70#define DRV_MODULE_VERSION "3.42"
71#define DRV_MODULE_RELDATE "September 5, 2005" 71#define DRV_MODULE_RELDATE "Oct 3, 2005"
72 72
73#define TG3_DEF_MAC_MODE 0 73#define TG3_DEF_MAC_MODE 0
74#define TG3_DEF_RX_MODE 0 74#define TG3_DEF_RX_MODE 0
@@ -3389,7 +3389,8 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3389 struct tg3 *tp = netdev_priv(dev); 3389 struct tg3 *tp = netdev_priv(dev);
3390 struct tg3_hw_status *sblk = tp->hw_status; 3390 struct tg3_hw_status *sblk = tp->hw_status;
3391 3391
3392 if (sblk->status & SD_STATUS_UPDATED) { 3392 if ((sblk->status & SD_STATUS_UPDATED) ||
3393 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3393 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 3394 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3394 0x00000001); 3395 0x00000001);
3395 return IRQ_RETVAL(1); 3396 return IRQ_RETVAL(1);
@@ -3442,31 +3443,47 @@ static void tg3_tx_timeout(struct net_device *dev)
3442 schedule_work(&tp->reset_task); 3443 schedule_work(&tp->reset_task);
3443} 3444}
3444 3445
3446/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3447static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3448{
3449 u32 base = (u32) mapping & 0xffffffff;
3450
3451 return ((base > 0xffffdcc0) &&
3452 (base + len + 8 < base));
3453}
3454
3445static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32); 3455static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3446 3456
3447static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, 3457static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3448 u32 guilty_entry, int guilty_len, 3458 u32 last_plus_one, u32 *start,
3449 u32 last_plus_one, u32 *start, u32 mss) 3459 u32 base_flags, u32 mss)
3450{ 3460{
3451 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC); 3461 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3452 dma_addr_t new_addr; 3462 dma_addr_t new_addr = 0;
3453 u32 entry = *start; 3463 u32 entry = *start;
3454 int i; 3464 int i, ret = 0;
3455 3465
3456 if (!new_skb) { 3466 if (!new_skb) {
3457 dev_kfree_skb(skb); 3467 ret = -1;
3458 return -1; 3468 } else {
3469 /* New SKB is guaranteed to be linear. */
3470 entry = *start;
3471 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3472 PCI_DMA_TODEVICE);
3473 /* Make sure new skb does not cross any 4G boundaries.
3474 * Drop the packet if it does.
3475 */
3476 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3477 ret = -1;
3478 dev_kfree_skb(new_skb);
3479 new_skb = NULL;
3480 } else {
3481 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3482 base_flags, 1 | (mss << 1));
3483 *start = NEXT_TX(entry);
3484 }
3459 } 3485 }
3460 3486
3461 /* New SKB is guaranteed to be linear. */
3462 entry = *start;
3463 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3464 PCI_DMA_TODEVICE);
3465 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3466 (skb->ip_summed == CHECKSUM_HW) ?
3467 TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3468 *start = NEXT_TX(entry);
3469
3470 /* Now clean up the sw ring entries. */ 3487 /* Now clean up the sw ring entries. */
3471 i = 0; 3488 i = 0;
3472 while (entry != last_plus_one) { 3489 while (entry != last_plus_one) {
@@ -3491,7 +3508,7 @@ static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3491 3508
3492 dev_kfree_skb(skb); 3509 dev_kfree_skb(skb);
3493 3510
3494 return 0; 3511 return ret;
3495} 3512}
3496 3513
3497static void tg3_set_txd(struct tg3 *tp, int entry, 3514static void tg3_set_txd(struct tg3 *tp, int entry,
@@ -3517,19 +3534,10 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
3517 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; 3534 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3518} 3535}
3519 3536
3520static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3521{
3522 u32 base = (u32) mapping & 0xffffffff;
3523
3524 return ((base > 0xffffdcc0) &&
3525 (base + len + 8 < base));
3526}
3527
3528static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 3537static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3529{ 3538{
3530 struct tg3 *tp = netdev_priv(dev); 3539 struct tg3 *tp = netdev_priv(dev);
3531 dma_addr_t mapping; 3540 dma_addr_t mapping;
3532 unsigned int i;
3533 u32 len, entry, base_flags, mss; 3541 u32 len, entry, base_flags, mss;
3534 int would_hit_hwbug; 3542 int would_hit_hwbug;
3535 3543
@@ -3624,7 +3632,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3624 would_hit_hwbug = 0; 3632 would_hit_hwbug = 0;
3625 3633
3626 if (tg3_4g_overflow_test(mapping, len)) 3634 if (tg3_4g_overflow_test(mapping, len))
3627 would_hit_hwbug = entry + 1; 3635 would_hit_hwbug = 1;
3628 3636
3629 tg3_set_txd(tp, entry, mapping, len, base_flags, 3637 tg3_set_txd(tp, entry, mapping, len, base_flags,
3630 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); 3638 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
@@ -3648,12 +3656,8 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3648 tp->tx_buffers[entry].skb = NULL; 3656 tp->tx_buffers[entry].skb = NULL;
3649 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); 3657 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3650 3658
3651 if (tg3_4g_overflow_test(mapping, len)) { 3659 if (tg3_4g_overflow_test(mapping, len))
3652 /* Only one should match. */ 3660 would_hit_hwbug = 1;
3653 if (would_hit_hwbug)
3654 BUG();
3655 would_hit_hwbug = entry + 1;
3656 }
3657 3661
3658 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 3662 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3659 tg3_set_txd(tp, entry, mapping, len, 3663 tg3_set_txd(tp, entry, mapping, len,
@@ -3669,34 +3673,15 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3669 if (would_hit_hwbug) { 3673 if (would_hit_hwbug) {
3670 u32 last_plus_one = entry; 3674 u32 last_plus_one = entry;
3671 u32 start; 3675 u32 start;
3672 unsigned int len = 0;
3673
3674 would_hit_hwbug -= 1;
3675 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3676 entry &= (TG3_TX_RING_SIZE - 1);
3677 start = entry;
3678 i = 0;
3679 while (entry != last_plus_one) {
3680 if (i == 0)
3681 len = skb_headlen(skb);
3682 else
3683 len = skb_shinfo(skb)->frags[i-1].size;
3684 3676
3685 if (entry == would_hit_hwbug) 3677 start = entry - 1 - skb_shinfo(skb)->nr_frags;
3686 break; 3678 start &= (TG3_TX_RING_SIZE - 1);
3687
3688 i++;
3689 entry = NEXT_TX(entry);
3690
3691 }
3692 3679
3693 /* If the workaround fails due to memory/mapping 3680 /* If the workaround fails due to memory/mapping
3694 * failure, silently drop this packet. 3681 * failure, silently drop this packet.
3695 */ 3682 */
3696 if (tigon3_4gb_hwbug_workaround(tp, skb, 3683 if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
3697 entry, len, 3684 &start, base_flags, mss))
3698 last_plus_one,
3699 &start, mss))
3700 goto out_unlock; 3685 goto out_unlock;
3701 3686
3702 entry = start; 3687 entry = start;
@@ -5411,6 +5396,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
5411 struct tg3 *tp = netdev_priv(dev); 5396 struct tg3 *tp = netdev_priv(dev);
5412 struct sockaddr *addr = p; 5397 struct sockaddr *addr = p;
5413 5398
5399 if (!is_valid_ether_addr(addr->sa_data))
5400 return -EINVAL;
5401
5414 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 5402 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5415 5403
5416 spin_lock_bh(&tp->lock); 5404 spin_lock_bh(&tp->lock);
@@ -5822,6 +5810,13 @@ static int tg3_reset_hw(struct tg3 *tp)
5822 } 5810 }
5823 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); 5811 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5824 5812
5813 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5814 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
5815 /* reset to prevent losing 1st rx packet intermittently */
5816 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5817 udelay(10);
5818 }
5819
5825 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 5820 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5826 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; 5821 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5827 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); 5822 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
@@ -5953,7 +5948,7 @@ static int tg3_reset_hw(struct tg3 *tp)
5953 tw32(MAC_LED_CTRL, tp->led_ctrl); 5948 tw32(MAC_LED_CTRL, tp->led_ctrl);
5954 5949
5955 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); 5950 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5956 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { 5951 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5957 tw32_f(MAC_RX_MODE, RX_MODE_RESET); 5952 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5958 udelay(10); 5953 udelay(10);
5959 } 5954 }
@@ -7376,12 +7371,17 @@ static int tg3_nway_reset(struct net_device *dev)
7376 if (!netif_running(dev)) 7371 if (!netif_running(dev))
7377 return -EAGAIN; 7372 return -EAGAIN;
7378 7373
7374 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7375 return -EINVAL;
7376
7379 spin_lock_bh(&tp->lock); 7377 spin_lock_bh(&tp->lock);
7380 r = -EINVAL; 7378 r = -EINVAL;
7381 tg3_readphy(tp, MII_BMCR, &bmcr); 7379 tg3_readphy(tp, MII_BMCR, &bmcr);
7382 if (!tg3_readphy(tp, MII_BMCR, &bmcr) && 7380 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7383 (bmcr & BMCR_ANENABLE)) { 7381 ((bmcr & BMCR_ANENABLE) ||
7384 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART); 7382 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7383 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7384 BMCR_ANENABLE);
7385 r = 0; 7385 r = 0;
7386 } 7386 }
7387 spin_unlock_bh(&tp->lock); 7387 spin_unlock_bh(&tp->lock);
@@ -7943,19 +7943,32 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
7943 struct tg3_rx_buffer_desc *desc; 7943 struct tg3_rx_buffer_desc *desc;
7944 7944
7945 if (loopback_mode == TG3_MAC_LOOPBACK) { 7945 if (loopback_mode == TG3_MAC_LOOPBACK) {
7946 /* HW errata - mac loopback fails in some cases on 5780.
7947 * Normal traffic and PHY loopback are not affected by
7948 * errata.
7949 */
7950 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
7951 return 0;
7952
7946 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | 7953 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7947 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY | 7954 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7948 MAC_MODE_PORT_MODE_GMII; 7955 MAC_MODE_PORT_MODE_GMII;
7949 tw32(MAC_MODE, mac_mode); 7956 tw32(MAC_MODE, mac_mode);
7950 } else if (loopback_mode == TG3_PHY_LOOPBACK) { 7957 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
7958 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
7959 BMCR_SPEED1000);
7960 udelay(40);
7961 /* reset to prevent losing 1st rx packet intermittently */
7962 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7963 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7964 udelay(10);
7965 tw32_f(MAC_RX_MODE, tp->rx_mode);
7966 }
7951 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | 7967 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7952 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII; 7968 MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
7953 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) 7969 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
7954 mac_mode &= ~MAC_MODE_LINK_POLARITY; 7970 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7955 tw32(MAC_MODE, mac_mode); 7971 tw32(MAC_MODE, mac_mode);
7956
7957 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
7958 BMCR_SPEED1000);
7959 } 7972 }
7960 else 7973 else
7961 return -EINVAL; 7974 return -EINVAL;
@@ -9271,6 +9284,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9271 static struct pci_device_id write_reorder_chipsets[] = { 9284 static struct pci_device_id write_reorder_chipsets[] = {
9272 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 9285 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9273 PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 9286 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9287 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9288 PCI_DEVICE_ID_VIA_8385_0) },
9274 { }, 9289 { },
9275 }; 9290 };
9276 u32 misc_ctrl_reg; 9291 u32 misc_ctrl_reg;
@@ -9285,15 +9300,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9285 tp->tg3_flags2 |= TG3_FLG2_SUN_570X; 9300 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9286#endif 9301#endif
9287 9302
9288 /* If we have an AMD 762 chipset, write
9289 * reordering to the mailbox registers done by the host
9290 * controller can cause major troubles. We read back from
9291 * every mailbox register write to force the writes to be
9292 * posted to the chip in order.
9293 */
9294 if (pci_dev_present(write_reorder_chipsets))
9295 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9296
9297 /* Force memory write invalidate off. If we leave it on, 9303 /* Force memory write invalidate off. If we leave it on,
9298 * then on 5700_BX chips we have to enable a workaround. 9304 * then on 5700_BX chips we have to enable a workaround.
9299 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary 9305 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
@@ -9424,6 +9430,16 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9424 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0) 9430 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9425 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 9431 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9426 9432
9433 /* If we have an AMD 762 or VIA K8T800 chipset, write
9434 * reordering to the mailbox registers done by the host
9435 * controller can cause major troubles. We read back from
9436 * every mailbox register write to force the writes to be
9437 * posted to the chip in order.
9438 */
9439 if (pci_dev_present(write_reorder_chipsets) &&
9440 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9441 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9442
9427 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && 9443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9428 tp->pci_lat_timer < 64) { 9444 tp->pci_lat_timer < 64) {
9429 tp->pci_lat_timer = 64; 9445 tp->pci_lat_timer = 64;
@@ -9532,7 +9548,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9532 tp->write32_rx_mbox = tg3_write_indirect_mbox; 9548 tp->write32_rx_mbox = tg3_write_indirect_mbox;
9533 9549
9534 iounmap(tp->regs); 9550 iounmap(tp->regs);
9535 tp->regs = 0; 9551 tp->regs = NULL;
9536 9552
9537 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); 9553 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9538 pci_cmd &= ~PCI_COMMAND_MEMORY; 9554 pci_cmd &= ~PCI_COMMAND_MEMORY;
@@ -10338,6 +10354,44 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
10338 }; 10354 };
10339} 10355}
10340 10356
10357static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
10358{
10359 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10360 strcpy(str, "PCI Express");
10361 return str;
10362 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
10363 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
10364
10365 strcpy(str, "PCIX:");
10366
10367 if ((clock_ctrl == 7) ||
10368 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
10369 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
10370 strcat(str, "133MHz");
10371 else if (clock_ctrl == 0)
10372 strcat(str, "33MHz");
10373 else if (clock_ctrl == 2)
10374 strcat(str, "50MHz");
10375 else if (clock_ctrl == 4)
10376 strcat(str, "66MHz");
10377 else if (clock_ctrl == 6)
10378 strcat(str, "100MHz");
10379 else if (clock_ctrl == 7)
10380 strcat(str, "133MHz");
10381 } else {
10382 strcpy(str, "PCI:");
10383 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
10384 strcat(str, "66MHz");
10385 else
10386 strcat(str, "33MHz");
10387 }
10388 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
10389 strcat(str, ":32-bit");
10390 else
10391 strcat(str, ":64-bit");
10392 return str;
10393}
10394
10341static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp) 10395static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
10342{ 10396{
10343 struct pci_dev *peer; 10397 struct pci_dev *peer;
@@ -10400,6 +10454,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10400 struct net_device *dev; 10454 struct net_device *dev;
10401 struct tg3 *tp; 10455 struct tg3 *tp;
10402 int i, err, pci_using_dac, pm_cap; 10456 int i, err, pci_using_dac, pm_cap;
10457 char str[40];
10403 10458
10404 if (tg3_version_printed++ == 0) 10459 if (tg3_version_printed++ == 0)
10405 printk(KERN_INFO "%s", version); 10460 printk(KERN_INFO "%s", version);
@@ -10645,16 +10700,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10645 10700
10646 pci_set_drvdata(pdev, dev); 10701 pci_set_drvdata(pdev, dev);
10647 10702
10648 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ", 10703 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
10649 dev->name, 10704 dev->name,
10650 tp->board_part_number, 10705 tp->board_part_number,
10651 tp->pci_chip_rev_id, 10706 tp->pci_chip_rev_id,
10652 tg3_phy_string(tp), 10707 tg3_phy_string(tp),
10653 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""), 10708 tg3_bus_string(tp, str),
10654 ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
10655 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
10656 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
10657 ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
10658 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000"); 10709 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
10659 10710
10660 for (i = 0; i < 6; i++) 10711 for (i = 0; i < 6; i++)
@@ -10680,7 +10731,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10680err_out_iounmap: 10731err_out_iounmap:
10681 if (tp->regs) { 10732 if (tp->regs) {
10682 iounmap(tp->regs); 10733 iounmap(tp->regs);
10683 tp->regs = 0; 10734 tp->regs = NULL;
10684 } 10735 }
10685 10736
10686err_out_free_dev: 10737err_out_free_dev:
@@ -10705,7 +10756,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
10705 unregister_netdev(dev); 10756 unregister_netdev(dev);
10706 if (tp->regs) { 10757 if (tp->regs) {
10707 iounmap(tp->regs); 10758 iounmap(tp->regs);
10708 tp->regs = 0; 10759 tp->regs = NULL;
10709 } 10760 }
10710 free_netdev(dev); 10761 free_netdev(dev);
10711 pci_release_regions(pdev); 10762 pci_release_regions(pdev);
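
The relocated tg3_4g_overflow_test() is the heart of the reworked hardware-bug handling above: affected Tigon3 chips mishandle DMA buffers whose low 32 address bits wrap across a 4 GB boundary, so any mapping where base + len + 8 overflows the low word is flagged and the packet is re-copied or dropped. A standalone sketch of the same check, with fabricated addresses for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirror of the driver's test: does [base, base + len + 8) wrap past a
 * 4 GB boundary when only the low 32 bits of the DMA address are considered?
 */
static int crosses_4g_boundary(uint64_t mapping, int len)
{
	uint32_t base = (uint32_t)mapping;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}

int main(void)
{
	/* Hypothetical mappings, only for demonstration. */
	printf("%d\n", crosses_4g_boundary(0xfffff000ULL, 0x2000)); /* 1: wraps */
	printf("%d\n", crosses_4g_boundary(0x00010000ULL, 0x2000)); /* 0: safe  */
	return 0;
}
```
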
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index c184b773e585..2e733c60bfa4 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2246,6 +2246,7 @@ struct tg3 {
2246 (X) == PHY_ID_BCM5411 || (X) == PHY_ID_BCM5701 || \ 2246 (X) == PHY_ID_BCM5411 || (X) == PHY_ID_BCM5701 || \
2247 (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \ 2247 (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \
2248 (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \ 2248 (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \
2249 (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5780 || \
2249 (X) == PHY_ID_BCM8002) 2250 (X) == PHY_ID_BCM8002)
2250 2251
2251 struct tg3_hw_stats *hw_stats; 2252 struct tg3_hw_stats *hw_stats;
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 26cc4f6378c7..60d1e05ab732 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -117,7 +117,7 @@ static int xircom_open(struct net_device *dev);
117static int xircom_close(struct net_device *dev); 117static int xircom_close(struct net_device *dev);
118static void xircom_up(struct xircom_private *card); 118static void xircom_up(struct xircom_private *card);
119static struct net_device_stats *xircom_get_stats(struct net_device *dev); 119static struct net_device_stats *xircom_get_stats(struct net_device *dev);
120#if CONFIG_NET_POLL_CONTROLLER 120#ifdef CONFIG_NET_POLL_CONTROLLER
121static void xircom_poll_controller(struct net_device *dev); 121static void xircom_poll_controller(struct net_device *dev);
122#endif 122#endif
123 123
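
The xircom_cb fix swaps `#if CONFIG_NET_POLL_CONTROLLER` for `#ifdef`: a boolean Kconfig symbol is either defined to 1 or not defined at all, and `#if` on an undefined macro silently evaluates it as 0, so the prototype was never seen by the compiler. A small illustration of the difference (the FEATURE_* macros are made up for the example):

```c
#include <stdio.h>

/* Suppose the build only defines FEATURE_A, e.g. via -DFEATURE_A=1 */
#define FEATURE_A 1
/* FEATURE_B intentionally left undefined */

int main(void)
{
#ifdef FEATURE_A
	puts("FEATURE_A compiled in");          /* printed */
#endif

#if FEATURE_B          /* undefined -> treated as 0, may trigger -Wundef */
	puts("FEATURE_B compiled in");          /* never printed */
#else
	puts("FEATURE_B absent");               /* printed */
#endif

#ifdef FEATURE_B
	puts("FEATURE_B compiled in (ifdef)");  /* also never printed */
#endif
	return 0;
}
```
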
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 48c03c11cd9a..a01efa6d5c62 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -72,7 +72,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
72 } 72 }
73 skb_reserve(skb, 4); 73 skb_reserve(skb, 4);
74 cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0); 74 cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
75 data = (cisco_packet*)skb->data; 75 data = (cisco_packet*)(skb->data + 4);
76 76
77 data->type = htonl(type); 77 data->type = htonl(type);
78 data->par1 = htonl(par1); 78 data->par1 = htonl(par1);
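
In cisco_keepalive_send() the 4-byte HDLC header is pushed onto the front of the buffer by cisco_hard_header(), so afterwards skb->data points at that header rather than at the keepalive payload; the fix writes the payload fields 4 bytes further in. A simplified, userspace sketch of the pointer arithmetic (the fake_* helpers stand in for the real sk_buff API and are not kernel code):

```c
#include <stdint.h>
#include <string.h>

struct fake_skb {
	uint8_t  buf[128];
	uint8_t *data;          /* current start of packet data */
};

/* Stand-in for skb_reserve(): leave headroom at the front. */
static void fake_reserve(struct fake_skb *skb, int len)
{
	skb->data = skb->buf + len;
}

/* Stand-in for the header push done inside cisco_hard_header(). */
static uint8_t *fake_push(struct fake_skb *skb, int len)
{
	skb->data -= len;
	return skb->data;
}

int main(void)
{
	struct fake_skb skb;
	fake_reserve(&skb, 4);

	uint8_t *hdr = fake_push(&skb, 4);   /* 4-byte HDLC header sits here   */
	memset(hdr, 0, 4);

	uint8_t *payload = skb.data + 4;     /* keepalive packet starts here   */
	(void)payload;
	return 0;
}
```
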
diff --git a/drivers/net/wan/sdlamain.c b/drivers/net/wan/sdlamain.c
index 74e151acef3e..7a8b22a7ea31 100644
--- a/drivers/net/wan/sdlamain.c
+++ b/drivers/net/wan/sdlamain.c
@@ -57,6 +57,7 @@
57#include <linux/ioport.h> /* request_region(), release_region() */ 57#include <linux/ioport.h> /* request_region(), release_region() */
58#include <linux/wanrouter.h> /* WAN router definitions */ 58#include <linux/wanrouter.h> /* WAN router definitions */
59#include <linux/wanpipe.h> /* WANPIPE common user API definitions */ 59#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
60#include <linux/rcupdate.h>
60 61
61#include <linux/in.h> 62#include <linux/in.h>
62#include <asm/io.h> /* phys_to_virt() */ 63#include <asm/io.h> /* phys_to_virt() */
@@ -1268,37 +1269,41 @@ unsigned long get_ip_address(struct net_device *dev, int option)
1268 1269
1269 struct in_ifaddr *ifaddr; 1270 struct in_ifaddr *ifaddr;
1270 struct in_device *in_dev; 1271 struct in_device *in_dev;
1272 unsigned long addr = 0;
1271 1273
1272 if ((in_dev = __in_dev_get(dev)) == NULL){ 1274 rcu_read_lock();
1273 return 0; 1275 if ((in_dev = __in_dev_get_rcu(dev)) == NULL){
1276 goto out;
1274 } 1277 }
1275 1278
1276 if ((ifaddr = in_dev->ifa_list)== NULL ){ 1279 if ((ifaddr = in_dev->ifa_list)== NULL ){
1277 return 0; 1280 goto out;
1278 } 1281 }
1279 1282
1280 switch (option){ 1283 switch (option){
1281 1284
1282 case WAN_LOCAL_IP: 1285 case WAN_LOCAL_IP:
1283 return ifaddr->ifa_local; 1286 addr = ifaddr->ifa_local;
1284 break; 1287 break;
1285 1288
1286 case WAN_POINTOPOINT_IP: 1289 case WAN_POINTOPOINT_IP:
1287 return ifaddr->ifa_address; 1290 addr = ifaddr->ifa_address;
1288 break; 1291 break;
1289 1292
1290 case WAN_NETMASK_IP: 1293 case WAN_NETMASK_IP:
1291 return ifaddr->ifa_mask; 1294 addr = ifaddr->ifa_mask;
1292 break; 1295 break;
1293 1296
1294 case WAN_BROADCAST_IP: 1297 case WAN_BROADCAST_IP:
1295 return ifaddr->ifa_broadcast; 1298 addr = ifaddr->ifa_broadcast;
1296 break; 1299 break;
1297 default: 1300 default:
1298 return 0; 1301 break;
1299 } 1302 }
1300 1303
1301 return 0; 1304out:
1305 rcu_read_unlock();
1306 return addr;
1302} 1307}
1303 1308
1304void add_gateway(sdla_t *card, struct net_device *dev) 1309void add_gateway(sdla_t *card, struct net_device *dev)
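
The sdlamain conversion wraps the whole address lookup in rcu_read_lock()/rcu_read_unlock(), switches to __in_dev_get_rcu(), and routes every exit through a single out: label so the read-side critical section is always closed. A generic sketch of that pattern, assuming a kernel build environment (the helper name and its use are illustrative, not WANPIPE code):

```c
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>

/* Hypothetical helper: return the primary local address of a device,
 * or 0 if it has none.  Illustrates the rcu_read_lock()/goto-out pattern.
 */
static __be32 primary_local_addr(struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	__be32 addr = 0;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		goto out;

	ifa = in_dev->ifa_list;
	if (!ifa)
		goto out;

	addr = ifa->ifa_local;
out:
	rcu_read_unlock();
	return addr;
}
```
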
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index b56a7b516d24..a6d3b55013a5 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -769,7 +769,7 @@ static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb)
769 u32 addr = 0, mask = ~0; /* FIXME: is the mask correct? */ 769 u32 addr = 0, mask = ~0; /* FIXME: is the mask correct? */
770#ifdef CONFIG_INET 770#ifdef CONFIG_INET
771 rcu_read_lock(); 771 rcu_read_lock();
772 if ((in_dev = __in_dev_get(dev)) != NULL) 772 if ((in_dev = __in_dev_get_rcu(dev)) != NULL)
773 { 773 {
774 for (ifa=in_dev->ifa_list; ifa != NULL; 774 for (ifa=in_dev->ifa_list; ifa != NULL;
775 ifa=ifa->ifa_next) { 775 ifa=ifa->ifa_next) {
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 2be65d308fbe..06998c2240d9 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -6852,7 +6852,10 @@ static inline char *airo_translate_scan(struct net_device *dev,
6852 /* Add frequency */ 6852 /* Add frequency */
6853 iwe.cmd = SIOCGIWFREQ; 6853 iwe.cmd = SIOCGIWFREQ;
6854 iwe.u.freq.m = le16_to_cpu(bss->dsChannel); 6854 iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
6855 iwe.u.freq.m = frequency_list[iwe.u.freq.m] * 100000; 6855 /* iwe.u.freq.m contains the channel (starting 1), our
 6856 * frequency_list array starts at index 0...
6857 */
6858 iwe.u.freq.m = frequency_list[iwe.u.freq.m - 1] * 100000;
6856 iwe.u.freq.e = 1; 6859 iwe.u.freq.e = 1;
6857 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_FREQ_LEN); 6860 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_FREQ_LEN);
6858 6861
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 8de49fe57233..6deb7cc810cc 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -2458,7 +2458,6 @@ struct net_device *alloc_orinocodev(int sizeof_card,
2458 dev->watchdog_timeo = HZ; /* 1 second timeout */ 2458 dev->watchdog_timeo = HZ; /* 1 second timeout */
2459 dev->get_stats = orinoco_get_stats; 2459 dev->get_stats = orinoco_get_stats;
2460 dev->ethtool_ops = &orinoco_ethtool_ops; 2460 dev->ethtool_ops = &orinoco_ethtool_ops;
2461 dev->get_wireless_stats = orinoco_get_wireless_stats;
2462 dev->wireless_handlers = (struct iw_handler_def *)&orinoco_handler_def; 2461 dev->wireless_handlers = (struct iw_handler_def *)&orinoco_handler_def;
2463 dev->change_mtu = orinoco_change_mtu; 2462 dev->change_mtu = orinoco_change_mtu;
2464 dev->set_multicast_list = orinoco_set_multicast_list; 2463 dev->set_multicast_list = orinoco_set_multicast_list;
@@ -4399,6 +4398,7 @@ static const struct iw_handler_def orinoco_handler_def = {
4399 .standard = orinoco_handler, 4398 .standard = orinoco_handler,
4400 .private = orinoco_private_handler, 4399 .private = orinoco_private_handler,
4401 .private_args = orinoco_privtab, 4400 .private_args = orinoco_privtab,
4401 .get_wireless_stats = orinoco_get_wireless_stats,
4402}; 4402};
4403 4403
4404static void orinoco_get_drvinfo(struct net_device *dev, 4404static void orinoco_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index d1fb1bab8aa8..bedd7f9f23e4 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -629,6 +629,7 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
629 PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90), 629 PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
630 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584), 630 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584),
631 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9), 631 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9),
632 PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae),
632 PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac), 633 PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac),
633 PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab), 634 PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab),
634 PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3), 635 PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 4b0acae22b0d..7bc7fc823128 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -1352,7 +1352,7 @@ static unsigned char *strip_make_packet(unsigned char *buffer,
1352 struct in_device *in_dev; 1352 struct in_device *in_dev;
1353 1353
1354 rcu_read_lock(); 1354 rcu_read_lock();
1355 in_dev = __in_dev_get(strip_info->dev); 1355 in_dev = __in_dev_get_rcu(strip_info->dev);
1356 if (in_dev == NULL) { 1356 if (in_dev == NULL) {
1357 rcu_read_unlock(); 1357 rcu_read_unlock();
1358 return NULL; 1358 return NULL;
@@ -1508,7 +1508,7 @@ static void strip_send(struct strip *strip_info, struct sk_buff *skb)
1508 1508
1509 brd = addr = 0; 1509 brd = addr = 0;
1510 rcu_read_lock(); 1510 rcu_read_lock();
1511 in_dev = __in_dev_get(strip_info->dev); 1511 in_dev = __in_dev_get_rcu(strip_info->dev);
1512 if (in_dev) { 1512 if (in_dev) {
1513 if (in_dev->ifa_list) { 1513 if (in_dev->ifa_list) {
1514 brd = in_dev->ifa_list->ifa_broadcast; 1514 brd = in_dev->ifa_list->ifa_broadcast;
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index e90fb72a6962..286902298e33 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -37,6 +37,7 @@
37#include <linux/proc_fs.h> 37#include <linux/proc_fs.h>
38#include <linux/ctype.h> 38#include <linux/ctype.h>
39#include <linux/blkdev.h> 39#include <linux/blkdev.h>
40#include <linux/rcupdate.h>
40#include <asm/io.h> 41#include <asm/io.h>
41#include <asm/processor.h> 42#include <asm/processor.h>
42#include <asm/hardware.h> 43#include <asm/hardware.h>
@@ -358,9 +359,10 @@ static __inline__ int led_get_net_activity(void)
358 /* we are running as tasklet, so locking dev_base 359 /* we are running as tasklet, so locking dev_base
359 * for reading should be OK */ 360 * for reading should be OK */
360 read_lock(&dev_base_lock); 361 read_lock(&dev_base_lock);
362 rcu_read_lock();
361 for (dev = dev_base; dev; dev = dev->next) { 363 for (dev = dev_base; dev; dev = dev->next) {
362 struct net_device_stats *stats; 364 struct net_device_stats *stats;
363 struct in_device *in_dev = __in_dev_get(dev); 365 struct in_device *in_dev = __in_dev_get_rcu(dev);
364 if (!in_dev || !in_dev->ifa_list) 366 if (!in_dev || !in_dev->ifa_list)
365 continue; 367 continue;
366 if (LOOPBACK(in_dev->ifa_list->ifa_local)) 368 if (LOOPBACK(in_dev->ifa_list->ifa_local))
@@ -371,6 +373,7 @@ static __inline__ int led_get_net_activity(void)
371 rx_total += stats->rx_packets; 373 rx_total += stats->rx_packets;
372 tx_total += stats->tx_packets; 374 tx_total += stats->tx_packets;
373 } 375 }
376 rcu_read_unlock();
374 read_unlock(&dev_base_lock); 377 read_unlock(&dev_base_lock);
375 378
376 retval = 0; 379 retval = 0;
diff --git a/drivers/pci/hotplug.c b/drivers/pci/hotplug.c
index 10444988a10b..e1743be31909 100644
--- a/drivers/pci/hotplug.c
+++ b/drivers/pci/hotplug.c
@@ -7,7 +7,6 @@ int pci_hotplug (struct device *dev, char **envp, int num_envp,
7 char *buffer, int buffer_size) 7 char *buffer, int buffer_size)
8{ 8{
9 struct pci_dev *pdev; 9 struct pci_dev *pdev;
10 char *scratch;
11 int i = 0; 10 int i = 0;
12 int length = 0; 11 int length = 0;
13 12
@@ -18,9 +17,6 @@ int pci_hotplug (struct device *dev, char **envp, int num_envp,
18 if (!pdev) 17 if (!pdev)
19 return -ENODEV; 18 return -ENODEV;
20 19
21 scratch = buffer;
22
23
24 if (add_hotplug_env_var(envp, num_envp, &i, 20 if (add_hotplug_env_var(envp, num_envp, &i,
25 buffer, buffer_size, &length, 21 buffer, buffer_size, &length,
26 "PCI_CLASS=%04X", pdev->class)) 22 "PCI_CLASS=%04X", pdev->class))
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index 8122fe734aa7..b1ba429e0a2d 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -558,7 +558,7 @@ static int configure_device (struct pci_func *func)
558 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_CACHE_LINE_SIZE, CACHE); 558 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_CACHE_LINE_SIZE, CACHE);
559 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_LATENCY_TIMER, LATENCY); 559 pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_LATENCY_TIMER, LATENCY);
560 560
561 pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_ROM_ADDRESS, 0x00L); 561 pci_bus_write_config_dword (ibmphp_pci_bus, devfn, PCI_ROM_ADDRESS, 0x00L);
562 pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_COMMAND, DEVICEENABLE); 562 pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_COMMAND, DEVICEENABLE);
563 563
564 return 0; 564 return 0;
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 0e0947601526..898f6da6f0de 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -2526,7 +2526,6 @@ configure_new_function(struct controller *ctrl, struct pci_func *func,
2526 int cloop; 2526 int cloop;
2527 u8 temp_byte; 2527 u8 temp_byte;
2528 u8 class_code; 2528 u8 class_code;
2529 u16 temp_word;
2530 u32 rc; 2529 u32 rc;
2531 u32 temp_register; 2530 u32 temp_register;
2532 u32 base; 2531 u32 base;
@@ -2682,8 +2681,7 @@ configure_new_function(struct controller *ctrl, struct pci_func *func,
2682 } /* End of base register loop */ 2681 } /* End of base register loop */
2683 2682
2684 /* disable ROM base Address */ 2683 /* disable ROM base Address */
2685 temp_word = 0x00L; 2684 rc = pci_bus_write_config_dword (pci_bus, devfn, PCI_ROM_ADDRESS, 0x00);
2686 rc = pci_bus_write_config_word (pci_bus, devfn, PCI_ROM_ADDRESS, temp_word);
2687 2685
2688 /* Set HP parameters (Cache Line Size, Latency Timer) */ 2686 /* Set HP parameters (Cache Line Size, Latency Timer) */
2689 rc = pciehprm_set_hpp(ctrl, func, PCI_HEADER_TYPE_NORMAL); 2687 rc = pciehprm_set_hpp(ctrl, func, PCI_HEADER_TYPE_NORMAL);
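
Both hotplug controllers were clearing PCI_ROM_ADDRESS with a 16-bit config write, but the expansion-ROM BAR is a 32-bit register, so only the low half was touched; the fix switches to the dword accessor. A hedged sketch of the corrected call in isolation (bus and devfn are whatever the caller already holds):

```c
#include <linux/pci.h>

/* Hypothetical snippet: disable a function's expansion-ROM decoding.
 * PCI_ROM_ADDRESS is a 32-bit BAR, so it must be written as a dword;
 * a 16-bit write would leave the upper half of the register untouched.
 */
static void disable_rom_bar(struct pci_bus *bus, unsigned int devfn)
{
	pci_bus_write_config_dword(bus, devfn, PCI_ROM_ADDRESS, 0x00);
}
```
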
diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c
index 752e6513c447..db69be85b458 100644
--- a/drivers/pci/hotplug/rpadlpar_sysfs.c
+++ b/drivers/pci/hotplug/rpadlpar_sysfs.c
@@ -62,7 +62,7 @@ static ssize_t add_slot_store(struct dlpar_io_attr *dlpar_attr,
62 char drc_name[MAX_DRC_NAME_LEN]; 62 char drc_name[MAX_DRC_NAME_LEN];
63 char *end; 63 char *end;
64 64
65 if (nbytes > MAX_DRC_NAME_LEN) 65 if (nbytes >= MAX_DRC_NAME_LEN)
66 return 0; 66 return 0;
67 67
68 memcpy(drc_name, buf, nbytes); 68 memcpy(drc_name, buf, nbytes);
@@ -83,7 +83,7 @@ static ssize_t remove_slot_store(struct dlpar_io_attr *dlpar_attr,
83 char drc_name[MAX_DRC_NAME_LEN]; 83 char drc_name[MAX_DRC_NAME_LEN];
84 char *end; 84 char *end;
85 85
86 if (nbytes > MAX_DRC_NAME_LEN) 86 if (nbytes >= MAX_DRC_NAME_LEN)
87 return 0; 87 return 0;
88 88
89 memcpy(drc_name, buf, nbytes); 89 memcpy(drc_name, buf, nbytes);
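
The relaxed `>` test allowed nbytes == MAX_DRC_NAME_LEN, which fills drc_name completely and leaves no room for the terminator appended next; `>=` keeps one byte free. A generic copy-and-terminate sketch under the same assumption (NAME_LEN and copy_name are illustrative names, not from the driver):

```c
#include <string.h>

#define NAME_LEN 64   /* stand-in for MAX_DRC_NAME_LEN */

/* Copy a user-supplied, not necessarily NUL-terminated byte string into a
 * fixed buffer, rejecting anything that would leave no room for the '\0'.
 * Returns 0 on success, -1 if the input is too long.
 */
static int copy_name(char *dst, const char *src, size_t nbytes)
{
	if (nbytes >= NAME_LEN)
		return -1;

	memcpy(dst, src, nbytes);
	dst[nbytes] = '\0';
	return 0;
}
```
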
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index b1409441c1cd..a32ae82e5922 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -159,7 +159,7 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
159 159
160 pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus); 160 pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
161 161
162 slot = kcalloc(1, sizeof(*slot), GFP_KERNEL); 162 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
163 if (!slot) 163 if (!slot)
164 return -ENOMEM; 164 return -ENOMEM;
165 bss_hotplug_slot->private = slot; 165 bss_hotplug_slot->private = slot;
@@ -491,7 +491,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
491 if (sn_pci_slot_valid(pci_bus, device) != 1) 491 if (sn_pci_slot_valid(pci_bus, device) != 1)
492 continue; 492 continue;
493 493
494 bss_hotplug_slot = kcalloc(1, sizeof(*bss_hotplug_slot), 494 bss_hotplug_slot = kzalloc(sizeof(*bss_hotplug_slot),
495 GFP_KERNEL); 495 GFP_KERNEL);
496 if (!bss_hotplug_slot) { 496 if (!bss_hotplug_slot) {
497 rc = -ENOMEM; 497 rc = -ENOMEM;
@@ -499,7 +499,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
499 } 499 }
500 500
501 bss_hotplug_slot->info = 501 bss_hotplug_slot->info =
502 kcalloc(1, sizeof(struct hotplug_slot_info), 502 kzalloc(sizeof(struct hotplug_slot_info),
503 GFP_KERNEL); 503 GFP_KERNEL);
504 if (!bss_hotplug_slot->info) { 504 if (!bss_hotplug_slot->info) {
505 rc = -ENOMEM; 505 rc = -ENOMEM;
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 783b5abb0717..91c9903e621f 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -2824,8 +2824,7 @@ static int configure_new_function (struct controller * ctrl, struct pci_func * f
2824 } 2824 }
2825#endif 2825#endif
2826 /* Disable ROM base Address */ 2826 /* Disable ROM base Address */
2827 temp_word = 0x00L; 2827 rc = pci_bus_write_config_dword (pci_bus, devfn, PCI_ROM_ADDRESS, 0x00);
2828 rc = pci_bus_write_config_word (pci_bus, devfn, PCI_ROM_ADDRESS, temp_word);
2829 2828
2830 /* Set HP parameters (Cache Line Size, Latency Timer) */ 2829 /* Set HP parameters (Cache Line Size, Latency Timer) */
2831 rc = shpchprm_set_hpp(ctrl, func, PCI_HEADER_TYPE_NORMAL); 2830 rc = shpchprm_set_hpp(ctrl, func, PCI_HEADER_TYPE_NORMAL);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 56a3b397efee..2898830c496f 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -360,7 +360,7 @@ pci_create_resource_files(struct pci_dev *pdev)
360 continue; 360 continue;
361 361
362 /* allocate attribute structure, piggyback attribute name */ 362 /* allocate attribute structure, piggyback attribute name */
363 res_attr = kcalloc(1, sizeof(*res_attr) + 10, GFP_ATOMIC); 363 res_attr = kzalloc(sizeof(*res_attr) + 10, GFP_ATOMIC);
364 if (res_attr) { 364 if (res_attr) {
365 char *res_attr_name = (char *)(res_attr + 1); 365 char *res_attr_name = (char *)(res_attr + 1);
366 366
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 992db89adce7..259d247b7551 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -309,17 +309,25 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
309 309
310 pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr); 310 pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
311 311
312 /* If we're in D3, force entire word to 0. 312 /* If we're (effectively) in D3, force entire word to 0.
313 * This doesn't affect PME_Status, disables PME_En, and 313 * This doesn't affect PME_Status, disables PME_En, and
314 * sets PowerState to 0. 314 * sets PowerState to 0.
315 */ 315 */
316 if (dev->current_state >= PCI_D3hot) { 316 switch (dev->current_state) {
317 if (!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) 317 case PCI_UNKNOWN: /* Boot-up */
318 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
319 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
318 need_restore = 1; 320 need_restore = 1;
321 /* Fall-through: force to D0 */
322 case PCI_D3hot:
323 case PCI_D3cold:
324 case PCI_POWER_ERROR:
319 pmcsr = 0; 325 pmcsr = 0;
320 } else { 326 break;
327 default:
321 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 328 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
322 pmcsr |= state; 329 pmcsr |= state;
330 break;
323 } 331 }
324 332
325 /* enter specified state */ 333 /* enter specified state */
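
The rewritten pci_set_power_state() block treats the boot-time PCI_UNKNOWN state like D3: if the hardware already reports D3hot and does not advertise no-soft-reset, config space will need restoring, and in the D3/unknown/error cases the whole PMCSR word is forced to 0 (PME_En off, state D0); otherwise only the state field is rewritten. A reduced, standalone sketch of that decision (the register bit values here are placeholders; the real definitions live in pci_regs.h):

```c
#include <stdint.h>

#define PM_CTRL_STATE_MASK     0x0003
#define PM_CTRL_NO_SOFT_RESET  0x0008   /* assumed value, see <linux/pci_regs.h> */

#define D0        0
#define D3HOT     3
#define UNKNOWN   5   /* driver has no idea yet (boot-up) */

/* Return the PMCSR value to write when moving to new_state; set *need_restore
 * when config space must be restored after the transition.
 */
static uint16_t next_pmcsr(int current_state, uint16_t pmcsr, int new_state,
			   int *need_restore)
{
	*need_restore = 0;

	switch (current_state) {
	case UNKNOWN:
		/* Hardware may already sit in D3hot at boot; if it cannot keep
		 * its config space across the transition, remember to restore. */
		if ((pmcsr & PM_CTRL_STATE_MASK) == D3HOT &&
		    !(pmcsr & PM_CTRL_NO_SOFT_RESET))
			*need_restore = 1;
		/* fall through: force the whole word to 0 (PME_En off, D0) */
	case D3HOT:
		pmcsr = 0;
		break;
	default:
		pmcsr &= ~PM_CTRL_STATE_MASK;
		pmcsr |= new_state;
		break;
	}
	return pmcsr;
}
```
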
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 26a55d08b506..005786416bb5 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -165,7 +165,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
165 if (l == 0xffffffff) 165 if (l == 0xffffffff)
166 l = 0; 166 l = 0;
167 if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) { 167 if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) {
168 sz = pci_size(l, sz, PCI_BASE_ADDRESS_MEM_MASK); 168 sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
169 if (!sz) 169 if (!sz)
170 continue; 170 continue;
171 res->start = l & PCI_BASE_ADDRESS_MEM_MASK; 171 res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
@@ -215,7 +215,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
215 if (l == 0xffffffff) 215 if (l == 0xffffffff)
216 l = 0; 216 l = 0;
217 if (sz && sz != 0xffffffff) { 217 if (sz && sz != 0xffffffff) {
218 sz = pci_size(l, sz, PCI_ROM_ADDRESS_MASK); 218 sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);
219 if (sz) { 219 if (sz) {
220 res->flags = (l & IORESOURCE_ROM_ENABLE) | 220 res->flags = (l & IORESOURCE_ROM_ENABLE) |
221 IORESOURCE_MEM | IORESOURCE_PREFETCH | 221 IORESOURCE_MEM | IORESOURCE_PREFETCH |
@@ -402,6 +402,12 @@ static void pci_enable_crs(struct pci_dev *dev)
402static void __devinit pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max) 402static void __devinit pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
403{ 403{
404 struct pci_bus *parent = child->parent; 404 struct pci_bus *parent = child->parent;
405
406 /* Attempts to fix that up are really dangerous unless
407 we're going to re-assign all bus numbers. */
408 if (!pcibios_assign_all_busses())
409 return;
410
405 while (parent->parent && parent->subordinate < max) { 411 while (parent->parent && parent->subordinate < max) {
406 parent->subordinate = max; 412 parent->subordinate = max;
407 pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max); 413 pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
@@ -478,8 +484,18 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max
478 * We need to assign a number to this bus which we always 484 * We need to assign a number to this bus which we always
479 * do in the second pass. 485 * do in the second pass.
480 */ 486 */
481 if (!pass) 487 if (!pass) {
488 if (pcibios_assign_all_busses())
489 /* Temporarily disable forwarding of the
490 configuration cycles on all bridges in
491 this bus segment to avoid possible
492 conflicts in the second pass between two
493 bridges programmed with overlapping
494 bus ranges. */
495 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
496 buses & ~0xffffff);
482 return max; 497 return max;
498 }
483 499
484 /* Clear errors */ 500 /* Clear errors */
485 pci_write_config_word(dev, PCI_STATUS, 0xffff); 501 pci_write_config_word(dev, PCI_STATUS, 0xffff);
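
The (u32) casts matter because PCI_BASE_ADDRESS_MEM_MASK and PCI_ROM_ADDRESS_MASK carry a UL suffix, so on 64-bit builds they have bits set above bit 31; truncating them keeps the BAR sizing arithmetic in 32-bit space. A small demonstration of the difference (plain C, values shown for an LP64 target):

```c
#include <stdio.h>

/* The kernel defines this with a UL suffix, so on a 64-bit build the
 * "mask" carries set bits above bit 31.
 */
#define PCI_BASE_ADDRESS_MEM_MASK  (~0x0fUL)

int main(void)
{
	unsigned long wide   = PCI_BASE_ADDRESS_MEM_MASK;
	unsigned int  narrow = (unsigned int)PCI_BASE_ADDRESS_MEM_MASK;

	/* On LP64: 0xfffffffffffffff0 vs 0xfffffff0 */
	printf("%#lx\n%#x\n", wide, narrow);
	return 0;
}
```
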
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index ddc741e6ecbf..36cc9a96a338 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -146,7 +146,7 @@ config I82365
146 146
147config TCIC 147config TCIC
148 tristate "Databook TCIC host bridge support" 148 tristate "Databook TCIC host bridge support"
149 depends on PCMCIA 149 depends on PCMCIA && ISA
150 select PCCARD_NONSTATIC 150 select PCCARD_NONSTATIC
151 help 151 help
152 Say Y here to include support for the Databook TCIC family of PCMCIA 152 Say Y here to include support for the Databook TCIC family of PCMCIA
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index 1d755e20880c..3f6d51d11374 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -228,6 +228,11 @@ int cb_alloc(struct pcmcia_socket * s)
228 pci_bus_size_bridges(bus); 228 pci_bus_size_bridges(bus);
229 pci_bus_assign_resources(bus); 229 pci_bus_assign_resources(bus);
230 cardbus_assign_irqs(bus, s->pci_irq); 230 cardbus_assign_irqs(bus, s->pci_irq);
231
232 /* socket specific tune function */
233 if (s->tune_bridge)
234 s->tune_bridge(s, bus);
235
231 pci_enable_bridges(bus); 236 pci_enable_bridges(bus);
232 pci_bus_add_devices(bus); 237 pci_bus_add_devices(bus);
233 238
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 08d1c9288264..94be9e51654e 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -22,7 +22,6 @@
22 22
23#include <asm/hardware.h> 23#include <asm/hardware.h>
24#include <asm/io.h> 24#include <asm/io.h>
25#include <asm/mach-types.h>
26#include <asm/sizes.h> 25#include <asm/sizes.h>
27 26
28#include <asm/arch/mux.h> 27#include <asm/arch/mux.h>
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 39ba6406fd54..80969f7e7a0b 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -376,6 +376,7 @@ static int ds_open(struct inode *inode, struct file *file)
376 socket_t i = iminor(inode); 376 socket_t i = iminor(inode);
377 struct pcmcia_socket *s; 377 struct pcmcia_socket *s;
378 user_info_t *user; 378 user_info_t *user;
379 static int warning_printed = 0;
379 380
380 ds_dbg(0, "ds_open(socket %d)\n", i); 381 ds_dbg(0, "ds_open(socket %d)\n", i);
381 382
@@ -407,6 +408,17 @@ static int ds_open(struct inode *inode, struct file *file)
407 s->user = user; 408 s->user = user;
408 file->private_data = user; 409 file->private_data = user;
409 410
411 if (!warning_printed) {
412 printk(KERN_INFO "pcmcia: Detected deprecated PCMCIA ioctl "
413 "usage.\n");
414 printk(KERN_INFO "pcmcia: This interface will soon be removed from "
415 "the kernel; please expect breakage unless you upgrade "
416 "to new tools.\n");
417 printk(KERN_INFO "pcmcia: see http://www.kernel.org/pub/linux/"
418 "utils/kernel/pcmcia/pcmcia.html for details.\n");
419 warning_printed = 1;
420 }
421
410 if (s->pcmcia_state.present) 422 if (s->pcmcia_state.present)
411 queue_event(user, CS_EVENT_CARD_INSERTION); 423 queue_event(user, CS_EVENT_CARD_INSERTION);
412 return 0; 424 return 0;
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index c42455d20eb6..f9a5c70284b5 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -691,7 +691,7 @@ static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned
691 unsigned long size = end - start + 1; 691 unsigned long size = end - start + 1;
692 int ret = 0; 692 int ret = 0;
693 693
694 if (end <= start) 694 if (end < start)
695 return -EINVAL; 695 return -EINVAL;
696 696
697 down(&rsrc_sem); 697 down(&rsrc_sem);
@@ -724,7 +724,7 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long
724 unsigned long size = end - start + 1; 724 unsigned long size = end - start + 1;
725 int ret = 0; 725 int ret = 0;
726 726
727 if (end <= start) 727 if (end < start)
728 return -EINVAL; 728 return -EINVAL;
729 729
730 if (end > IO_SPACE_LIMIT) 730 if (end > IO_SPACE_LIMIT)
@@ -817,7 +817,7 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
817 817
818 /* if we got at least one of IO, and one of MEM, we can be glad and 818 /* if we got at least one of IO, and one of MEM, we can be glad and
819 * activate the PCMCIA subsystem */ 819 * activate the PCMCIA subsystem */
820 if (done & (IORESOURCE_MEM | IORESOURCE_IO)) 820 if (done == (IORESOURCE_MEM | IORESOURCE_IO))
821 s->resource_setup_done = 1; 821 s->resource_setup_done = 1;
822 822
823 return 0; 823 return 0;
@@ -925,7 +925,7 @@ static ssize_t store_io_db(struct class_device *class_dev, const char *buf, size
925 return -EINVAL; 925 return -EINVAL;
926 } 926 }
927 } 927 }
928 if (end_addr <= start_addr) 928 if (end_addr < start_addr)
929 return -EINVAL; 929 return -EINVAL;
930 930
931 ret = adjust_io(s, add, start_addr, end_addr); 931 ret = adjust_io(s, add, start_addr, end_addr);
@@ -977,7 +977,7 @@ static ssize_t store_mem_db(struct class_device *class_dev, const char *buf, siz
977 return -EINVAL; 977 return -EINVAL;
978 } 978 }
979 } 979 }
980 if (end_addr <= start_addr) 980 if (end_addr < start_addr)
981 return -EINVAL; 981 return -EINVAL;
982 982
983 ret = adjust_memory(s, add, start_addr, end_addr); 983 ret = adjust_memory(s, add, start_addr, end_addr);
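
Changing `done & (...)` to `done == (...)` tightens the auto-setup condition from "at least one of I/O or memory was found" to "both were found". A tiny illustration of the difference between the two tests (resource flag values mirror ioport.h):

```c
#include <stdio.h>

#define IORESOURCE_IO   0x00000100
#define IORESOURCE_MEM  0x00000200

int main(void)
{
	unsigned int done = IORESOURCE_IO;   /* only I/O windows were found */

	/* Old test: true as soon as either resource type is present. */
	printf("%d\n", !!(done & (IORESOURCE_MEM | IORESOURCE_IO)));  /* 1 */

	/* New test: true only when both are present. */
	printf("%d\n", done == (IORESOURCE_MEM | IORESOURCE_IO));     /* 0 */
	return 0;
}
```
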
diff --git a/drivers/pcmcia/ti113x.h b/drivers/pcmcia/ti113x.h
index fbe233e19ceb..da0b404561c9 100644
--- a/drivers/pcmcia/ti113x.h
+++ b/drivers/pcmcia/ti113x.h
@@ -59,6 +59,7 @@
59 59
60#define TI122X_SCR_SER_STEP 0xc0000000 60#define TI122X_SCR_SER_STEP 0xc0000000
61#define TI122X_SCR_INTRTIE 0x20000000 61#define TI122X_SCR_INTRTIE 0x20000000
62#define TIXX21_SCR_TIEALL 0x10000000
62#define TI122X_SCR_CBRSVD 0x00400000 63#define TI122X_SCR_CBRSVD 0x00400000
63#define TI122X_SCR_MRBURSTDN 0x00008000 64#define TI122X_SCR_MRBURSTDN 0x00008000
64#define TI122X_SCR_MRBURSTUP 0x00004000 65#define TI122X_SCR_MRBURSTUP 0x00004000
@@ -153,6 +154,12 @@
153/* EnE test register */ 154/* EnE test register */
154#define ENE_TEST_C9 0xc9 /* 8bit */ 155#define ENE_TEST_C9 0xc9 /* 8bit */
155#define ENE_TEST_C9_TLTENABLE 0x02 156#define ENE_TEST_C9_TLTENABLE 0x02
157#define ENE_TEST_C9_PFENABLE_F0 0x04
158#define ENE_TEST_C9_PFENABLE_F1 0x08
 159#define ENE_TEST_C9_PFENABLE (ENE_TEST_C9_PFENABLE_F0 | ENE_TEST_C9_PFENABLE_F1)
160#define ENE_TEST_C9_WPDISALBLE_F0 0x40
161#define ENE_TEST_C9_WPDISALBLE_F1 0x80
162#define ENE_TEST_C9_WPDISALBLE (ENE_TEST_C9_WPDISALBLE_F0 | ENE_TEST_C9_WPDISALBLE_F1)
156 163
157/* 164/*
158 * Texas Instruments CardBus controller overrides. 165 * Texas Instruments CardBus controller overrides.
@@ -618,6 +625,7 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket)
618 int devfn; 625 int devfn;
619 unsigned int state; 626 unsigned int state;
620 int ret = 1; 627 int ret = 1;
628 u32 sysctl;
621 629
622 /* catch the two-slot controllers */ 630 /* catch the two-slot controllers */
623 switch (socket->dev->device) { 631 switch (socket->dev->device) {
@@ -640,6 +648,24 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket)
640 */ 648 */
641 break; 649 break;
642 650
651 case PCI_DEVICE_ID_TI_X515:
652 case PCI_DEVICE_ID_TI_X420:
653 case PCI_DEVICE_ID_TI_X620:
654 case PCI_DEVICE_ID_TI_XX21_XX11:
655 case PCI_DEVICE_ID_TI_7410:
656 case PCI_DEVICE_ID_TI_7610:
657 /*
658 * those are either single or dual slot CB with additional functions
659 * like 1394, smartcard reader, etc. check the TIEALL flag for them
 660 * the TIEALL flag binds the IRQ of all functions together.
661 * we catch the single slot variants later.
662 */
663 sysctl = config_readl(socket, TI113X_SYSTEM_CONTROL);
664 if (sysctl & TIXX21_SCR_TIEALL)
665 return 0;
666
667 break;
668
643 /* single-slot controllers have the 2nd slot empty always :) */ 669 /* single-slot controllers have the 2nd slot empty always :) */
644 default: 670 default:
645 return 1; 671 return 1;
@@ -652,6 +678,15 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket)
652 if (!func) 678 if (!func)
653 return 1; 679 return 1;
654 680
681 /*
682 * check that the device id of both slots match. this is needed for the
683 * XX21 and the XX11 controller that share the same device id for single
684 * and dual slot controllers. return '2nd slot empty'. we already checked
685 * if the interrupt is tied to another function.
686 */
687 if (socket->dev->device != func->device)
688 goto out;
689
655 slot2 = pci_get_drvdata(func); 690 slot2 = pci_get_drvdata(func);
656 if (!slot2) 691 if (!slot2)
657 goto out; 692 goto out;
@@ -791,16 +826,6 @@ static int ti12xx_override(struct yenta_socket *socket)
791 config_writel(socket, TI113X_SYSTEM_CONTROL, val); 826 config_writel(socket, TI113X_SYSTEM_CONTROL, val);
792 827
793 /* 828 /*
794 * for EnE bridges only: clear testbit TLTEnable. this makes the
795 * RME Hammerfall DSP sound card working.
796 */
797 if (socket->dev->vendor == PCI_VENDOR_ID_ENE) {
798 u8 test_c9 = config_readb(socket, ENE_TEST_C9);
799 test_c9 &= ~ENE_TEST_C9_TLTENABLE;
800 config_writeb(socket, ENE_TEST_C9, test_c9);
801 }
802
803 /*
804 * Yenta expects controllers to use CSCINT to route 829 * Yenta expects controllers to use CSCINT to route
805 * CSC interrupts to PCI rather than INTVAL. 830 * CSC interrupts to PCI rather than INTVAL.
806 */ 831 */
@@ -841,5 +866,75 @@ static int ti1250_override(struct yenta_socket *socket)
841 return ti12xx_override(socket); 866 return ti12xx_override(socket);
842} 867}
843 868
869
870/**
871 * EnE specific part. EnE bridges are register compatible with TI bridges but
872 * have their own test registers and more important their own little problems.
873 * Some fixup code to make everybody happy (TM).
874 */
875
876/**
877 * set/clear various test bits:
878 * Defaults to clear the bit.
879 * - mask (u8) defines what bits to change
880 * - bits (u8) is the values to change them to
881 * -> it's
882 * current = (current & ~mask) | bits
883 */
884/* pci ids of devices that wants to have the bit set */
885#define DEVID(_vend,_dev,_subvend,_subdev,mask,bits) { \
886 .vendor = _vend, \
887 .device = _dev, \
888 .subvendor = _subvend, \
889 .subdevice = _subdev, \
890 .driver_data = ((mask) << 8 | (bits)), \
891 }
892static struct pci_device_id ene_tune_tbl[] = {
893 /* Echo Audio products based on motorola DSP56301 and DSP56361 */
894 DEVID(PCI_VENDOR_ID_MOTOROLA, 0x1801, 0xECC0, PCI_ANY_ID,
895 ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE),
896 DEVID(PCI_VENDOR_ID_MOTOROLA, 0x3410, 0xECC0, PCI_ANY_ID,
897 ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE),
898
899 {}
900};
901
902static void ene_tune_bridge(struct pcmcia_socket *sock, struct pci_bus *bus)
903{
904 struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
905 struct pci_dev *dev;
906 struct pci_device_id *id = NULL;
907 u8 test_c9, old_c9, mask, bits;
908
909 list_for_each_entry(dev, &bus->devices, bus_list) {
910 id = (struct pci_device_id *) pci_match_id(ene_tune_tbl, dev);
911 if (id)
912 break;
913 }
914
915 test_c9 = old_c9 = config_readb(socket, ENE_TEST_C9);
916 if (id) {
917 mask = (id->driver_data >> 8) & 0xFF;
918 bits = id->driver_data & 0xFF;
919
920 test_c9 = (test_c9 & ~mask) | bits;
921 }
922 else
923 /* default to clear TLTEnable bit, old behaviour */
924 test_c9 &= ~ENE_TEST_C9_TLTENABLE;
925
926 printk(KERN_INFO "yenta EnE: chaning testregister 0xC9, %02x -> %02x\n", old_c9, test_c9);
927 config_writeb(socket, ENE_TEST_C9, test_c9);
928}
929
930
931static int ene_override(struct yenta_socket *socket)
932{
933 /* install tune_bridge() function */
934 socket->socket.tune_bridge = ene_tune_bridge;
935
936 return ti1250_override(socket);
937}
938
844#endif /* _LINUX_TI113X_H */ 939#endif /* _LINUX_TI113X_H */
845 940
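
The EnE quirk table packs two bytes into pci_device_id.driver_data: the high byte is the mask of test-register bits to change and the low byte the values to set, applied as new = (old & ~mask) | bits in ene_tune_bridge(). A standalone sketch of that decode with made-up values:

```c
#include <stdint.h>
#include <stdio.h>

/* Decode driver_data as used by the EnE tune table:
 * bits 15..8 = mask of ENE_TEST_C9 bits to change,
 * bits  7..0 = the values to set them to.
 */
static uint8_t apply_quirk(uint8_t old_c9, unsigned long driver_data)
{
	uint8_t mask = (driver_data >> 8) & 0xff;
	uint8_t bits = driver_data & 0xff;

	return (old_c9 & ~mask) | bits;
}

int main(void)
{
	/* Illustrative values only. */
	unsigned long data = (0x0eUL << 8) | 0x02;    /* mask 0x0e, set 0x02 */
	printf("%#x\n", apply_quirk(0x0f, data));     /* prints 0x3 */
	return 0;
}
```
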
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index f0997c36c9b7..db9f952f9e3c 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -559,12 +559,6 @@ static void yenta_interrogate(struct yenta_socket *socket)
559static int yenta_sock_init(struct pcmcia_socket *sock) 559static int yenta_sock_init(struct pcmcia_socket *sock)
560{ 560{
561 struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket); 561 struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
562 u16 bridge;
563
564 bridge = config_readw(socket, CB_BRIDGE_CONTROL) & ~CB_BRIDGE_INTR;
565 if (!socket->cb_irq)
566 bridge |= CB_BRIDGE_INTR;
567 config_writew(socket, CB_BRIDGE_CONTROL, bridge);
568 562
569 exca_writeb(socket, I365_GBLCTL, 0x00); 563 exca_writeb(socket, I365_GBLCTL, 0x00);
570 exca_writeb(socket, I365_GENCTL, 0x00); 564 exca_writeb(socket, I365_GENCTL, 0x00);
@@ -819,6 +813,7 @@ enum {
819 CARDBUS_TYPE_TOPIC95, 813 CARDBUS_TYPE_TOPIC95,
820 CARDBUS_TYPE_TOPIC97, 814 CARDBUS_TYPE_TOPIC97,
821 CARDBUS_TYPE_O2MICRO, 815 CARDBUS_TYPE_O2MICRO,
816 CARDBUS_TYPE_ENE,
822}; 817};
823 818
824/* 819/*
@@ -865,6 +860,12 @@ static struct cardbus_type cardbus_type[] = {
865 .override = o2micro_override, 860 .override = o2micro_override,
866 .restore_state = o2micro_restore_state, 861 .restore_state = o2micro_restore_state,
867 }, 862 },
863 [CARDBUS_TYPE_ENE] = {
864 .override = ene_override,
865 .save_state = ti_save_state,
866 .restore_state = ti_restore_state,
867 .sock_init = ti_init,
868 },
868}; 869};
869 870
870 871
@@ -883,16 +884,8 @@ static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mas
883{ 884{
884 int i; 885 int i;
885 unsigned long val; 886 unsigned long val;
886 u16 bridge_ctrl;
887 u32 mask; 887 u32 mask;
888 888
889 /* Set up ISA irq routing to probe the ISA irqs.. */
890 bridge_ctrl = config_readw(socket, CB_BRIDGE_CONTROL);
891 if (!(bridge_ctrl & CB_BRIDGE_INTR)) {
892 bridge_ctrl |= CB_BRIDGE_INTR;
893 config_writew(socket, CB_BRIDGE_CONTROL, bridge_ctrl);
894 }
895
896 /* 889 /*
897 * Probe for usable interrupts using the force 890 * Probe for usable interrupts using the force
898 * register to generate bogus card status events. 891 * register to generate bogus card status events.
@@ -914,9 +907,6 @@ static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mas
914 907
915 mask = probe_irq_mask(val) & 0xffff; 908 mask = probe_irq_mask(val) & 0xffff;
916 909
917 bridge_ctrl &= ~CB_BRIDGE_INTR;
918 config_writew(socket, CB_BRIDGE_CONTROL, bridge_ctrl);
919
920 return mask; 910 return mask;
921} 911}
922 912
@@ -944,18 +934,11 @@ static irqreturn_t yenta_probe_handler(int irq, void *dev_id, struct pt_regs *re
944/* probes the PCI interrupt, use only on override functions */ 934/* probes the PCI interrupt, use only on override functions */
945static int yenta_probe_cb_irq(struct yenta_socket *socket) 935static int yenta_probe_cb_irq(struct yenta_socket *socket)
946{ 936{
947 u16 bridge_ctrl;
948
949 if (!socket->cb_irq) 937 if (!socket->cb_irq)
950 return -1; 938 return -1;
951 939
952 socket->probe_status = 0; 940 socket->probe_status = 0;
953 941
954 /* disable ISA interrupts */
955 bridge_ctrl = config_readw(socket, CB_BRIDGE_CONTROL);
956 bridge_ctrl &= ~CB_BRIDGE_INTR;
957 config_writew(socket, CB_BRIDGE_CONTROL, bridge_ctrl);
958
959 if (request_irq(socket->cb_irq, yenta_probe_handler, SA_SHIRQ, "yenta", socket)) { 942 if (request_irq(socket->cb_irq, yenta_probe_handler, SA_SHIRQ, "yenta", socket)) {
960 printk(KERN_WARNING "Yenta: request_irq() in yenta_probe_cb_irq() failed!\n"); 943 printk(KERN_WARNING "Yenta: request_irq() in yenta_probe_cb_irq() failed!\n");
961 return -1; 944 return -1;
@@ -966,7 +949,7 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket)
966 cb_writel(socket, CB_SOCKET_EVENT, -1); 949 cb_writel(socket, CB_SOCKET_EVENT, -1);
967 cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK); 950 cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK);
968 cb_writel(socket, CB_SOCKET_FORCE, CB_FCARDSTS); 951 cb_writel(socket, CB_SOCKET_FORCE, CB_FCARDSTS);
969 952
970 msleep(100); 953 msleep(100);
971 954
972 /* disable interrupts */ 955 /* disable interrupts */
@@ -1004,11 +987,12 @@ static void yenta_config_init(struct yenta_socket *socket)
1004{ 987{
1005 u16 bridge; 988 u16 bridge;
1006 struct pci_dev *dev = socket->dev; 989 struct pci_dev *dev = socket->dev;
990 struct pci_bus_region region;
1007 991
1008 pci_set_power_state(socket->dev, 0); 992 pcibios_resource_to_bus(socket->dev, &region, &dev->resource[0]);
1009 993
1010 config_writel(socket, CB_LEGACY_MODE_BASE, 0); 994 config_writel(socket, CB_LEGACY_MODE_BASE, 0);
1011 config_writel(socket, PCI_BASE_ADDRESS_0, dev->resource[0].start); 995 config_writel(socket, PCI_BASE_ADDRESS_0, region.start);
1012 config_writew(socket, PCI_COMMAND, 996 config_writew(socket, PCI_COMMAND,
1013 PCI_COMMAND_IO | 997 PCI_COMMAND_IO |
1014 PCI_COMMAND_MEMORY | 998 PCI_COMMAND_MEMORY |
@@ -1031,8 +1015,8 @@ static void yenta_config_init(struct yenta_socket *socket)
1031 * - PCI interrupts enabled if a PCI interrupt exists.. 1015 * - PCI interrupts enabled if a PCI interrupt exists..
1032 */ 1016 */
1033 bridge = config_readw(socket, CB_BRIDGE_CONTROL); 1017 bridge = config_readw(socket, CB_BRIDGE_CONTROL);
1034 bridge &= ~(CB_BRIDGE_CRST | CB_BRIDGE_PREFETCH1 | CB_BRIDGE_INTR | CB_BRIDGE_ISAEN | CB_BRIDGE_VGAEN); 1018 bridge &= ~(CB_BRIDGE_CRST | CB_BRIDGE_PREFETCH1 | CB_BRIDGE_ISAEN | CB_BRIDGE_VGAEN);
1035 bridge |= CB_BRIDGE_PREFETCH0 | CB_BRIDGE_POSTEN | CB_BRIDGE_INTR; 1019 bridge |= CB_BRIDGE_PREFETCH0 | CB_BRIDGE_POSTEN;
1036 config_writew(socket, CB_BRIDGE_CONTROL, bridge); 1020 config_writew(socket, CB_BRIDGE_CONTROL, bridge);
1037} 1021}
1038 1022
@@ -1045,7 +1029,18 @@ static int __devinit yenta_probe (struct pci_dev *dev, const struct pci_device_i
1045{ 1029{
1046 struct yenta_socket *socket; 1030 struct yenta_socket *socket;
1047 int ret; 1031 int ret;
1048 1032
1033 /*
1034 * If we failed to assign proper bus numbers for this cardbus
1035 * controller during PCI probe, its subordinate pci_bus is NULL.
1036 * Bail out if so.
1037 */
1038 if (!dev->subordinate) {
1039 printk(KERN_ERR "Yenta: no bus associated with %s! "
1040 "(try 'pci=assign-busses')\n", pci_name(dev));
1041 return -ENODEV;
1042 }
1043
1049 socket = kmalloc(sizeof(struct yenta_socket), GFP_KERNEL); 1044 socket = kmalloc(sizeof(struct yenta_socket), GFP_KERNEL);
1050 if (!socket) 1045 if (!socket)
1051 return -ENOMEM; 1046 return -ENOMEM;
@@ -1254,10 +1249,22 @@ static struct pci_device_id yenta_table [] = {
1254 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1250, TI1250), 1249 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1250, TI1250),
1255 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1410, TI1250), 1250 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1410, TI1250),
1256 1251
1257 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, TI12XX), 1252 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11, TI12XX),
1258 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, TI12XX), 1253 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X515, TI12XX),
1259 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, TI1250), 1254 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X420, TI12XX),
1260 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1420, TI12XX), 1255 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X620, TI12XX),
1256 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7410, TI12XX),
1257 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7510, TI12XX),
1258 CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7610, TI12XX),
1259
1260 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_710, TI12XX),
1261 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_712, TI12XX),
1262 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_720, TI12XX),
1263 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_722, TI12XX),
1264 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, ENE),
1265 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, ENE),
1266 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, ENE),
1267 CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1420, ENE),
1261 1268
1262 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C465, RICOH), 1269 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C465, RICOH),
1263 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C466, RICOH), 1270 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C466, RICOH),
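
One detail worth spelling out from the yenta_config_init() hunk: BAR0 is now programmed with a bus address obtained via pcibios_resource_to_bus() rather than with the raw resource start, because on some platforms the CPU-visible address and the address seen on the PCI bus differ by a host-bridge offset. The following is a minimal user-space sketch of that distinction; the offset value and the resource_to_bus() helper are invented purely for illustration and are not the kernel API.

#include <stdio.h>
#include <stdint.h>

struct res_cpu        { uint64_t start, end; };   /* CPU (struct resource) view */
struct pci_bus_region { uint64_t start, end; };   /* what the device sees on the bus */

/* stand-in for the bus translation: subtract the host bridge offset */
static void resource_to_bus(struct pci_bus_region *region,
			    const struct res_cpu *res, uint64_t offset)
{
	region->start = res->start - offset;
	region->end   = res->end   - offset;
}

int main(void)
{
	struct res_cpu res = { 0x80000000, 0x80000fff };
	struct pci_bus_region region;

	resource_to_bus(&region, &res, 0x40000000);   /* hypothetical offset */
	printf("program BAR0 with %#llx, not %#llx\n",
	       (unsigned long long)region.start,
	       (unsigned long long)res.start);
	return 0;
}
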
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index aac83ce6469c..a1c52a682191 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/blacklist.c 2 * drivers/s390/cio/blacklist.c
3 * S/390 common I/O routines -- blacklisting of specific devices 3 * S/390 common I/O routines -- blacklisting of specific devices
4 * $Revision: 1.34 $ 4 * $Revision: 1.35 $
5 * 5 *
6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -35,7 +35,7 @@
35 */ 35 */
36 36
37/* 65536 bits to indicate if a devno is blacklisted or not */ 37/* 65536 bits to indicate if a devno is blacklisted or not */
38#define __BL_DEV_WORDS (__MAX_SUBCHANNELS + (8*sizeof(long) - 1) / \ 38#define __BL_DEV_WORDS ((__MAX_SUBCHANNELS + (8*sizeof(long) - 1)) / \
39 (8*sizeof(long))) 39 (8*sizeof(long)))
40static unsigned long bl_dev[__BL_DEV_WORDS]; 40static unsigned long bl_dev[__BL_DEV_WORDS];
41typedef enum {add, free} range_action; 41typedef enum {add, free} range_action;
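
The blacklist.c change above is a one-character class of bug: without the outer parentheses, operator precedence turns the intended round-up division (n + d - 1) / d into n + ((d - 1) / d), so the bitmap is sized wildly wrong. A small stand-alone demonstration, with MAX_DEVNOS standing in for __MAX_SUBCHANNELS (not part of the patch):

#include <stdio.h>

#define MAX_DEVNOS	65536
#define BITS_PER_LONG_	(8 * sizeof(long))

/* broken: precedence makes this MAX_DEVNOS + 0 */
#define WORDS_BROKEN	(MAX_DEVNOS + (BITS_PER_LONG_ - 1) / BITS_PER_LONG_)
/* fixed: classic round-up division (n + d - 1) / d */
#define WORDS_FIXED	((MAX_DEVNOS + (BITS_PER_LONG_ - 1)) / BITS_PER_LONG_)

int main(void)
{
	printf("broken: %zu longs, fixed: %zu longs\n",
	       (size_t)WORDS_BROKEN, (size_t)WORDS_FIXED);
	return 0;
}
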
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 91ea8e4777f3..dbb3eb0e330b 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -437,7 +437,7 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
437 if (cdev->dev.driver_data) { 437 if (cdev->dev.driver_data) {
438 gdev = (struct ccwgroup_device *)cdev->dev.driver_data; 438 gdev = (struct ccwgroup_device *)cdev->dev.driver_data;
439 if (get_device(&gdev->dev)) { 439 if (get_device(&gdev->dev)) {
440 if (klist_node_attached(&gdev->dev.knode_bus)) 440 if (device_is_registered(&gdev->dev))
441 return gdev; 441 return gdev;
442 put_device(&gdev->dev); 442 put_device(&gdev->dev);
443 } 443 }
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c
index 6aeef3bacc33..0cb47eca91f3 100644
--- a/drivers/s390/crypto/z90main.c
+++ b/drivers/s390/crypto/z90main.c
@@ -682,9 +682,6 @@ z90crypt_cleanup_module(void)
682 del_timer(&config_timer); 682 del_timer(&config_timer);
683 del_timer(&cleanup_timer); 683 del_timer(&cleanup_timer);
684 684
685 if (z90_device_work)
686 destroy_workqueue(z90_device_work);
687
688 destroy_z90crypt(); 685 destroy_z90crypt();
689 686
690 PRINTKN("Unloaded.\n"); 687 PRINTKN("Unloaded.\n");
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 96ca863eaff2..0db4f57a6a95 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * $Id: ctcmain.c,v 1.74 2005/03/24 09:04:17 mschwide Exp $ 2 * $Id: ctcmain.c,v 1.78 2005/09/07 12:18:02 pavlic Exp $
3 * 3 *
4 * CTC / ESCON network driver 4 * CTC / ESCON network driver
5 * 5 *
@@ -37,10 +37,9 @@
37 * along with this program; if not, write to the Free Software 37 * along with this program; if not, write to the Free Software
38 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 38 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
39 * 39 *
40 * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.74 $ 40 * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.78 $
41 * 41 *
42 */ 42 */
43
44#undef DEBUG 43#undef DEBUG
45#include <linux/module.h> 44#include <linux/module.h>
46#include <linux/init.h> 45#include <linux/init.h>
@@ -135,7 +134,7 @@ static const char *dev_event_names[] = {
135 "TX down", 134 "TX down",
136 "Restart", 135 "Restart",
137}; 136};
138 137
139/** 138/**
140 * Events of the channel statemachine 139 * Events of the channel statemachine
141 */ 140 */
@@ -249,7 +248,7 @@ static void
249print_banner(void) 248print_banner(void)
250{ 249{
251 static int printed = 0; 250 static int printed = 0;
252 char vbuf[] = "$Revision: 1.74 $"; 251 char vbuf[] = "$Revision: 1.78 $";
253 char *version = vbuf; 252 char *version = vbuf;
254 253
255 if (printed) 254 if (printed)
@@ -334,7 +333,7 @@ static const char *ch_state_names[] = {
334 "Restarting", 333 "Restarting",
335 "Not operational", 334 "Not operational",
336}; 335};
337 336
338#ifdef DEBUG 337#ifdef DEBUG
339/** 338/**
340 * Dump header and first 16 bytes of an sk_buff for debugging purposes. 339 * Dump header and first 16 bytes of an sk_buff for debugging purposes.
@@ -671,7 +670,7 @@ static void
671fsm_action_nop(fsm_instance * fi, int event, void *arg) 670fsm_action_nop(fsm_instance * fi, int event, void *arg)
672{ 671{
673} 672}
674 673
675/** 674/**
676 * Actions for channel - statemachines. 675 * Actions for channel - statemachines.
677 *****************************************************************************/ 676 *****************************************************************************/
@@ -1514,7 +1513,6 @@ ch_action_reinit(fsm_instance *fi, int event, void *arg)
1514 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev); 1513 fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
1515} 1514}
1516 1515
1517
1518/** 1516/**
1519 * The statemachine for a channel. 1517 * The statemachine for a channel.
1520 */ 1518 */
@@ -1625,7 +1623,7 @@ static const fsm_node ch_fsm[] = {
1625}; 1623};
1626 1624
1627static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node); 1625static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
1628 1626
1629/** 1627/**
1630 * Functions related to setup and device detection. 1628 * Functions related to setup and device detection.
1631 *****************************************************************************/ 1629 *****************************************************************************/
@@ -1976,7 +1974,7 @@ ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1976 fsm_event(ch->fsm, CH_EVENT_IRQ, ch); 1974 fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
1977 1975
1978} 1976}
1979 1977
1980/** 1978/**
1981 * Actions for interface - statemachine. 1979 * Actions for interface - statemachine.
1982 *****************************************************************************/ 1980 *****************************************************************************/
@@ -2209,13 +2207,18 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
2209 int rc = 0; 2207 int rc = 0;
2210 2208
2211 DBF_TEXT(trace, 5, __FUNCTION__); 2209 DBF_TEXT(trace, 5, __FUNCTION__);
2210 /* we need to acquire the lock for testing the state
2211 * otherwise we can have an IRQ changing the state to
2212 * TXIDLE after the test but before acquiring the lock.
2213 */
2214 spin_lock_irqsave(&ch->collect_lock, saveflags);
2212 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) { 2215 if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
2213 int l = skb->len + LL_HEADER_LENGTH; 2216 int l = skb->len + LL_HEADER_LENGTH;
2214 2217
2215 spin_lock_irqsave(&ch->collect_lock, saveflags); 2218 if (ch->collect_len + l > ch->max_bufsize - 2) {
2216 if (ch->collect_len + l > ch->max_bufsize - 2) 2219 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2217 rc = -EBUSY; 2220 return -EBUSY;
2218 else { 2221 } else {
2219 atomic_inc(&skb->users); 2222 atomic_inc(&skb->users);
2220 header.length = l; 2223 header.length = l;
2221 header.type = skb->protocol; 2224 header.type = skb->protocol;
@@ -2231,7 +2234,7 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
2231 int ccw_idx; 2234 int ccw_idx;
2232 struct sk_buff *nskb; 2235 struct sk_buff *nskb;
2233 unsigned long hi; 2236 unsigned long hi;
2234 2237 spin_unlock_irqrestore(&ch->collect_lock, saveflags);
2235 /** 2238 /**
2236 * Protect skb against beeing free'd by upper 2239 * Protect skb against beeing free'd by upper
2237 * layers. 2240 * layers.
@@ -2256,6 +2259,7 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
2256 if (!nskb) { 2259 if (!nskb) {
2257 atomic_dec(&skb->users); 2260 atomic_dec(&skb->users);
2258 skb_pull(skb, LL_HEADER_LENGTH + 2); 2261 skb_pull(skb, LL_HEADER_LENGTH + 2);
2262 ctc_clear_busy(ch->netdev);
2259 return -ENOMEM; 2263 return -ENOMEM;
2260 } else { 2264 } else {
2261 memcpy(skb_put(nskb, skb->len), 2265 memcpy(skb_put(nskb, skb->len),
@@ -2281,6 +2285,7 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
2281 */ 2285 */
2282 atomic_dec(&skb->users); 2286 atomic_dec(&skb->users);
2283 skb_pull(skb, LL_HEADER_LENGTH + 2); 2287 skb_pull(skb, LL_HEADER_LENGTH + 2);
2288 ctc_clear_busy(ch->netdev);
2284 return -EBUSY; 2289 return -EBUSY;
2285 } 2290 }
2286 2291
@@ -2327,9 +2332,10 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
2327 } 2332 }
2328 } 2333 }
2329 2334
2335 ctc_clear_busy(ch->netdev);
2330 return rc; 2336 return rc;
2331} 2337}
2332 2338
2333/** 2339/**
2334 * Interface API for upper network layers 2340 * Interface API for upper network layers
2335 *****************************************************************************/ 2341 *****************************************************************************/
@@ -2421,7 +2427,6 @@ ctc_tx(struct sk_buff *skb, struct net_device * dev)
2421 dev->trans_start = jiffies; 2427 dev->trans_start = jiffies;
2422 if (transmit_skb(privptr->channel[WRITE], skb) != 0) 2428 if (transmit_skb(privptr->channel[WRITE], skb) != 0)
2423 rc = 1; 2429 rc = 1;
2424 ctc_clear_busy(dev);
2425 return rc; 2430 return rc;
2426} 2431}
2427 2432
@@ -2610,7 +2615,6 @@ stats_write(struct device *dev, struct device_attribute *attr, const char *buf,
2610 return count; 2615 return count;
2611} 2616}
2612 2617
2613
2614static void 2618static void
2615ctc_netdev_unregister(struct net_device * dev) 2619ctc_netdev_unregister(struct net_device * dev)
2616{ 2620{
@@ -2685,7 +2689,6 @@ ctc_proto_store(struct device *dev, struct device_attribute *attr, const char *b
2685 return count; 2689 return count;
2686} 2690}
2687 2691
2688
2689static ssize_t 2692static ssize_t
2690ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf) 2693ctc_type_show(struct device *dev, struct device_attribute *attr, char *buf)
2691{ 2694{
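
The transmit_skb() hunk above (the one moving spin_lock_irqsave before the fsm_getstate() test) fixes a check-then-lock race: an IRQ could switch the channel to TXIDLE after the state test but before the lock was taken, so the skb went down the wrong path. Below is a user-space analogue of that pattern using a pthread mutex in place of the kernel spinlock; the names and the simplified bookkeeping are illustrative only, not the driver code.

#include <pthread.h>
#include <stdio.h>

enum ch_state { CH_STATE_TXIDLE, CH_STATE_TX };

static enum ch_state state = CH_STATE_TX;
static pthread_mutex_t collect_lock = PTHREAD_MUTEX_INITIALIZER;

/* racy: state may become TXIDLE after the test but before the lock */
static int enqueue_racy(void)
{
	if (state != CH_STATE_TXIDLE) {
		pthread_mutex_lock(&collect_lock);
		/* ... queue skb on the collect list ... */
		pthread_mutex_unlock(&collect_lock);
		return 1;
	}
	return 0;	/* would start I/O directly */
}

/* fixed: the test and the queueing happen under the same lock */
static int enqueue_fixed(void)
{
	int queued = 0;

	pthread_mutex_lock(&collect_lock);
	if (state != CH_STATE_TXIDLE) {
		/* ... queue skb on the collect list ... */
		queued = 1;
	}
	pthread_mutex_unlock(&collect_lock);
	return queued;
}

int main(void)
{
	printf("racy: %d, fixed: %d\n", enqueue_racy(), enqueue_fixed());
	return 0;
}
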
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 3a0285669adf..2ad4797ce024 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -24,7 +24,7 @@
24 24
25#include "qeth_mpc.h" 25#include "qeth_mpc.h"
26 26
27#define VERSION_QETH_H "$Revision: 1.139 $" 27#define VERSION_QETH_H "$Revision: 1.142 $"
28 28
29#ifdef CONFIG_QETH_IPV6 29#ifdef CONFIG_QETH_IPV6
30#define QETH_VERSION_IPV6 ":IPv6" 30#define QETH_VERSION_IPV6 ":IPv6"
@@ -1172,7 +1172,7 @@ extern int
1172qeth_realloc_buffer_pool(struct qeth_card *, int); 1172qeth_realloc_buffer_pool(struct qeth_card *, int);
1173 1173
1174extern int 1174extern int
1175qeth_set_large_send(struct qeth_card *); 1175qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
1176 1176
1177extern void 1177extern void
1178qeth_fill_header(struct qeth_card *, struct qeth_hdr *, 1178qeth_fill_header(struct qeth_card *, struct qeth_hdr *,
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 79c74f3a11f5..71de834ece1a 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * 2 *
3 * linux/drivers/s390/net/qeth_main.c ($Revision: 1.214 $) 3 * linux/drivers/s390/net/qeth_main.c ($Revision: 1.224 $)
4 * 4 *
5 * Linux on zSeries OSA Express and HiperSockets support 5 * Linux on zSeries OSA Express and HiperSockets support
6 * 6 *
@@ -12,7 +12,7 @@
12 * Frank Pavlic (pavlic@de.ibm.com) and 12 * Frank Pavlic (pavlic@de.ibm.com) and
13 * Thomas Spatzier <tspat@de.ibm.com> 13 * Thomas Spatzier <tspat@de.ibm.com>
14 * 14 *
15 * $Revision: 1.214 $ $Date: 2005/05/04 20:19:18 $ 15 * $Revision: 1.224 $ $Date: 2005/05/04 20:19:18 $
16 * 16 *
17 * This program is free software; you can redistribute it and/or modify 17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by 18 * it under the terms of the GNU General Public License as published by
@@ -29,14 +29,6 @@
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 */ 30 */
31 31
32/***
33 * eye catcher; just for debugging purposes
34 */
35void volatile
36qeth_eyecatcher(void)
37{
38 return;
39}
40 32
41#include <linux/config.h> 33#include <linux/config.h>
42#include <linux/module.h> 34#include <linux/module.h>
@@ -80,7 +72,7 @@ qeth_eyecatcher(void)
80#include "qeth_eddp.h" 72#include "qeth_eddp.h"
81#include "qeth_tso.h" 73#include "qeth_tso.h"
82 74
83#define VERSION_QETH_C "$Revision: 1.214 $" 75#define VERSION_QETH_C "$Revision: 1.224 $"
84static const char *version = "qeth S/390 OSA-Express driver"; 76static const char *version = "qeth S/390 OSA-Express driver";
85 77
86/** 78/**
@@ -2759,11 +2751,9 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2759 queue->card->perf_stats.outbound_do_qdio_start_time; 2751 queue->card->perf_stats.outbound_do_qdio_start_time;
2760#endif 2752#endif
2761 if (rc){ 2753 if (rc){
2762 QETH_DBF_SPRINTF(trace, 0, "qeth_flush_buffers: do_QDIO "
2763 "returned error (%i) on device %s.",
2764 rc, CARD_DDEV_ID(queue->card));
2765 QETH_DBF_TEXT(trace, 2, "flushbuf"); 2754 QETH_DBF_TEXT(trace, 2, "flushbuf");
2766 QETH_DBF_TEXT_(trace, 2, " err%d", rc); 2755 QETH_DBF_TEXT_(trace, 2, " err%d", rc);
2756 QETH_DBF_TEXT_(trace, 2, "%s", CARD_DDEV_ID(queue->card));
2767 queue->card->stats.tx_errors += count; 2757 queue->card->stats.tx_errors += count;
2768 /* this must not happen under normal circumstances. if it 2758 /* this must not happen under normal circumstances. if it
2769 * happens something is really wrong -> recover */ 2759 * happens something is really wrong -> recover */
@@ -2909,11 +2899,8 @@ qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
2909 QETH_DBF_TEXT(trace, 6, "qdouhdl"); 2899 QETH_DBF_TEXT(trace, 6, "qdouhdl");
2910 if (status & QDIO_STATUS_LOOK_FOR_ERROR) { 2900 if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
2911 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){ 2901 if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
2912 QETH_DBF_SPRINTF(trace, 2, "On device %s: " 2902 QETH_DBF_TEXT(trace, 2, "achkcond");
2913 "received active check " 2903 QETH_DBF_TEXT_(trace, 2, "%s", CARD_BUS_ID(card));
2914 "condition (0x%08x).",
2915 CARD_BUS_ID(card), status);
2916 QETH_DBF_TEXT(trace, 2, "chkcond");
2917 QETH_DBF_TEXT_(trace, 2, "%08x", status); 2904 QETH_DBF_TEXT_(trace, 2, "%08x", status);
2918 netif_stop_queue(card->dev); 2905 netif_stop_queue(card->dev);
2919 qeth_schedule_recovery(card); 2906 qeth_schedule_recovery(card);
@@ -3356,26 +3343,32 @@ qeth_halt_channel(struct qeth_channel *channel)
3356static int 3343static int
3357qeth_halt_channels(struct qeth_card *card) 3344qeth_halt_channels(struct qeth_card *card)
3358{ 3345{
3359 int rc = 0; 3346 int rc1 = 0, rc2=0, rc3 = 0;
3360 3347
3361 QETH_DBF_TEXT(trace,3,"haltchs"); 3348 QETH_DBF_TEXT(trace,3,"haltchs");
3362 if ((rc = qeth_halt_channel(&card->read))) 3349 rc1 = qeth_halt_channel(&card->read);
3363 return rc; 3350 rc2 = qeth_halt_channel(&card->write);
3364 if ((rc = qeth_halt_channel(&card->write))) 3351 rc3 = qeth_halt_channel(&card->data);
3365 return rc; 3352 if (rc1)
3366 return qeth_halt_channel(&card->data); 3353 return rc1;
3354 if (rc2)
3355 return rc2;
3356 return rc3;
3367} 3357}
3368static int 3358static int
3369qeth_clear_channels(struct qeth_card *card) 3359qeth_clear_channels(struct qeth_card *card)
3370{ 3360{
3371 int rc = 0; 3361 int rc1 = 0, rc2=0, rc3 = 0;
3372 3362
3373 QETH_DBF_TEXT(trace,3,"clearchs"); 3363 QETH_DBF_TEXT(trace,3,"clearchs");
3374 if ((rc = qeth_clear_channel(&card->read))) 3364 rc1 = qeth_clear_channel(&card->read);
3375 return rc; 3365 rc2 = qeth_clear_channel(&card->write);
3376 if ((rc = qeth_clear_channel(&card->write))) 3366 rc3 = qeth_clear_channel(&card->data);
3377 return rc; 3367 if (rc1)
3378 return qeth_clear_channel(&card->data); 3368 return rc1;
3369 if (rc2)
3370 return rc2;
3371 return rc3;
3379} 3372}
3380 3373
3381static int 3374static int
@@ -3445,23 +3438,23 @@ qeth_mpc_initialize(struct qeth_card *card)
3445 } 3438 }
3446 if ((rc = qeth_cm_enable(card))){ 3439 if ((rc = qeth_cm_enable(card))){
3447 QETH_DBF_TEXT_(setup, 2, "2err%d", rc); 3440 QETH_DBF_TEXT_(setup, 2, "2err%d", rc);
3448 return rc; 3441 goto out_qdio;
3449 } 3442 }
3450 if ((rc = qeth_cm_setup(card))){ 3443 if ((rc = qeth_cm_setup(card))){
3451 QETH_DBF_TEXT_(setup, 2, "3err%d", rc); 3444 QETH_DBF_TEXT_(setup, 2, "3err%d", rc);
3452 return rc; 3445 goto out_qdio;
3453 } 3446 }
3454 if ((rc = qeth_ulp_enable(card))){ 3447 if ((rc = qeth_ulp_enable(card))){
3455 QETH_DBF_TEXT_(setup, 2, "4err%d", rc); 3448 QETH_DBF_TEXT_(setup, 2, "4err%d", rc);
3456 return rc; 3449 goto out_qdio;
3457 } 3450 }
3458 if ((rc = qeth_ulp_setup(card))){ 3451 if ((rc = qeth_ulp_setup(card))){
3459 QETH_DBF_TEXT_(setup, 2, "5err%d", rc); 3452 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3460 return rc; 3453 goto out_qdio;
3461 } 3454 }
3462 if ((rc = qeth_alloc_qdio_buffers(card))){ 3455 if ((rc = qeth_alloc_qdio_buffers(card))){
3463 QETH_DBF_TEXT_(setup, 2, "5err%d", rc); 3456 QETH_DBF_TEXT_(setup, 2, "5err%d", rc);
3464 return rc; 3457 goto out_qdio;
3465 } 3458 }
3466 if ((rc = qeth_qdio_establish(card))){ 3459 if ((rc = qeth_qdio_establish(card))){
3467 QETH_DBF_TEXT_(setup, 2, "6err%d", rc); 3460 QETH_DBF_TEXT_(setup, 2, "6err%d", rc);
@@ -3795,12 +3788,16 @@ static inline int
3795qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb, 3788qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
3796 struct qeth_hdr **hdr, int ipv) 3789 struct qeth_hdr **hdr, int ipv)
3797{ 3790{
3791 int rc;
3798#ifdef CONFIG_QETH_VLAN 3792#ifdef CONFIG_QETH_VLAN
3799 u16 *tag; 3793 u16 *tag;
3800#endif 3794#endif
3801 3795
3802 QETH_DBF_TEXT(trace, 6, "prepskb"); 3796 QETH_DBF_TEXT(trace, 6, "prepskb");
3803 3797
3798 rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
3799 if (rc)
3800 return rc;
3804#ifdef CONFIG_QETH_VLAN 3801#ifdef CONFIG_QETH_VLAN
3805 if (card->vlangrp && vlan_tx_tag_present(*skb) && 3802 if (card->vlangrp && vlan_tx_tag_present(*skb) &&
3806 ((ipv == 6) || card->options.layer2) ) { 3803 ((ipv == 6) || card->options.layer2) ) {
@@ -4251,7 +4248,8 @@ out:
4251} 4248}
4252 4249
4253static inline int 4250static inline int
4254qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb) 4251qeth_get_elements_no(struct qeth_card *card, void *hdr,
4252 struct sk_buff *skb, int elems)
4255{ 4253{
4256 int elements_needed = 0; 4254 int elements_needed = 0;
4257 4255
@@ -4261,9 +4259,10 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb)
4261 if (elements_needed == 0 ) 4259 if (elements_needed == 0 )
4262 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE) 4260 elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
4263 + skb->len) >> PAGE_SHIFT); 4261 + skb->len) >> PAGE_SHIFT);
4264 if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){ 4262 if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)){
4265 PRINT_ERR("qeth_do_send_packet: invalid size of " 4263 PRINT_ERR("qeth_do_send_packet: invalid size of "
4266 "IP packet. Discarded."); 4264 "IP packet (Number=%d / Length=%d). Discarded.\n",
4265 (elements_needed+elems), skb->len);
4267 return 0; 4266 return 0;
4268 } 4267 }
4269 return elements_needed; 4268 return elements_needed;
@@ -4275,7 +4274,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4275 int ipv = 0; 4274 int ipv = 0;
4276 int cast_type; 4275 int cast_type;
4277 struct qeth_qdio_out_q *queue; 4276 struct qeth_qdio_out_q *queue;
4278 struct qeth_hdr *hdr; 4277 struct qeth_hdr *hdr = NULL;
4279 int elements_needed = 0; 4278 int elements_needed = 0;
4280 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; 4279 enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
4281 struct qeth_eddp_context *ctx = NULL; 4280 struct qeth_eddp_context *ctx = NULL;
@@ -4337,9 +4336,11 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4337 return -EINVAL; 4336 return -EINVAL;
4338 } 4337 }
4339 } else { 4338 } else {
4340 elements_needed += qeth_get_elements_no(card,(void*) hdr, skb); 4339 int elems = qeth_get_elements_no(card,(void*) hdr, skb,
4341 if (!elements_needed) 4340 elements_needed);
4341 if (!elems)
4342 return -EINVAL; 4342 return -EINVAL;
4343 elements_needed += elems;
4343 } 4344 }
4344 4345
4345 if (card->info.type != QETH_CARD_TYPE_IQD) 4346 if (card->info.type != QETH_CARD_TYPE_IQD)
@@ -4504,7 +4505,11 @@ qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4504 4505
4505 QETH_DBF_TEXT(trace,3,"arpstnoe"); 4506 QETH_DBF_TEXT(trace,3,"arpstnoe");
4506 4507
4507 /* TODO: really not supported by GuestLAN? */ 4508 /*
4509 * currently GuestLAN only supports the ARP assist function
4510 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
4511 * thus we say EOPNOTSUPP for this ARP function
4512 */
4508 if (card->info.guestlan) 4513 if (card->info.guestlan)
4509 return -EOPNOTSUPP; 4514 return -EOPNOTSUPP;
4510 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) { 4515 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
@@ -4681,14 +4686,6 @@ qeth_arp_query(struct qeth_card *card, char *udata)
4681 4686
4682 QETH_DBF_TEXT(trace,3,"arpquery"); 4687 QETH_DBF_TEXT(trace,3,"arpquery");
4683 4688
4684 /*
4685 * currently GuestLAN does only deliver all zeros on query arp,
4686 * even though arp processing is supported (according to IPA supp.
4687 * funcs flags); since all zeros is no valueable information,
4688 * we say EOPNOTSUPP for all ARP functions
4689 */
4690 /*if (card->info.guestlan)
4691 return -EOPNOTSUPP; */
4692 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ 4689 if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
4693 IPA_ARP_PROCESSING)) { 4690 IPA_ARP_PROCESSING)) {
4694 PRINT_WARN("ARP processing not supported " 4691 PRINT_WARN("ARP processing not supported "
@@ -4894,10 +4891,9 @@ qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
4894 QETH_DBF_TEXT(trace,3,"arpadent"); 4891 QETH_DBF_TEXT(trace,3,"arpadent");
4895 4892
4896 /* 4893 /*
4897 * currently GuestLAN does only deliver all zeros on query arp, 4894 * currently GuestLAN only supports the ARP assist function
4898 * even though arp processing is supported (according to IPA supp. 4895 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
4899 * funcs flags); since all zeros is no valueable information, 4896 * thus we say EOPNOTSUPP for this ARP function
4900 * we say EOPNOTSUPP for all ARP functions
4901 */ 4897 */
4902 if (card->info.guestlan) 4898 if (card->info.guestlan)
4903 return -EOPNOTSUPP; 4899 return -EOPNOTSUPP;
@@ -4937,10 +4933,9 @@ qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry
4937 QETH_DBF_TEXT(trace,3,"arprment"); 4933 QETH_DBF_TEXT(trace,3,"arprment");
4938 4934
4939 /* 4935 /*
4940 * currently GuestLAN does only deliver all zeros on query arp, 4936 * currently GuestLAN only supports the ARP assist function
4941 * even though arp processing is supported (according to IPA supp. 4937 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
4942 * funcs flags); since all zeros is no valueable information, 4938 * thus we say EOPNOTSUPP for this ARP function
4943 * we say EOPNOTSUPP for all ARP functions
4944 */ 4939 */
4945 if (card->info.guestlan) 4940 if (card->info.guestlan)
4946 return -EOPNOTSUPP; 4941 return -EOPNOTSUPP;
@@ -4978,11 +4973,10 @@ qeth_arp_flush_cache(struct qeth_card *card)
4978 QETH_DBF_TEXT(trace,3,"arpflush"); 4973 QETH_DBF_TEXT(trace,3,"arpflush");
4979 4974
4980 /* 4975 /*
4981 * currently GuestLAN does only deliver all zeros on query arp, 4976 * currently GuestLAN only supports the ARP assist function
4982 * even though arp processing is supported (according to IPA supp. 4977 * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
4983 * funcs flags); since all zeros is no valueable information, 4978 * thus we say EOPNOTSUPP for this ARP function
4984 * we say EOPNOTSUPP for all ARP functions 4979 */
4985 */
4986 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD)) 4980 if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
4987 return -EOPNOTSUPP; 4981 return -EOPNOTSUPP;
4988 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) { 4982 if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
@@ -5206,7 +5200,7 @@ qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
5206 if (!card->vlangrp) 5200 if (!card->vlangrp)
5207 return; 5201 return;
5208 rcu_read_lock(); 5202 rcu_read_lock();
5209 in_dev = __in_dev_get(card->vlangrp->vlan_devices[vid]); 5203 in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]);
5210 if (!in_dev) 5204 if (!in_dev)
5211 goto out; 5205 goto out;
5212 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { 5206 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
@@ -7038,14 +7032,16 @@ qeth_setrouting_v6(struct qeth_card *card)
7038} 7032}
7039 7033
7040int 7034int
7041qeth_set_large_send(struct qeth_card *card) 7035qeth_set_large_send(struct qeth_card *card, enum qeth_large_send_types type)
7042{ 7036{
7043 int rc = 0; 7037 int rc = 0;
7044 7038
7045 if (card->dev == NULL) 7039 if (card->dev == NULL) {
7040 card->options.large_send = type;
7046 return 0; 7041 return 0;
7047 7042 }
7048 netif_stop_queue(card->dev); 7043 netif_stop_queue(card->dev);
7044 card->options.large_send = type;
7049 switch (card->options.large_send) { 7045 switch (card->options.large_send) {
7050 case QETH_LARGE_SEND_EDDP: 7046 case QETH_LARGE_SEND_EDDP:
7051 card->dev->features |= NETIF_F_TSO | NETIF_F_SG; 7047 card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
@@ -7066,7 +7062,6 @@ qeth_set_large_send(struct qeth_card *card)
7066 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG); 7062 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
7067 break; 7063 break;
7068 } 7064 }
7069
7070 netif_wake_queue(card->dev); 7065 netif_wake_queue(card->dev);
7071 return rc; 7066 return rc;
7072} 7067}
@@ -7730,7 +7725,7 @@ qeth_arp_constructor(struct neighbour *neigh)
7730 goto out; 7725 goto out;
7731 7726
7732 rcu_read_lock(); 7727 rcu_read_lock();
7733 in_dev = rcu_dereference(__in_dev_get(dev)); 7728 in_dev = __in_dev_get_rcu(dev);
7734 if (in_dev == NULL) { 7729 if (in_dev == NULL) {
7735 rcu_read_unlock(); 7730 rcu_read_unlock();
7736 return -EINVAL; 7731 return -EINVAL;
@@ -8257,7 +8252,6 @@ qeth_init(void)
8257{ 8252{
8258 int rc=0; 8253 int rc=0;
8259 8254
8260 qeth_eyecatcher();
8261 PRINT_INFO("loading %s (%s/%s/%s/%s/%s/%s/%s %s %s)\n", 8255 PRINT_INFO("loading %s (%s/%s/%s/%s/%s/%s/%s %s %s)\n",
8262 version, VERSION_QETH_C, VERSION_QETH_H, 8256 version, VERSION_QETH_C, VERSION_QETH_H,
8263 VERSION_QETH_MPC_H, VERSION_QETH_MPC_C, 8257 VERSION_QETH_MPC_H, VERSION_QETH_MPC_C,
@@ -8338,7 +8332,6 @@ again:
8338 printk("qeth: removed\n"); 8332 printk("qeth: removed\n");
8339} 8333}
8340 8334
8341EXPORT_SYMBOL(qeth_eyecatcher);
8342module_init(qeth_init); 8335module_init(qeth_init);
8343module_exit(qeth_exit); 8336module_exit(qeth_exit);
8344MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>"); 8337MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
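
Among the qeth_main.c changes, the qeth_halt_channels()/qeth_clear_channels() hunks switch from returning on the first failing channel (leaving the remaining channels untouched) to attempting all three channels and only then reporting the first error. A stand-alone sketch of that pattern, with halt_one() as a hypothetical stand-in for qeth_halt_channel() (this is not the driver code itself):

#include <stdio.h>

static int halt_one(const char *name, int simulated_rc)
{
	printf("halting %s channel -> rc=%d\n", name, simulated_rc);
	return simulated_rc;
}

static int halt_channels(void)
{
	int rc1, rc2, rc3;

	rc1 = halt_one("read", -5);	/* fails ...                      */
	rc2 = halt_one("write", 0);	/* ... but write ...              */
	rc3 = halt_one("data", 0);	/* ... and data are still halted  */

	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

int main(void)
{
	printf("halt_channels() = %d\n", halt_channels());
	return 0;
}
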
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
index 98bedb0cb387..dda105b73063 100644
--- a/drivers/s390/net/qeth_sys.c
+++ b/drivers/s390/net/qeth_sys.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * 2 *
3 * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.51 $) 3 * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.54 $)
4 * 4 *
5 * Linux on zSeries OSA Express and HiperSockets support 5 * Linux on zSeries OSA Express and HiperSockets support
6 * This file contains code related to sysfs. 6 * This file contains code related to sysfs.
@@ -20,7 +20,7 @@
20#include "qeth_mpc.h" 20#include "qeth_mpc.h"
21#include "qeth_fs.h" 21#include "qeth_fs.h"
22 22
23const char *VERSION_QETH_SYS_C = "$Revision: 1.51 $"; 23const char *VERSION_QETH_SYS_C = "$Revision: 1.54 $";
24 24
25/*****************************************************************************/ 25/*****************************************************************************/
26/* */ 26/* */
@@ -722,10 +722,13 @@ qeth_dev_layer2_store(struct device *dev, struct device_attribute *attr, const c
722 722
723 if (!card) 723 if (!card)
724 return -EINVAL; 724 return -EINVAL;
725 if (card->info.type == QETH_CARD_TYPE_IQD) {
726 PRINT_WARN("Layer2 on Hipersockets is not supported! \n");
727 return -EPERM;
728 }
725 729
726 if (((card->state != CARD_STATE_DOWN) && 730 if (((card->state != CARD_STATE_DOWN) &&
727 (card->state != CARD_STATE_RECOVER)) || 731 (card->state != CARD_STATE_RECOVER)))
728 (card->info.type != QETH_CARD_TYPE_OSAE))
729 return -EPERM; 732 return -EPERM;
730 733
731 i = simple_strtoul(buf, &tmp, 16); 734 i = simple_strtoul(buf, &tmp, 16);
@@ -771,9 +774,7 @@ qeth_dev_large_send_store(struct device *dev, struct device_attribute *attr, con
771 774
772 if (!card) 775 if (!card)
773 return -EINVAL; 776 return -EINVAL;
774
775 tmp = strsep((char **) &buf, "\n"); 777 tmp = strsep((char **) &buf, "\n");
776
777 if (!strcmp(tmp, "no")){ 778 if (!strcmp(tmp, "no")){
778 type = QETH_LARGE_SEND_NO; 779 type = QETH_LARGE_SEND_NO;
779 } else if (!strcmp(tmp, "EDDP")) { 780 } else if (!strcmp(tmp, "EDDP")) {
@@ -786,10 +787,8 @@ qeth_dev_large_send_store(struct device *dev, struct device_attribute *attr, con
786 } 787 }
787 if (card->options.large_send == type) 788 if (card->options.large_send == type)
788 return count; 789 return count;
789 card->options.large_send = type; 790 if ((rc = qeth_set_large_send(card, type)))
790 if ((rc = qeth_set_large_send(card)))
791 return rc; 791 return rc;
792
793 return count; 792 return count;
794} 793}
795 794
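
The qeth_dev_large_send_store() hunk above no longer writes card->options.large_send directly; it hands the requested mode to qeth_set_large_send(card, type), which (per the qeth_main.c hunk) stops the queue, records the option, adjusts the TSO/SG feature flags and wakes the queue, so the stored option and the advertised device features cannot drift apart. The sketch below is a simplified user-space rendering of that idea; the flag values and names are invented for the demo and are not the netdev feature API.

#include <stdio.h>

#define F_TSO	0x1
#define F_SG	0x2

enum large_send { LS_NO, LS_EDDP, LS_TSO };

struct card {
	enum large_send large_send;	/* configured option */
	unsigned int features;		/* what the device advertises */
};

static int set_large_send(struct card *card, enum large_send type)
{
	/* netif_stop_queue() would go here in the driver */
	card->large_send = type;
	switch (type) {
	case LS_EDDP:
	case LS_TSO:
		card->features |= F_TSO | F_SG;
		break;
	case LS_NO:
		card->features &= ~(F_TSO | F_SG);
		break;
	}
	/* netif_wake_queue() would go here */
	return 0;
}

int main(void)
{
	struct card c = { LS_NO, 0 };

	set_large_send(&c, LS_TSO);
	printf("option=%d features=%#x\n", c.large_send, c.features);
	return 0;
}
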
diff --git a/drivers/s390/scsi/Makefile b/drivers/s390/scsi/Makefile
index fc145307a7d4..d6a78f1a2f16 100644
--- a/drivers/s390/scsi/Makefile
+++ b/drivers/s390/scsi/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \ 5zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \
6 zfcp_fsf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \ 6 zfcp_fsf.o zfcp_dbf.o zfcp_sysfs_adapter.o zfcp_sysfs_port.o \
7 zfcp_sysfs_unit.o zfcp_sysfs_driver.o 7 zfcp_sysfs_unit.o zfcp_sysfs_driver.o
8 8
9obj-$(CONFIG_ZFCP) += zfcp.o 9obj-$(CONFIG_ZFCP) += zfcp.o
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index bfe3ba73bc0f..0b5087f7cabc 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -122,95 +122,6 @@ _zfcp_hex_dump(char *addr, int count)
122 122
123#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER 123#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
124 124
125static inline int
126zfcp_fsf_req_is_scsi_cmnd(struct zfcp_fsf_req *fsf_req)
127{
128 return ((fsf_req->fsf_command == FSF_QTCB_FCP_CMND) &&
129 !(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT));
130}
131
132void
133zfcp_cmd_dbf_event_fsf(const char *text, struct zfcp_fsf_req *fsf_req,
134 void *add_data, int add_length)
135{
136 struct zfcp_adapter *adapter = fsf_req->adapter;
137 struct scsi_cmnd *scsi_cmnd;
138 int level = 3;
139 int i;
140 unsigned long flags;
141
142 spin_lock_irqsave(&adapter->dbf_lock, flags);
143 if (zfcp_fsf_req_is_scsi_cmnd(fsf_req)) {
144 scsi_cmnd = fsf_req->data.send_fcp_command_task.scsi_cmnd;
145 debug_text_event(adapter->cmd_dbf, level, "fsferror");
146 debug_text_event(adapter->cmd_dbf, level, text);
147 debug_event(adapter->cmd_dbf, level, &fsf_req,
148 sizeof (unsigned long));
149 debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no,
150 sizeof (u32));
151 debug_event(adapter->cmd_dbf, level, &scsi_cmnd,
152 sizeof (unsigned long));
153 debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
154 min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
155 for (i = 0; i < add_length; i += ZFCP_CMD_DBF_LENGTH)
156 debug_event(adapter->cmd_dbf,
157 level,
158 (char *) add_data + i,
159 min(ZFCP_CMD_DBF_LENGTH, add_length - i));
160 }
161 spin_unlock_irqrestore(&adapter->dbf_lock, flags);
162}
163
164/* XXX additionally log unit if available */
165/* ---> introduce new parameter for unit, see 2.4 code */
166void
167zfcp_cmd_dbf_event_scsi(const char *text, struct scsi_cmnd *scsi_cmnd)
168{
169 struct zfcp_adapter *adapter;
170 union zfcp_req_data *req_data;
171 struct zfcp_fsf_req *fsf_req;
172 int level = ((host_byte(scsi_cmnd->result) != 0) ? 1 : 5);
173 unsigned long flags;
174
175 adapter = (struct zfcp_adapter *) scsi_cmnd->device->host->hostdata[0];
176 req_data = (union zfcp_req_data *) scsi_cmnd->host_scribble;
177 fsf_req = (req_data ? req_data->send_fcp_command_task.fsf_req : NULL);
178 spin_lock_irqsave(&adapter->dbf_lock, flags);
179 debug_text_event(adapter->cmd_dbf, level, "hostbyte");
180 debug_text_event(adapter->cmd_dbf, level, text);
181 debug_event(adapter->cmd_dbf, level, &scsi_cmnd->result, sizeof (u32));
182 debug_event(adapter->cmd_dbf, level, &scsi_cmnd,
183 sizeof (unsigned long));
184 debug_event(adapter->cmd_dbf, level, &scsi_cmnd->cmnd,
185 min(ZFCP_CMD_DBF_LENGTH, (int)scsi_cmnd->cmd_len));
186 if (likely(fsf_req)) {
187 debug_event(adapter->cmd_dbf, level, &fsf_req,
188 sizeof (unsigned long));
189 debug_event(adapter->cmd_dbf, level, &fsf_req->seq_no,
190 sizeof (u32));
191 } else {
192 debug_text_event(adapter->cmd_dbf, level, "");
193 debug_text_event(adapter->cmd_dbf, level, "");
194 }
195 spin_unlock_irqrestore(&adapter->dbf_lock, flags);
196}
197
198void
199zfcp_in_els_dbf_event(struct zfcp_adapter *adapter, const char *text,
200 struct fsf_status_read_buffer *status_buffer, int length)
201{
202 int level = 1;
203 int i;
204
205 debug_text_event(adapter->in_els_dbf, level, text);
206 debug_event(adapter->in_els_dbf, level, &status_buffer->d_id, 8);
207 for (i = 0; i < length; i += ZFCP_IN_ELS_DBF_LENGTH)
208 debug_event(adapter->in_els_dbf,
209 level,
210 (char *) status_buffer->payload + i,
211 min(ZFCP_IN_ELS_DBF_LENGTH, length - i));
212}
213
214/** 125/**
215 * zfcp_device_setup - setup function 126 * zfcp_device_setup - setup function
216 * @str: pointer to parameter string 127 * @str: pointer to parameter string
@@ -1017,81 +928,6 @@ zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
1017 mempool_destroy(adapter->pool.data_gid_pn); 928 mempool_destroy(adapter->pool.data_gid_pn);
1018} 929}
1019 930
1020/**
1021 * zfcp_adapter_debug_register - registers debug feature for an adapter
1022 * @adapter: pointer to adapter for which debug features should be registered
1023 * return: -ENOMEM on error, 0 otherwise
1024 */
1025int
1026zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
1027{
1028 char dbf_name[20];
1029
1030 /* debug feature area which records SCSI command failures (hostbyte) */
1031 spin_lock_init(&adapter->dbf_lock);
1032
1033 sprintf(dbf_name, ZFCP_CMD_DBF_NAME "%s",
1034 zfcp_get_busid_by_adapter(adapter));
1035 adapter->cmd_dbf = debug_register(dbf_name, ZFCP_CMD_DBF_INDEX,
1036 ZFCP_CMD_DBF_AREAS,
1037 ZFCP_CMD_DBF_LENGTH);
1038 debug_register_view(adapter->cmd_dbf, &debug_hex_ascii_view);
1039 debug_set_level(adapter->cmd_dbf, ZFCP_CMD_DBF_LEVEL);
1040
1041 /* debug feature area which records SCSI command aborts */
1042 sprintf(dbf_name, ZFCP_ABORT_DBF_NAME "%s",
1043 zfcp_get_busid_by_adapter(adapter));
1044 adapter->abort_dbf = debug_register(dbf_name, ZFCP_ABORT_DBF_INDEX,
1045 ZFCP_ABORT_DBF_AREAS,
1046 ZFCP_ABORT_DBF_LENGTH);
1047 debug_register_view(adapter->abort_dbf, &debug_hex_ascii_view);
1048 debug_set_level(adapter->abort_dbf, ZFCP_ABORT_DBF_LEVEL);
1049
1050 /* debug feature area which records incoming ELS commands */
1051 sprintf(dbf_name, ZFCP_IN_ELS_DBF_NAME "%s",
1052 zfcp_get_busid_by_adapter(adapter));
1053 adapter->in_els_dbf = debug_register(dbf_name, ZFCP_IN_ELS_DBF_INDEX,
1054 ZFCP_IN_ELS_DBF_AREAS,
1055 ZFCP_IN_ELS_DBF_LENGTH);
1056 debug_register_view(adapter->in_els_dbf, &debug_hex_ascii_view);
1057 debug_set_level(adapter->in_els_dbf, ZFCP_IN_ELS_DBF_LEVEL);
1058
1059 /* debug feature area which records erp events */
1060 sprintf(dbf_name, ZFCP_ERP_DBF_NAME "%s",
1061 zfcp_get_busid_by_adapter(adapter));
1062 adapter->erp_dbf = debug_register(dbf_name, ZFCP_ERP_DBF_INDEX,
1063 ZFCP_ERP_DBF_AREAS,
1064 ZFCP_ERP_DBF_LENGTH);
1065 debug_register_view(adapter->erp_dbf, &debug_hex_ascii_view);
1066 debug_set_level(adapter->erp_dbf, ZFCP_ERP_DBF_LEVEL);
1067
1068 if (!(adapter->cmd_dbf && adapter->abort_dbf &&
1069 adapter->in_els_dbf && adapter->erp_dbf)) {
1070 zfcp_adapter_debug_unregister(adapter);
1071 return -ENOMEM;
1072 }
1073
1074 return 0;
1075
1076}
1077
1078/**
1079 * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
1080 * @adapter: pointer to adapter for which debug features should be unregistered
1081 */
1082void
1083zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
1084{
1085 debug_unregister(adapter->abort_dbf);
1086 debug_unregister(adapter->cmd_dbf);
1087 debug_unregister(adapter->erp_dbf);
1088 debug_unregister(adapter->in_els_dbf);
1089 adapter->abort_dbf = NULL;
1090 adapter->cmd_dbf = NULL;
1091 adapter->erp_dbf = NULL;
1092 adapter->in_els_dbf = NULL;
1093}
1094
1095void 931void
1096zfcp_dummy_release(struct device *dev) 932zfcp_dummy_release(struct device *dev)
1097{ 933{
@@ -1462,10 +1298,6 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
1462 /* see FC-FS */ 1298 /* see FC-FS */
1463 no_entries = (fcp_rscn_head->payload_len / 4); 1299 no_entries = (fcp_rscn_head->payload_len / 4);
1464 1300
1465 zfcp_in_els_dbf_event(adapter, "##rscn", status_buffer,
1466 fcp_rscn_head->payload_len);
1467
1468 debug_text_event(adapter->erp_dbf, 1, "unsol_els_rscn:");
1469 for (i = 1; i < no_entries; i++) { 1301 for (i = 1; i < no_entries; i++) {
1470 /* skip head and start with 1st element */ 1302 /* skip head and start with 1st element */
1471 fcp_rscn_element++; 1303 fcp_rscn_element++;
@@ -1497,8 +1329,6 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
1497 (ZFCP_STATUS_PORT_DID_DID, &port->status)) { 1329 (ZFCP_STATUS_PORT_DID_DID, &port->status)) {
1498 ZFCP_LOG_INFO("incoming RSCN, trying to open " 1330 ZFCP_LOG_INFO("incoming RSCN, trying to open "
1499 "port 0x%016Lx\n", port->wwpn); 1331 "port 0x%016Lx\n", port->wwpn);
1500 debug_text_event(adapter->erp_dbf, 1,
1501 "unsol_els_rscnu:");
1502 zfcp_erp_port_reopen(port, 1332 zfcp_erp_port_reopen(port,
1503 ZFCP_STATUS_COMMON_ERP_FAILED); 1333 ZFCP_STATUS_COMMON_ERP_FAILED);
1504 continue; 1334 continue;
@@ -1524,8 +1354,6 @@ zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
1524 */ 1354 */
1525 ZFCP_LOG_INFO("incoming RSCN, trying to open " 1355 ZFCP_LOG_INFO("incoming RSCN, trying to open "
1526 "port 0x%016Lx\n", port->wwpn); 1356 "port 0x%016Lx\n", port->wwpn);
1527 debug_text_event(adapter->erp_dbf, 1,
1528 "unsol_els_rscnk:");
1529 zfcp_test_link(port); 1357 zfcp_test_link(port);
1530 } 1358 }
1531 } 1359 }
@@ -1541,8 +1369,6 @@ zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
1541 struct zfcp_port *port; 1369 struct zfcp_port *port;
1542 unsigned long flags; 1370 unsigned long flags;
1543 1371
1544 zfcp_in_els_dbf_event(adapter, "##plogi", status_buffer, 28);
1545
1546 read_lock_irqsave(&zfcp_data.config_lock, flags); 1372 read_lock_irqsave(&zfcp_data.config_lock, flags);
1547 list_for_each_entry(port, &adapter->port_list_head, list) { 1373 list_for_each_entry(port, &adapter->port_list_head, list) {
1548 if (port->wwpn == (*(wwn_t *) & els_logi->nport_wwn)) 1374 if (port->wwpn == (*(wwn_t *) & els_logi->nport_wwn))
@@ -1556,8 +1382,6 @@ zfcp_fsf_incoming_els_plogi(struct zfcp_adapter *adapter,
1556 status_buffer->d_id, 1382 status_buffer->d_id,
1557 zfcp_get_busid_by_adapter(adapter)); 1383 zfcp_get_busid_by_adapter(adapter));
1558 } else { 1384 } else {
1559 debug_text_event(adapter->erp_dbf, 1, "unsol_els_plogi:");
1560 debug_event(adapter->erp_dbf, 1, &els_logi->nport_wwn, 8);
1561 zfcp_erp_port_forced_reopen(port, 0); 1385 zfcp_erp_port_forced_reopen(port, 0);
1562 } 1386 }
1563} 1387}
@@ -1570,8 +1394,6 @@ zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
1570 struct zfcp_port *port; 1394 struct zfcp_port *port;
1571 unsigned long flags; 1395 unsigned long flags;
1572 1396
1573 zfcp_in_els_dbf_event(adapter, "##logo", status_buffer, 16);
1574
1575 read_lock_irqsave(&zfcp_data.config_lock, flags); 1397 read_lock_irqsave(&zfcp_data.config_lock, flags);
1576 list_for_each_entry(port, &adapter->port_list_head, list) { 1398 list_for_each_entry(port, &adapter->port_list_head, list) {
1577 if (port->wwpn == els_logo->nport_wwpn) 1399 if (port->wwpn == els_logo->nport_wwpn)
@@ -1585,8 +1407,6 @@ zfcp_fsf_incoming_els_logo(struct zfcp_adapter *adapter,
1585 status_buffer->d_id, 1407 status_buffer->d_id,
1586 zfcp_get_busid_by_adapter(adapter)); 1408 zfcp_get_busid_by_adapter(adapter));
1587 } else { 1409 } else {
1588 debug_text_event(adapter->erp_dbf, 1, "unsol_els_logo:");
1589 debug_event(adapter->erp_dbf, 1, &els_logo->nport_wwpn, 8);
1590 zfcp_erp_port_forced_reopen(port, 0); 1410 zfcp_erp_port_forced_reopen(port, 0);
1591 } 1411 }
1592} 1412}
@@ -1595,7 +1415,6 @@ static void
1595zfcp_fsf_incoming_els_unknown(struct zfcp_adapter *adapter, 1415zfcp_fsf_incoming_els_unknown(struct zfcp_adapter *adapter,
1596 struct fsf_status_read_buffer *status_buffer) 1416 struct fsf_status_read_buffer *status_buffer)
1597{ 1417{
1598 zfcp_in_els_dbf_event(adapter, "##undef", status_buffer, 24);
1599 ZFCP_LOG_NORMAL("warning: unknown incoming ELS 0x%08x " 1418 ZFCP_LOG_NORMAL("warning: unknown incoming ELS 0x%08x "
1600 "for adapter %s\n", *(u32 *) (status_buffer->payload), 1419 "for adapter %s\n", *(u32 *) (status_buffer->payload),
1601 zfcp_get_busid_by_adapter(adapter)); 1420 zfcp_get_busid_by_adapter(adapter));
@@ -1609,10 +1428,11 @@ zfcp_fsf_incoming_els(struct zfcp_fsf_req *fsf_req)
1609 u32 els_type; 1428 u32 els_type;
1610 struct zfcp_adapter *adapter; 1429 struct zfcp_adapter *adapter;
1611 1430
1612 status_buffer = fsf_req->data.status_read.buffer; 1431 status_buffer = (struct fsf_status_read_buffer *) fsf_req->data;
1613 els_type = *(u32 *) (status_buffer->payload); 1432 els_type = *(u32 *) (status_buffer->payload);
1614 adapter = fsf_req->adapter; 1433 adapter = fsf_req->adapter;
1615 1434
1435 zfcp_san_dbf_event_incoming_els(fsf_req);
1616 if (els_type == LS_PLOGI) 1436 if (els_type == LS_PLOGI)
1617 zfcp_fsf_incoming_els_plogi(adapter, status_buffer); 1437 zfcp_fsf_incoming_els_plogi(adapter, status_buffer);
1618 else if (els_type == LS_LOGO) 1438 else if (els_type == LS_LOGO)
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index b30abab77da3..0fc46381fc22 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -202,19 +202,9 @@ static int
202zfcp_ccw_set_offline(struct ccw_device *ccw_device) 202zfcp_ccw_set_offline(struct ccw_device *ccw_device)
203{ 203{
204 struct zfcp_adapter *adapter; 204 struct zfcp_adapter *adapter;
205 struct zfcp_port *port;
206 struct fc_rport *rport;
207 205
208 down(&zfcp_data.config_sema); 206 down(&zfcp_data.config_sema);
209 adapter = dev_get_drvdata(&ccw_device->dev); 207 adapter = dev_get_drvdata(&ccw_device->dev);
210 /* might be racy, but we cannot take config_lock due to the fact that
211 fc_remote_port_delete might sleep */
212 list_for_each_entry(port, &adapter->port_list_head, list)
213 if (port->rport) {
214 rport = port->rport;
215 port->rport = NULL;
216 fc_remote_port_delete(rport);
217 }
218 zfcp_erp_adapter_shutdown(adapter, 0); 208 zfcp_erp_adapter_shutdown(adapter, 0);
219 zfcp_erp_wait(adapter); 209 zfcp_erp_wait(adapter);
220 zfcp_adapter_scsi_unregister(adapter); 210 zfcp_adapter_scsi_unregister(adapter);
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
new file mode 100644
index 000000000000..826fb3b00605
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -0,0 +1,995 @@
1/*
2 *
3 * linux/drivers/s390/scsi/zfcp_dbf.c
4 *
5 * FCP adapter driver for IBM eServer zSeries
6 *
7 * Debugging facilities
8 *
9 * (C) Copyright IBM Corp. 2005
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#define ZFCP_DBF_REVISION "$Revision$"
27
28#include <asm/debug.h>
29#include <linux/ctype.h>
30#include "zfcp_ext.h"
31
32static u32 dbfsize = 4;
33
34module_param(dbfsize, uint, 0400);
35MODULE_PARM_DESC(dbfsize,
36 "number of pages for each debug feature area (default 4)");
37
38#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
39
40static inline int
41zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck)
42{
43 unsigned long long sec;
44 struct timespec xtime;
45 int len = 0;
46
47 stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
48 sec = stck >> 12;
49 do_div(sec, 1000000);
50 xtime.tv_sec = sec;
51 stck -= (sec * 1000000) << 12;
52 xtime.tv_nsec = ((stck * 1000) >> 12);
53 len += sprintf(out_buf + len, "%-24s%011lu:%06lu\n",
54 label, xtime.tv_sec, xtime.tv_nsec);
55
56 return len;
57}
58
59static int zfcp_dbf_tag(char *out_buf, const char *label, const char *tag)
60{
61 int len = 0, i;
62
63 len += sprintf(out_buf + len, "%-24s", label);
64 for (i = 0; i < ZFCP_DBF_TAG_SIZE; i++)
65 len += sprintf(out_buf + len, "%c", tag[i]);
66 len += sprintf(out_buf + len, "\n");
67
68 return len;
69}
70
71static int
72zfcp_dbf_view(char *out_buf, const char *label, const char *format, ...)
73{
74 va_list arg;
75 int len = 0;
76
77 len += sprintf(out_buf + len, "%-24s", label);
78 va_start(arg, format);
79 len += vsprintf(out_buf + len, format, arg);
80 va_end(arg);
81 len += sprintf(out_buf + len, "\n");
82
83 return len;
84}
85
86static int
87zfcp_dbf_view_dump(char *out_buf, const char *label,
88 char *buffer, int buflen, int offset, int total_size)
89{
90 int len = 0;
91
92 if (offset == 0)
93 len += sprintf(out_buf + len, "%-24s ", label);
94
95 while (buflen--) {
96 if (offset > 0) {
97 if ((offset % 32) == 0)
98 len += sprintf(out_buf + len, "\n%-24c ", ' ');
99 else if ((offset % 4) == 0)
100 len += sprintf(out_buf + len, " ");
101 }
102 len += sprintf(out_buf + len, "%02x", *buffer++);
103 if (++offset == total_size) {
104 len += sprintf(out_buf + len, "\n");
105 break;
106 }
107 }
108
109 if (total_size == 0)
110 len += sprintf(out_buf + len, "\n");
111
112 return len;
113}
114
115static inline int
116zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
117 debug_entry_t * entry, char *out_buf)
118{
119 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)DEBUG_DATA(entry);
120 int len = 0;
121
122 if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) {
123 len += zfcp_dbf_stck(out_buf + len, "timestamp",
124 entry->id.stck);
125 len += zfcp_dbf_view(out_buf + len, "cpu", "%02i",
126 entry->id.fields.cpuid);
127 } else {
128 len += zfcp_dbf_view_dump(out_buf + len, NULL,
129 dump->data,
130 dump->size,
131 dump->offset, dump->total_size);
132 if ((dump->offset + dump->size) == dump->total_size)
133 len += sprintf(out_buf + len, "\n");
134 }
135
136 return len;
137}
138
139inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
140{
141 struct zfcp_adapter *adapter = fsf_req->adapter;
142 struct fsf_qtcb *qtcb = fsf_req->qtcb;
143 union fsf_prot_status_qual *prot_status_qual =
144 &qtcb->prefix.prot_status_qual;
145 union fsf_status_qual *fsf_status_qual = &qtcb->header.fsf_status_qual;
146 struct scsi_cmnd *scsi_cmnd;
147 struct zfcp_port *port;
148 struct zfcp_unit *unit;
149 struct zfcp_send_els *send_els;
150 struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
151 struct zfcp_hba_dbf_record_response *response = &rec->type.response;
152 int level;
153 unsigned long flags;
154
155 spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
156 memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
157 strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE);
158
159 if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
160 (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
161 strncpy(rec->tag2, "perr", ZFCP_DBF_TAG_SIZE);
162 level = 1;
163 } else if (qtcb->header.fsf_status != FSF_GOOD) {
164 strncpy(rec->tag2, "ferr", ZFCP_DBF_TAG_SIZE);
165 level = 1;
166 } else if ((fsf_req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
167 (fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) {
168 strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE);
169 level = 4;
170 } else if ((prot_status_qual->doubleword[0] != 0) ||
171 (prot_status_qual->doubleword[1] != 0) ||
172 (fsf_status_qual->doubleword[0] != 0) ||
173 (fsf_status_qual->doubleword[1] != 0)) {
174 strncpy(rec->tag2, "qual", ZFCP_DBF_TAG_SIZE);
175 level = 3;
176 } else {
177 strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE);
178 level = 6;
179 }
180
181 response->fsf_command = fsf_req->fsf_command;
182 response->fsf_reqid = (unsigned long)fsf_req;
183 response->fsf_seqno = fsf_req->seq_no;
184 response->fsf_issued = fsf_req->issued;
185 response->fsf_prot_status = qtcb->prefix.prot_status;
186 response->fsf_status = qtcb->header.fsf_status;
187 memcpy(response->fsf_prot_status_qual,
188 prot_status_qual, FSF_PROT_STATUS_QUAL_SIZE);
189 memcpy(response->fsf_status_qual,
190 fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
191 response->fsf_req_status = fsf_req->status;
192 response->sbal_first = fsf_req->sbal_first;
193 response->sbal_curr = fsf_req->sbal_curr;
194 response->sbal_last = fsf_req->sbal_last;
195 response->pool = fsf_req->pool != NULL;
196 response->erp_action = (unsigned long)fsf_req->erp_action;
197
198 switch (fsf_req->fsf_command) {
199 case FSF_QTCB_FCP_CMND:
200 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
201 break;
202 scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
203 if (scsi_cmnd != NULL) {
204 response->data.send_fcp.scsi_cmnd
205 = (unsigned long)scsi_cmnd;
206 response->data.send_fcp.scsi_serial
207 = scsi_cmnd->serial_number;
208 }
209 break;
210
211 case FSF_QTCB_OPEN_PORT_WITH_DID:
212 case FSF_QTCB_CLOSE_PORT:
213 case FSF_QTCB_CLOSE_PHYSICAL_PORT:
214 port = (struct zfcp_port *)fsf_req->data;
215 response->data.port.wwpn = port->wwpn;
216 response->data.port.d_id = port->d_id;
217 response->data.port.port_handle = qtcb->header.port_handle;
218 break;
219
220 case FSF_QTCB_OPEN_LUN:
221 case FSF_QTCB_CLOSE_LUN:
222 unit = (struct zfcp_unit *)fsf_req->data;
223 port = unit->port;
224 response->data.unit.wwpn = port->wwpn;
225 response->data.unit.fcp_lun = unit->fcp_lun;
226 response->data.unit.port_handle = qtcb->header.port_handle;
227 response->data.unit.lun_handle = qtcb->header.lun_handle;
228 break;
229
230 case FSF_QTCB_SEND_ELS:
231 send_els = (struct zfcp_send_els *)fsf_req->data;
232 response->data.send_els.d_id = qtcb->bottom.support.d_id;
233 response->data.send_els.ls_code = send_els->ls_code >> 24;
234 break;
235
236 case FSF_QTCB_ABORT_FCP_CMND:
237 case FSF_QTCB_SEND_GENERIC:
238 case FSF_QTCB_EXCHANGE_CONFIG_DATA:
239 case FSF_QTCB_EXCHANGE_PORT_DATA:
240 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
241 case FSF_QTCB_UPLOAD_CONTROL_FILE:
242 break;
243 }
244
245 debug_event(adapter->hba_dbf, level,
246 rec, sizeof(struct zfcp_hba_dbf_record));
247 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
248}
249
250inline void
251zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
252 struct fsf_status_read_buffer *status_buffer)
253{
254 struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
255 unsigned long flags;
256
257 spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
258 memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
259 strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
260 strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
261
262 rec->type.status.failed = adapter->status_read_failed;
263 if (status_buffer != NULL) {
264 rec->type.status.status_type = status_buffer->status_type;
265 rec->type.status.status_subtype = status_buffer->status_subtype;
266 memcpy(&rec->type.status.queue_designator,
267 &status_buffer->queue_designator,
268 sizeof(struct fsf_queue_designator));
269
270 switch (status_buffer->status_type) {
271 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
272 rec->type.status.payload_size =
273 ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL;
274 break;
275
276 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
277 rec->type.status.payload_size =
278 ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD;
279 break;
280
281 case FSF_STATUS_READ_LINK_DOWN:
282 switch (status_buffer->status_subtype) {
283 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
284 case FSF_STATUS_READ_SUB_FDISC_FAILED:
285 rec->type.status.payload_size =
286 sizeof(struct fsf_link_down_info);
287 }
288 break;
289
290 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
291 rec->type.status.payload_size =
292 ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT;
293 break;
294 }
295 memcpy(&rec->type.status.payload,
296 &status_buffer->payload, rec->type.status.payload_size);
297 }
298
299 debug_event(adapter->hba_dbf, 2,
300 rec, sizeof(struct zfcp_hba_dbf_record));
301 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
302}
303
304inline void
305zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
306 unsigned int qdio_error, unsigned int siga_error,
307 int sbal_index, int sbal_count)
308{
309 struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
310 unsigned long flags;
311
312 spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
313 memset(rec, 0, sizeof(struct zfcp_hba_dbf_record));
314 strncpy(rec->tag, "qdio", ZFCP_DBF_TAG_SIZE);
315 rec->type.qdio.status = status;
316 rec->type.qdio.qdio_error = qdio_error;
317 rec->type.qdio.siga_error = siga_error;
318 rec->type.qdio.sbal_index = sbal_index;
319 rec->type.qdio.sbal_count = sbal_count;
320 debug_event(adapter->hba_dbf, 0,
321 rec, sizeof(struct zfcp_hba_dbf_record));
322 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
323}
324
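zfcp_hba_dbf_event_qdio() is intended to be called from the QDIO completion path when the hardware signals an error. A minimal sketch of such a call site follows, assuming a hypothetical input handler and that the adapter pointer is kept as driver data of the ccw device (handler name and parameter set are illustrative, not taken from this code):

static void example_qdio_input_handler(struct ccw_device *ccw_device,
				       unsigned int status,
				       unsigned int qdio_error,
				       unsigned int siga_error,
				       int first_element, int elements)
{
	/* hypothetical: adapter stored as driver data of the ccw device */
	struct zfcp_adapter *adapter = dev_get_drvdata(&ccw_device->dev);

	/* trace only if QDIO or SIGA reported a problem */
	if (qdio_error || siga_error)
		zfcp_hba_dbf_event_qdio(adapter, status, qdio_error,
					siga_error, first_element, elements);
}
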
325static inline int
326zfcp_hba_dbf_view_response(char *out_buf,
327 struct zfcp_hba_dbf_record_response *rec)
328{
329 int len = 0;
330
331 len += zfcp_dbf_view(out_buf + len, "fsf_command", "0x%08x",
332 rec->fsf_command);
333 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
334 rec->fsf_reqid);
335 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
336 rec->fsf_seqno);
337 len += zfcp_dbf_stck(out_buf + len, "fsf_issued", rec->fsf_issued);
338 len += zfcp_dbf_view(out_buf + len, "fsf_prot_status", "0x%08x",
339 rec->fsf_prot_status);
340 len += zfcp_dbf_view(out_buf + len, "fsf_status", "0x%08x",
341 rec->fsf_status);
342 len += zfcp_dbf_view_dump(out_buf + len, "fsf_prot_status_qual",
343 rec->fsf_prot_status_qual,
344 FSF_PROT_STATUS_QUAL_SIZE,
345 0, FSF_PROT_STATUS_QUAL_SIZE);
346 len += zfcp_dbf_view_dump(out_buf + len, "fsf_status_qual",
347 rec->fsf_status_qual,
348 FSF_STATUS_QUALIFIER_SIZE,
349 0, FSF_STATUS_QUALIFIER_SIZE);
350 len += zfcp_dbf_view(out_buf + len, "fsf_req_status", "0x%08x",
351 rec->fsf_req_status);
352 len += zfcp_dbf_view(out_buf + len, "sbal_first", "0x%02x",
353 rec->sbal_first);
354 len += zfcp_dbf_view(out_buf + len, "sbal_curr", "0x%02x",
355 rec->sbal_curr);
356 len += zfcp_dbf_view(out_buf + len, "sbal_last", "0x%02x",
357 rec->sbal_last);
358 len += zfcp_dbf_view(out_buf + len, "pool", "0x%02x", rec->pool);
359
360 switch (rec->fsf_command) {
361 case FSF_QTCB_FCP_CMND:
362 if (rec->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
363 break;
364 len += zfcp_dbf_view(out_buf + len, "scsi_cmnd", "0x%0Lx",
365 rec->data.send_fcp.scsi_cmnd);
366 len += zfcp_dbf_view(out_buf + len, "scsi_serial", "0x%016Lx",
367 rec->data.send_fcp.scsi_serial);
368 break;
369
370 case FSF_QTCB_OPEN_PORT_WITH_DID:
371 case FSF_QTCB_CLOSE_PORT:
372 case FSF_QTCB_CLOSE_PHYSICAL_PORT:
373 len += zfcp_dbf_view(out_buf + len, "wwpn", "0x%016Lx",
374 rec->data.port.wwpn);
375 len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x",
376 rec->data.port.d_id);
377 len += zfcp_dbf_view(out_buf + len, "port_handle", "0x%08x",
378 rec->data.port.port_handle);
379 break;
380
381 case FSF_QTCB_OPEN_LUN:
382 case FSF_QTCB_CLOSE_LUN:
383 len += zfcp_dbf_view(out_buf + len, "wwpn", "0x%016Lx",
384 rec->data.unit.wwpn);
385 len += zfcp_dbf_view(out_buf + len, "fcp_lun", "0x%016Lx",
386 rec->data.unit.fcp_lun);
387 len += zfcp_dbf_view(out_buf + len, "port_handle", "0x%08x",
388 rec->data.unit.port_handle);
389 len += zfcp_dbf_view(out_buf + len, "lun_handle", "0x%08x",
390 rec->data.unit.lun_handle);
391 break;
392
393 case FSF_QTCB_SEND_ELS:
394 len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x",
395 rec->data.send_els.d_id);
396 len += zfcp_dbf_view(out_buf + len, "ls_code", "0x%02x",
397 rec->data.send_els.ls_code);
398 break;
399
400 case FSF_QTCB_ABORT_FCP_CMND:
401 case FSF_QTCB_SEND_GENERIC:
402 case FSF_QTCB_EXCHANGE_CONFIG_DATA:
403 case FSF_QTCB_EXCHANGE_PORT_DATA:
404 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
405 case FSF_QTCB_UPLOAD_CONTROL_FILE:
406 break;
407 }
408
409 return len;
410}
411
412static inline int
413zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
414{
415 int len = 0;
416
417 len += zfcp_dbf_view(out_buf + len, "failed", "0x%02x", rec->failed);
418 len += zfcp_dbf_view(out_buf + len, "status_type", "0x%08x",
419 rec->status_type);
420 len += zfcp_dbf_view(out_buf + len, "status_subtype", "0x%08x",
421 rec->status_subtype);
422 len += zfcp_dbf_view_dump(out_buf + len, "queue_designator",
423 (char *)&rec->queue_designator,
424 sizeof(struct fsf_queue_designator),
425 0, sizeof(struct fsf_queue_designator));
426 len += zfcp_dbf_view_dump(out_buf + len, "payload",
427 (char *)&rec->payload,
428 rec->payload_size, 0, rec->payload_size);
429
430 return len;
431}
432
433static inline int
434zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec)
435{
436 int len = 0;
437
438 len += zfcp_dbf_view(out_buf + len, "status", "0x%08x", rec->status);
439 len += zfcp_dbf_view(out_buf + len, "qdio_error", "0x%08x",
440 rec->qdio_error);
441 len += zfcp_dbf_view(out_buf + len, "siga_error", "0x%08x",
442 rec->siga_error);
443 len += zfcp_dbf_view(out_buf + len, "sbal_index", "0x%02x",
444 rec->sbal_index);
445 len += zfcp_dbf_view(out_buf + len, "sbal_count", "0x%02x",
446 rec->sbal_count);
447
448 return len;
449}
450
451static int
452zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view,
453 char *out_buf, const char *in_buf)
454{
455 struct zfcp_hba_dbf_record *rec = (struct zfcp_hba_dbf_record *)in_buf;
456 int len = 0;
457
458 if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
459 return 0;
460
461 len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
462 if (isalpha(rec->tag2[0]))
463 len += zfcp_dbf_tag(out_buf + len, "tag2", rec->tag2);
464 if (strncmp(rec->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0)
465 len += zfcp_hba_dbf_view_response(out_buf + len,
466 &rec->type.response);
467 else if (strncmp(rec->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0)
468 len += zfcp_hba_dbf_view_status(out_buf + len,
469 &rec->type.status);
470 else if (strncmp(rec->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
471 len += zfcp_hba_dbf_view_qdio(out_buf + len, &rec->type.qdio);
472
473 len += sprintf(out_buf + len, "\n");
474
475 return len;
476}
477
478struct debug_view zfcp_hba_dbf_view = {
479 "structured",
480 NULL,
481 &zfcp_dbf_view_header,
482 &zfcp_hba_dbf_view_format,
483 NULL,
484 NULL
485};
486
487inline void
488_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
489 u32 s_id, u32 d_id, void *buffer, int buflen)
490{
491 struct zfcp_send_ct *send_ct = (struct zfcp_send_ct *)fsf_req->data;
492 struct zfcp_port *port = send_ct->port;
493 struct zfcp_adapter *adapter = port->adapter;
494 struct ct_hdr *header = (struct ct_hdr *)buffer;
495 struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
496 struct zfcp_san_dbf_record_ct *ct = &rec->type.ct;
497 unsigned long flags;
498
499 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
500 memset(rec, 0, sizeof(struct zfcp_san_dbf_record));
501 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
502 rec->fsf_reqid = (unsigned long)fsf_req;
503 rec->fsf_seqno = fsf_req->seq_no;
504 rec->s_id = s_id;
505 rec->d_id = d_id;
506 if (strncmp(tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
507 ct->type.request.cmd_req_code = header->cmd_rsp_code;
508 ct->type.request.revision = header->revision;
509 ct->type.request.gs_type = header->gs_type;
510 ct->type.request.gs_subtype = header->gs_subtype;
511 ct->type.request.options = header->options;
512 ct->type.request.max_res_size = header->max_res_size;
513 } else if (strncmp(tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
514 ct->type.response.cmd_rsp_code = header->cmd_rsp_code;
515 ct->type.response.revision = header->revision;
516 ct->type.response.reason_code = header->reason_code;
517 ct->type.response.reason_code_expl = header->reason_code_expl;
518 ct->type.response.vendor_unique = header->vendor_unique;
519 }
520 ct->payload_size =
521 min(buflen - (int)sizeof(struct ct_hdr), ZFCP_DBF_CT_PAYLOAD);
522 memcpy(ct->payload, buffer + sizeof(struct ct_hdr), ct->payload_size);
523 debug_event(adapter->san_dbf, 3,
524 rec, sizeof(struct zfcp_san_dbf_record));
525 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
526}
527
528inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
529{
530 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
531 struct zfcp_port *port = ct->port;
532 struct zfcp_adapter *adapter = port->adapter;
533
534 _zfcp_san_dbf_event_common_ct("octc", fsf_req,
535 fc_host_port_id(adapter->scsi_host),
536 port->d_id, zfcp_sg_to_address(ct->req),
537 ct->req->length);
538}
539
540inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
541{
542 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
543 struct zfcp_port *port = ct->port;
544 struct zfcp_adapter *adapter = port->adapter;
545
546 _zfcp_san_dbf_event_common_ct("rctc", fsf_req, port->d_id,
547 fc_host_port_id(adapter->scsi_host),
548 zfcp_sg_to_address(ct->resp),
549 ct->resp->length);
550}
551
552static inline void
553_zfcp_san_dbf_event_common_els(const char *tag, int level,
554 struct zfcp_fsf_req *fsf_req, u32 s_id,
555 u32 d_id, u8 ls_code, void *buffer, int buflen)
556{
557 struct zfcp_adapter *adapter = fsf_req->adapter;
558 struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
559 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
560 unsigned long flags;
561 int offset = 0;
562
563 spin_lock_irqsave(&adapter->san_dbf_lock, flags);
564 do {
565 memset(rec, 0, sizeof(struct zfcp_san_dbf_record));
566 if (offset == 0) {
567 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
568 rec->fsf_reqid = (unsigned long)fsf_req;
569 rec->fsf_seqno = fsf_req->seq_no;
570 rec->s_id = s_id;
571 rec->d_id = d_id;
572 rec->type.els.ls_code = ls_code;
573 buflen = min(buflen, ZFCP_DBF_ELS_MAX_PAYLOAD);
574 rec->type.els.payload_size = buflen;
575 memcpy(rec->type.els.payload,
576 buffer, min(buflen, ZFCP_DBF_ELS_PAYLOAD));
577 offset += min(buflen, ZFCP_DBF_ELS_PAYLOAD);
578 } else {
579 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
580 dump->total_size = buflen;
581 dump->offset = offset;
582 dump->size = min(buflen - offset,
583 (int)sizeof(struct zfcp_san_dbf_record)
584 - (int)sizeof(struct zfcp_dbf_dump));
585 memcpy(dump->data, buffer + offset, dump->size);
586 offset += dump->size;
587 }
588 debug_event(adapter->san_dbf, level,
589 rec, sizeof(struct zfcp_san_dbf_record));
590 } while (offset < buflen);
591 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
592}
593
594inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
595{
596 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
597
598 _zfcp_san_dbf_event_common_els("oels", 2, fsf_req,
599 fc_host_port_id(els->adapter->scsi_host),
600 els->d_id,
601 *(u8 *) zfcp_sg_to_address(els->req),
602 zfcp_sg_to_address(els->req),
603 els->req->length);
604}
605
606inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
607{
608 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
609
610 _zfcp_san_dbf_event_common_els("rels", 2, fsf_req, els->d_id,
611 fc_host_port_id(els->adapter->scsi_host),
612 *(u8 *) zfcp_sg_to_address(els->req),
613 zfcp_sg_to_address(els->resp),
614 els->resp->length);
615}
616
617inline void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
618{
619 struct zfcp_adapter *adapter = fsf_req->adapter;
620 struct fsf_status_read_buffer *status_buffer =
621 (struct fsf_status_read_buffer *)fsf_req->data;
622 int length = (int)status_buffer->length -
623 (int)((void *)&status_buffer->payload - (void *)status_buffer);
624
625 _zfcp_san_dbf_event_common_els("iels", 1, fsf_req, status_buffer->d_id,
626 fc_host_port_id(adapter->scsi_host),
627 *(u8 *) status_buffer->payload,
628 (void *)status_buffer->payload, length);
629}
630
631static int
632zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view,
633 char *out_buf, const char *in_buf)
634{
635 struct zfcp_san_dbf_record *rec = (struct zfcp_san_dbf_record *)in_buf;
636 char *buffer = NULL;
637 int buflen = 0, total = 0;
638 int len = 0;
639
640 if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
641 return 0;
642
643 len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
644 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
645 rec->fsf_reqid);
646 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
647 rec->fsf_seqno);
648 len += zfcp_dbf_view(out_buf + len, "s_id", "0x%06x", rec->s_id);
649 len += zfcp_dbf_view(out_buf + len, "d_id", "0x%06x", rec->d_id);
650
651 if (strncmp(rec->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
652 len += zfcp_dbf_view(out_buf + len, "cmd_req_code", "0x%04x",
653 rec->type.ct.type.request.cmd_req_code);
654 len += zfcp_dbf_view(out_buf + len, "revision", "0x%02x",
655 rec->type.ct.type.request.revision);
656 len += zfcp_dbf_view(out_buf + len, "gs_type", "0x%02x",
657 rec->type.ct.type.request.gs_type);
658 len += zfcp_dbf_view(out_buf + len, "gs_subtype", "0x%02x",
659 rec->type.ct.type.request.gs_subtype);
660 len += zfcp_dbf_view(out_buf + len, "options", "0x%02x",
661 rec->type.ct.type.request.options);
662 len += zfcp_dbf_view(out_buf + len, "max_res_size", "0x%04x",
663 rec->type.ct.type.request.max_res_size);
664 total = rec->type.ct.payload_size;
665 buffer = rec->type.ct.payload;
666 buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
667 } else if (strncmp(rec->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
668 len += zfcp_dbf_view(out_buf + len, "cmd_rsp_code", "0x%04x",
669 rec->type.ct.type.response.cmd_rsp_code);
670 len += zfcp_dbf_view(out_buf + len, "revision", "0x%02x",
671 rec->type.ct.type.response.revision);
672 len += zfcp_dbf_view(out_buf + len, "reason_code", "0x%02x",
673 rec->type.ct.type.response.reason_code);
674 len +=
675 zfcp_dbf_view(out_buf + len, "reason_code_expl", "0x%02x",
676 rec->type.ct.type.response.reason_code_expl);
677 len +=
678 zfcp_dbf_view(out_buf + len, "vendor_unique", "0x%02x",
679 rec->type.ct.type.response.vendor_unique);
680 total = rec->type.ct.payload_size;
681 buffer = rec->type.ct.payload;
682 buflen = min(total, ZFCP_DBF_CT_PAYLOAD);
683 } else if (strncmp(rec->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
684 strncmp(rec->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
685 strncmp(rec->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
686 len += zfcp_dbf_view(out_buf + len, "ls_code", "0x%02x",
687 rec->type.els.ls_code);
688 total = rec->type.els.payload_size;
689 buffer = rec->type.els.payload;
690 buflen = min(total, ZFCP_DBF_ELS_PAYLOAD);
691 }
692
693 len += zfcp_dbf_view_dump(out_buf + len, "payload",
694 buffer, buflen, 0, total);
695
696 if (buflen == total)
697 len += sprintf(out_buf + len, "\n");
698
699 return len;
700}
701
702struct debug_view zfcp_san_dbf_view = {
703 "structured",
704 NULL,
705 &zfcp_dbf_view_header,
706 &zfcp_san_dbf_view_format,
707 NULL,
708 NULL
709};
710
711static inline void
712_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
713 struct zfcp_adapter *adapter,
714 struct scsi_cmnd *scsi_cmnd,
715 struct zfcp_fsf_req *new_fsf_req)
716{
717 struct zfcp_fsf_req *fsf_req =
718 (struct zfcp_fsf_req *)scsi_cmnd->host_scribble;
719 struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
720 struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
721 unsigned long flags;
722 struct fcp_rsp_iu *fcp_rsp;
723 char *fcp_rsp_info = NULL, *fcp_sns_info = NULL;
724 int offset = 0, buflen = 0;
725
726 spin_lock_irqsave(&adapter->scsi_dbf_lock, flags);
727 do {
728 memset(rec, 0, sizeof(struct zfcp_scsi_dbf_record));
729 if (offset == 0) {
730 strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
731 strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
732 if (scsi_cmnd->device) {
733 rec->scsi_id = scsi_cmnd->device->id;
734 rec->scsi_lun = scsi_cmnd->device->lun;
735 }
736 rec->scsi_result = scsi_cmnd->result;
737 rec->scsi_cmnd = (unsigned long)scsi_cmnd;
738 rec->scsi_serial = scsi_cmnd->serial_number;
739 memcpy(rec->scsi_opcode,
740 &scsi_cmnd->cmnd,
741 min((int)scsi_cmnd->cmd_len,
742 ZFCP_DBF_SCSI_OPCODE));
743 rec->scsi_retries = scsi_cmnd->retries;
744 rec->scsi_allowed = scsi_cmnd->allowed;
745 if (fsf_req != NULL) {
746 fcp_rsp = (struct fcp_rsp_iu *)
747 &(fsf_req->qtcb->bottom.io.fcp_rsp);
748 fcp_rsp_info =
749 zfcp_get_fcp_rsp_info_ptr(fcp_rsp);
750 fcp_sns_info =
751 zfcp_get_fcp_sns_info_ptr(fcp_rsp);
752
753 rec->type.fcp.rsp_validity =
754 fcp_rsp->validity.value;
755 rec->type.fcp.rsp_scsi_status =
756 fcp_rsp->scsi_status;
757 rec->type.fcp.rsp_resid = fcp_rsp->fcp_resid;
758 if (fcp_rsp->validity.bits.fcp_rsp_len_valid)
759 rec->type.fcp.rsp_code =
760 *(fcp_rsp_info + 3);
761 if (fcp_rsp->validity.bits.fcp_sns_len_valid) {
762 buflen = min((int)fcp_rsp->fcp_sns_len,
763 ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
764 rec->type.fcp.sns_info_len = buflen;
765 memcpy(rec->type.fcp.sns_info,
766 fcp_sns_info,
767 min(buflen,
768 ZFCP_DBF_SCSI_FCP_SNS_INFO));
769 offset += min(buflen,
770 ZFCP_DBF_SCSI_FCP_SNS_INFO);
771 }
772
773 rec->fsf_reqid = (unsigned long)fsf_req;
774 rec->fsf_seqno = fsf_req->seq_no;
775 rec->fsf_issued = fsf_req->issued;
776 }
777 if (new_fsf_req != NULL) {
778 rec->type.new_fsf_req.fsf_reqid =
779 (unsigned long)
780 new_fsf_req;
781 rec->type.new_fsf_req.fsf_seqno =
782 new_fsf_req->seq_no;
783 rec->type.new_fsf_req.fsf_issued =
784 new_fsf_req->issued;
785 }
786 } else {
787 strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE);
788 dump->total_size = buflen;
789 dump->offset = offset;
790 dump->size = min(buflen - offset,
791 (int)sizeof(struct
792 zfcp_scsi_dbf_record) -
793 (int)sizeof(struct zfcp_dbf_dump));
794 memcpy(dump->data, fcp_sns_info + offset, dump->size);
795 offset += dump->size;
796 }
797 debug_event(adapter->scsi_dbf, level,
798 rec, sizeof(struct zfcp_scsi_dbf_record));
799 } while (offset < buflen);
800 spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags);
801}
802
803inline void
804zfcp_scsi_dbf_event_result(const char *tag, int level,
805 struct zfcp_adapter *adapter,
806 struct scsi_cmnd *scsi_cmnd)
807{
808 _zfcp_scsi_dbf_event_common("rslt",
809 tag, level, adapter, scsi_cmnd, NULL);
810}
811
812inline void
813zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
814 struct scsi_cmnd *scsi_cmnd,
815 struct zfcp_fsf_req *new_fsf_req)
816{
817 _zfcp_scsi_dbf_event_common("abrt",
818 tag, 1, adapter, scsi_cmnd, new_fsf_req);
819}
820
821inline void
822zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
823 struct scsi_cmnd *scsi_cmnd)
824{
825 struct zfcp_adapter *adapter = unit->port->adapter;
826
827 _zfcp_scsi_dbf_event_common(flag == FCP_TARGET_RESET ? "trst" : "lrst",
828 tag, 1, adapter, scsi_cmnd, NULL);
829}
830
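The three wrappers above differ only in the fixed first tag ("rslt", "abrt", "trst"/"lrst"); the second tag and, for the result trace, the level are chosen by the caller. A minimal sketch of how a SCSI completion path might grade its trace, using a low level (always recorded) for failed commands and a verbose level for successful ones; the tag strings and the helper name are illustrative assumptions:

/* illustrative helper, not part of this patch */
static void example_trace_scsi_completion(struct zfcp_adapter *adapter,
					  struct scsi_cmnd *scsi_cmnd)
{
	if (scsi_cmnd->result)
		/* failures: level 1, recorded even at a low trace level */
		zfcp_scsi_dbf_event_result("fail", 1, adapter, scsi_cmnd);
	else
		/* normal completion: level 6, kept only when tracing verbosely */
		zfcp_scsi_dbf_event_result("norm", 6, adapter, scsi_cmnd);
}
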
831static int
832zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
833 char *out_buf, const char *in_buf)
834{
835 struct zfcp_scsi_dbf_record *rec =
836 (struct zfcp_scsi_dbf_record *)in_buf;
837 int len = 0;
838
839 if (strncmp(rec->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
840 return 0;
841
842 len += zfcp_dbf_tag(out_buf + len, "tag", rec->tag);
843 len += zfcp_dbf_tag(out_buf + len, "tag2", rec->tag2);
844 len += zfcp_dbf_view(out_buf + len, "scsi_id", "0x%08x", rec->scsi_id);
845 len += zfcp_dbf_view(out_buf + len, "scsi_lun", "0x%08x",
846 rec->scsi_lun);
847 len += zfcp_dbf_view(out_buf + len, "scsi_result", "0x%08x",
848 rec->scsi_result);
849 len += zfcp_dbf_view(out_buf + len, "scsi_cmnd", "0x%0Lx",
850 rec->scsi_cmnd);
851 len += zfcp_dbf_view(out_buf + len, "scsi_serial", "0x%016Lx",
852 rec->scsi_serial);
853 len += zfcp_dbf_view_dump(out_buf + len, "scsi_opcode",
854 rec->scsi_opcode,
855 ZFCP_DBF_SCSI_OPCODE,
856 0, ZFCP_DBF_SCSI_OPCODE);
857 len += zfcp_dbf_view(out_buf + len, "scsi_retries", "0x%02x",
858 rec->scsi_retries);
859 len += zfcp_dbf_view(out_buf + len, "scsi_allowed", "0x%02x",
860 rec->scsi_allowed);
861 len += zfcp_dbf_view(out_buf + len, "fsf_reqid", "0x%0Lx",
862 rec->fsf_reqid);
863 len += zfcp_dbf_view(out_buf + len, "fsf_seqno", "0x%08x",
864 rec->fsf_seqno);
865 len += zfcp_dbf_stck(out_buf + len, "fsf_issued", rec->fsf_issued);
866 if (strncmp(rec->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) {
867 len +=
868 zfcp_dbf_view(out_buf + len, "fcp_rsp_validity", "0x%02x",
869 rec->type.fcp.rsp_validity);
870 len +=
871 zfcp_dbf_view(out_buf + len, "fcp_rsp_scsi_status",
872 "0x%02x", rec->type.fcp.rsp_scsi_status);
873 len +=
874 zfcp_dbf_view(out_buf + len, "fcp_rsp_resid", "0x%08x",
875 rec->type.fcp.rsp_resid);
876 len +=
877 zfcp_dbf_view(out_buf + len, "fcp_rsp_code", "0x%08x",
878 rec->type.fcp.rsp_code);
879 len +=
880 zfcp_dbf_view(out_buf + len, "fcp_sns_info_len", "0x%08x",
881 rec->type.fcp.sns_info_len);
882 len +=
883 zfcp_dbf_view_dump(out_buf + len, "fcp_sns_info",
884 rec->type.fcp.sns_info,
885 min((int)rec->type.fcp.sns_info_len,
886 ZFCP_DBF_SCSI_FCP_SNS_INFO), 0,
887 rec->type.fcp.sns_info_len);
888 } else if (strncmp(rec->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) {
889 len += zfcp_dbf_view(out_buf + len, "fsf_reqid_abort", "0x%0Lx",
890 rec->type.new_fsf_req.fsf_reqid);
891 len += zfcp_dbf_view(out_buf + len, "fsf_seqno_abort", "0x%08x",
892 rec->type.new_fsf_req.fsf_seqno);
893 len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
894 rec->type.new_fsf_req.fsf_issued);
895 } else if ((strncmp(rec->tag, "trst", ZFCP_DBF_TAG_SIZE) == 0) ||
896 (strncmp(rec->tag, "lrst", ZFCP_DBF_TAG_SIZE) == 0)) {
897 len += zfcp_dbf_view(out_buf + len, "fsf_reqid_reset", "0x%0Lx",
898 rec->type.new_fsf_req.fsf_reqid);
899 len += zfcp_dbf_view(out_buf + len, "fsf_seqno_reset", "0x%08x",
900 rec->type.new_fsf_req.fsf_seqno);
901 len += zfcp_dbf_stck(out_buf + len, "fsf_issued",
902 rec->type.new_fsf_req.fsf_issued);
903 }
904
905 len += sprintf(out_buf + len, "\n");
906
907 return len;
908}
909
910struct debug_view zfcp_scsi_dbf_view = {
911 "structured",
912 NULL,
913 &zfcp_dbf_view_header,
914 &zfcp_scsi_dbf_view_format,
915 NULL,
916 NULL
917};
918
919/**
920 * zfcp_adapter_debug_register - registers debug feature for an adapter
921 * @adapter: pointer to adapter for which debug features should be registered
922 * return: -ENOMEM on error, 0 otherwise
923 */
924int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
925{
926 char dbf_name[DEBUG_MAX_NAME_LEN];
927
928 /* debug feature area which records recovery activity */
929 spin_lock_init(&adapter->erp_dbf_lock);
930 sprintf(dbf_name, "zfcp_%s_erp", zfcp_get_busid_by_adapter(adapter));
931 adapter->erp_dbf = debug_register(dbf_name, dbfsize, 2,
932 sizeof(struct zfcp_erp_dbf_record));
933 if (!adapter->erp_dbf)
934 goto failed;
935 debug_register_view(adapter->erp_dbf, &debug_hex_ascii_view);
936 debug_set_level(adapter->erp_dbf, 3);
937
938 /* debug feature area which records HBA (FSF and QDIO) conditions */
939 spin_lock_init(&adapter->hba_dbf_lock);
940 sprintf(dbf_name, "zfcp_%s_hba", zfcp_get_busid_by_adapter(adapter));
941 adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1,
942 sizeof(struct zfcp_hba_dbf_record));
943 if (!adapter->hba_dbf)
944 goto failed;
945 debug_register_view(adapter->hba_dbf, &debug_hex_ascii_view);
946 debug_register_view(adapter->hba_dbf, &zfcp_hba_dbf_view);
947 debug_set_level(adapter->hba_dbf, 3);
948
949 /* debug feature area which records SAN command failures and recovery */
950 spin_lock_init(&adapter->san_dbf_lock);
951 sprintf(dbf_name, "zfcp_%s_san", zfcp_get_busid_by_adapter(adapter));
952 adapter->san_dbf = debug_register(dbf_name, dbfsize, 1,
953 sizeof(struct zfcp_san_dbf_record));
954 if (!adapter->san_dbf)
955 goto failed;
956 debug_register_view(adapter->san_dbf, &debug_hex_ascii_view);
957 debug_register_view(adapter->san_dbf, &zfcp_san_dbf_view);
958 debug_set_level(adapter->san_dbf, 6);
959
960 /* debug feature area which records SCSI command failures and recovery */
961 spin_lock_init(&adapter->scsi_dbf_lock);
962 sprintf(dbf_name, "zfcp_%s_scsi", zfcp_get_busid_by_adapter(adapter));
963 adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1,
964 sizeof(struct zfcp_scsi_dbf_record));
965 if (!adapter->scsi_dbf)
966 goto failed;
967 debug_register_view(adapter->scsi_dbf, &debug_hex_ascii_view);
968 debug_register_view(adapter->scsi_dbf, &zfcp_scsi_dbf_view);
969 debug_set_level(adapter->scsi_dbf, 3);
970
971 return 0;
972
973 failed:
974 zfcp_adapter_debug_unregister(adapter);
975
976 return -ENOMEM;
977}
978
979/**
980 * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
981 * @adapter: pointer to adapter for which debug features should be unregistered
982 */
983void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
984{
985 debug_unregister(adapter->scsi_dbf);
986 debug_unregister(adapter->san_dbf);
987 debug_unregister(adapter->hba_dbf);
988 debug_unregister(adapter->erp_dbf);
989 adapter->scsi_dbf = NULL;
990 adapter->san_dbf = NULL;
991 adapter->hba_dbf = NULL;
992 adapter->erp_dbf = NULL;
993}
994
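zfcp_adapter_debug_register() either brings up all four trace areas or returns -ENOMEM after tearing the partially registered areas down again via zfcp_adapter_debug_unregister(), so a caller only has to pair the two functions around the adapter's lifetime. A minimal sketch of that pairing, with hypothetical setup/teardown function names:

/* illustrative pairing only; function names are assumptions */
static int example_adapter_setup(struct zfcp_adapter *adapter)
{
	int retval;

	retval = zfcp_adapter_debug_register(adapter);
	if (retval)
		return retval;	/* -ENOMEM, no trace areas left registered */

	/* ... remaining adapter initialization ... */
	return 0;
}

static void example_adapter_teardown(struct zfcp_adapter *adapter)
{
	/* ... remaining adapter cleanup ... */
	zfcp_adapter_debug_unregister(adapter);
}
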
995#undef ZFCP_LOG_AREA
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 455e902533a9..d81b737d68cc 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -66,7 +66,7 @@
66/********************* GENERAL DEFINES *********************************/ 66/********************* GENERAL DEFINES *********************************/
67 67
68/* zfcp version number, it consists of major, minor, and patch-level number */ 68/* zfcp version number, it consists of major, minor, and patch-level number */
69#define ZFCP_VERSION "4.3.0" 69#define ZFCP_VERSION "4.5.0"
70 70
71/** 71/**
72 * zfcp_sg_to_address - determine kernel address from struct scatterlist 72 * zfcp_sg_to_address - determine kernel address from struct scatterlist
@@ -154,13 +154,17 @@ typedef u32 scsi_lun_t;
154#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100 154#define ZFCP_EXCHANGE_CONFIG_DATA_FIRST_SLEEP 100
155#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7 155#define ZFCP_EXCHANGE_CONFIG_DATA_RETRIES 7
156 156
157/* Retry 5 times every 2 seconds, then every minute */
158#define ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES 5
159#define ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP 200
160#define ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP 6000
161
157/* timeout value for "default timer" for fsf requests */ 162/* timeout value for "default timer" for fsf requests */
158#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ); 163#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ);
159 164
160/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ 165/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
161 166
162typedef unsigned long long wwn_t; 167typedef unsigned long long wwn_t;
163typedef unsigned int fc_id_t;
164typedef unsigned long long fcp_lun_t; 168typedef unsigned long long fcp_lun_t;
165/* data length field may be at variable position in FCP-2 FCP_CMND IU */ 169/* data length field may be at variable position in FCP-2 FCP_CMND IU */
166typedef unsigned int fcp_dl_t; 170typedef unsigned int fcp_dl_t;
@@ -281,6 +285,171 @@ struct fcp_logo {
281} __attribute__((packed)); 285} __attribute__((packed));
282 286
283/* 287/*
288 * DBF stuff
289 */
290#define ZFCP_DBF_TAG_SIZE 4
291
292struct zfcp_dbf_dump {
293 u8 tag[ZFCP_DBF_TAG_SIZE];
294 u32 total_size; /* size of total dump data */
295 u32 offset; /* how much data has already been dumped */
296 u32 size; /* how much data comes with this record */
297 u8 data[]; /* dump data */
298} __attribute__ ((packed));
299
300/* FIXME: to be inflated when reworking the erp dbf */
301struct zfcp_erp_dbf_record {
302 u8 dummy[16];
303} __attribute__ ((packed));
304
305struct zfcp_hba_dbf_record_response {
306 u32 fsf_command;
307 u64 fsf_reqid;
308 u32 fsf_seqno;
309 u64 fsf_issued;
310 u32 fsf_prot_status;
311 u32 fsf_status;
312 u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
313 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
314 u32 fsf_req_status;
315 u8 sbal_first;
316 u8 sbal_curr;
317 u8 sbal_last;
318 u8 pool;
319 u64 erp_action;
320 union {
321 struct {
322 u64 scsi_cmnd;
323 u64 scsi_serial;
324 } send_fcp;
325 struct {
326 u64 wwpn;
327 u32 d_id;
328 u32 port_handle;
329 } port;
330 struct {
331 u64 wwpn;
332 u64 fcp_lun;
333 u32 port_handle;
334 u32 lun_handle;
335 } unit;
336 struct {
337 u32 d_id;
338 u8 ls_code;
339 } send_els;
340 } data;
341} __attribute__ ((packed));
342
343struct zfcp_hba_dbf_record_status {
344 u8 failed;
345 u32 status_type;
346 u32 status_subtype;
347 struct fsf_queue_designator
348 queue_designator;
349 u32 payload_size;
350#define ZFCP_DBF_UNSOL_PAYLOAD 80
351#define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL 32
352#define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD 56
353#define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT (2 * sizeof(u32))
354 u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
355} __attribute__ ((packed));
356
357struct zfcp_hba_dbf_record_qdio {
358 u32 status;
359 u32 qdio_error;
360 u32 siga_error;
361 u8 sbal_index;
362 u8 sbal_count;
363} __attribute__ ((packed));
364
365struct zfcp_hba_dbf_record {
366 u8 tag[ZFCP_DBF_TAG_SIZE];
367 u8 tag2[ZFCP_DBF_TAG_SIZE];
368 union {
369 struct zfcp_hba_dbf_record_response response;
370 struct zfcp_hba_dbf_record_status status;
371 struct zfcp_hba_dbf_record_qdio qdio;
372 } type;
373} __attribute__ ((packed));
374
375struct zfcp_san_dbf_record_ct {
376 union {
377 struct {
378 u16 cmd_req_code;
379 u8 revision;
380 u8 gs_type;
381 u8 gs_subtype;
382 u8 options;
383 u16 max_res_size;
384 } request;
385 struct {
386 u16 cmd_rsp_code;
387 u8 revision;
388 u8 reason_code;
389 u8 reason_code_expl;
390 u8 vendor_unique;
391 } response;
392 } type;
393 u32 payload_size;
394#define ZFCP_DBF_CT_PAYLOAD 24
395 u8 payload[ZFCP_DBF_CT_PAYLOAD];
396} __attribute__ ((packed));
397
398struct zfcp_san_dbf_record_els {
399 u8 ls_code;
400 u32 payload_size;
401#define ZFCP_DBF_ELS_PAYLOAD 32
402#define ZFCP_DBF_ELS_MAX_PAYLOAD 1024
403 u8 payload[ZFCP_DBF_ELS_PAYLOAD];
404} __attribute__ ((packed));
405
406struct zfcp_san_dbf_record {
407 u8 tag[ZFCP_DBF_TAG_SIZE];
408 u64 fsf_reqid;
409 u32 fsf_seqno;
410 u32 s_id;
411 u32 d_id;
412 union {
413 struct zfcp_san_dbf_record_ct ct;
414 struct zfcp_san_dbf_record_els els;
415 } type;
416} __attribute__ ((packed));
417
418struct zfcp_scsi_dbf_record {
419 u8 tag[ZFCP_DBF_TAG_SIZE];
420 u8 tag2[ZFCP_DBF_TAG_SIZE];
421 u32 scsi_id;
422 u32 scsi_lun;
423 u32 scsi_result;
424 u64 scsi_cmnd;
425 u64 scsi_serial;
426#define ZFCP_DBF_SCSI_OPCODE 16
427 u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
428 u8 scsi_retries;
429 u8 scsi_allowed;
430 u64 fsf_reqid;
431 u32 fsf_seqno;
432 u64 fsf_issued;
433 union {
434 struct {
435 u64 fsf_reqid;
436 u32 fsf_seqno;
437 u64 fsf_issued;
438 } new_fsf_req;
439 struct {
440 u8 rsp_validity;
441 u8 rsp_scsi_status;
442 u32 rsp_resid;
443 u8 rsp_code;
444#define ZFCP_DBF_SCSI_FCP_SNS_INFO 16
445#define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO 256
446 u32 sns_info_len;
447 u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO];
448 } fcp;
449 } type;
450} __attribute__ ((packed));
451
452/*
284 * FC-FS stuff 453 * FC-FS stuff
285 */ 454 */
286#define R_A_TOV 10 /* seconds */ 455#define R_A_TOV 10 /* seconds */
@@ -339,34 +508,6 @@ struct zfcp_rc_entry {
339 */ 508 */
340#define ZFCP_CT_TIMEOUT (3 * R_A_TOV) 509#define ZFCP_CT_TIMEOUT (3 * R_A_TOV)
341 510
342
343/***************** S390 DEBUG FEATURE SPECIFIC DEFINES ***********************/
344
345/* debug feature entries per adapter */
346#define ZFCP_ERP_DBF_INDEX 1
347#define ZFCP_ERP_DBF_AREAS 2
348#define ZFCP_ERP_DBF_LENGTH 16
349#define ZFCP_ERP_DBF_LEVEL 3
350#define ZFCP_ERP_DBF_NAME "zfcperp"
351
352#define ZFCP_CMD_DBF_INDEX 2
353#define ZFCP_CMD_DBF_AREAS 1
354#define ZFCP_CMD_DBF_LENGTH 8
355#define ZFCP_CMD_DBF_LEVEL 3
356#define ZFCP_CMD_DBF_NAME "zfcpcmd"
357
358#define ZFCP_ABORT_DBF_INDEX 2
359#define ZFCP_ABORT_DBF_AREAS 1
360#define ZFCP_ABORT_DBF_LENGTH 8
361#define ZFCP_ABORT_DBF_LEVEL 6
362#define ZFCP_ABORT_DBF_NAME "zfcpabt"
363
364#define ZFCP_IN_ELS_DBF_INDEX 2
365#define ZFCP_IN_ELS_DBF_AREAS 1
366#define ZFCP_IN_ELS_DBF_LENGTH 8
367#define ZFCP_IN_ELS_DBF_LEVEL 6
368#define ZFCP_IN_ELS_DBF_NAME "zfcpels"
369
370/******************** LOGGING MACROS AND DEFINES *****************************/ 511/******************** LOGGING MACROS AND DEFINES *****************************/
371 512
372/* 513/*
@@ -501,6 +642,7 @@ do { \
501#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080 642#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080
502#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 643#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
503#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 644#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
645#define ZFCP_STATUS_ADAPTER_XPORT_OK 0x00000800
504 646
505#define ZFCP_STATUS_ADAPTER_SCSI_UP \ 647#define ZFCP_STATUS_ADAPTER_SCSI_UP \
506 (ZFCP_STATUS_COMMON_UNBLOCKED | \ 648 (ZFCP_STATUS_COMMON_UNBLOCKED | \
@@ -635,45 +777,6 @@ struct zfcp_adapter_mempool {
635 mempool_t *data_gid_pn; 777 mempool_t *data_gid_pn;
636}; 778};
637 779
638struct zfcp_exchange_config_data{
639};
640
641struct zfcp_open_port {
642 struct zfcp_port *port;
643};
644
645struct zfcp_close_port {
646 struct zfcp_port *port;
647};
648
649struct zfcp_open_unit {
650 struct zfcp_unit *unit;
651};
652
653struct zfcp_close_unit {
654 struct zfcp_unit *unit;
655};
656
657struct zfcp_close_physical_port {
658 struct zfcp_port *port;
659};
660
661struct zfcp_send_fcp_command_task {
662 struct zfcp_fsf_req *fsf_req;
663 struct zfcp_unit *unit;
664 struct scsi_cmnd *scsi_cmnd;
665 unsigned long start_jiffies;
666};
667
668struct zfcp_send_fcp_command_task_management {
669 struct zfcp_unit *unit;
670};
671
672struct zfcp_abort_fcp_command {
673 struct zfcp_fsf_req *fsf_req;
674 struct zfcp_unit *unit;
675};
676
677/* 780/*
678 * header for CT_IU 781 * header for CT_IU
679 */ 782 */
@@ -702,7 +805,7 @@ struct ct_iu_gid_pn_req {
702/* FS_ACC IU and data unit for GID_PN nameserver request */ 805/* FS_ACC IU and data unit for GID_PN nameserver request */
703struct ct_iu_gid_pn_resp { 806struct ct_iu_gid_pn_resp {
704 struct ct_hdr header; 807 struct ct_hdr header;
705 fc_id_t d_id; 808 u32 d_id;
706} __attribute__ ((packed)); 809} __attribute__ ((packed));
707 810
708typedef void (*zfcp_send_ct_handler_t)(unsigned long); 811typedef void (*zfcp_send_ct_handler_t)(unsigned long);
@@ -768,7 +871,7 @@ typedef void (*zfcp_send_els_handler_t)(unsigned long);
768struct zfcp_send_els { 871struct zfcp_send_els {
769 struct zfcp_adapter *adapter; 872 struct zfcp_adapter *adapter;
770 struct zfcp_port *port; 873 struct zfcp_port *port;
771 fc_id_t d_id; 874 u32 d_id;
772 struct scatterlist *req; 875 struct scatterlist *req;
773 struct scatterlist *resp; 876 struct scatterlist *resp;
774 unsigned int req_count; 877 unsigned int req_count;
@@ -781,33 +884,6 @@ struct zfcp_send_els {
781 int status; 884 int status;
782}; 885};
783 886
784struct zfcp_status_read {
785 struct fsf_status_read_buffer *buffer;
786};
787
788struct zfcp_fsf_done {
789 struct completion *complete;
790 int status;
791};
792
793/* request specific data */
794union zfcp_req_data {
795 struct zfcp_exchange_config_data exchange_config_data;
796 struct zfcp_open_port open_port;
797 struct zfcp_close_port close_port;
798 struct zfcp_open_unit open_unit;
799 struct zfcp_close_unit close_unit;
800 struct zfcp_close_physical_port close_physical_port;
801 struct zfcp_send_fcp_command_task send_fcp_command_task;
802 struct zfcp_send_fcp_command_task_management
803 send_fcp_command_task_management;
804 struct zfcp_abort_fcp_command abort_fcp_command;
805 struct zfcp_send_ct *send_ct;
806 struct zfcp_send_els *send_els;
807 struct zfcp_status_read status_read;
808 struct fsf_qtcb_bottom_port *port_data;
809};
810
811struct zfcp_qdio_queue { 887struct zfcp_qdio_queue {
812 struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */ 888 struct qdio_buffer *buffer[QDIO_MAX_BUFFERS_PER_Q]; /* SBALs */
813 u8 free_index; /* index of next free bfr 889 u8 free_index; /* index of next free bfr
@@ -838,21 +914,19 @@ struct zfcp_adapter {
838 atomic_t refcount; /* reference count */ 914 atomic_t refcount; /* reference count */
839 wait_queue_head_t remove_wq; /* can be used to wait for 915 wait_queue_head_t remove_wq; /* can be used to wait for
840 refcount drop to zero */ 916 refcount drop to zero */
841 wwn_t wwnn; /* WWNN */
842 wwn_t wwpn; /* WWPN */
843 fc_id_t s_id; /* N_Port ID */
844 wwn_t peer_wwnn; /* P2P peer WWNN */ 917 wwn_t peer_wwnn; /* P2P peer WWNN */
845 wwn_t peer_wwpn; /* P2P peer WWPN */ 918 wwn_t peer_wwpn; /* P2P peer WWPN */
846 fc_id_t peer_d_id; /* P2P peer D_ID */ 919 u32 peer_d_id; /* P2P peer D_ID */
920 wwn_t physical_wwpn; /* WWPN of physical port */
921 u32 physical_s_id; /* local FC port ID */
847 struct ccw_device *ccw_device; /* S/390 ccw device */ 922 struct ccw_device *ccw_device; /* S/390 ccw device */
848 u8 fc_service_class; 923 u8 fc_service_class;
849 u32 fc_topology; /* FC topology */ 924 u32 fc_topology; /* FC topology */
850 u32 fc_link_speed; /* FC interface speed */
851 u32 hydra_version; /* Hydra version */ 925 u32 hydra_version; /* Hydra version */
852 u32 fsf_lic_version; 926 u32 fsf_lic_version;
853 u32 supported_features;/* of FCP channel */ 927 u32 adapter_features; /* FCP channel features */
928 u32 connection_features; /* host connection features */
854 u32 hardware_version; /* of FCP channel */ 929 u32 hardware_version; /* of FCP channel */
855 u8 serial_number[32]; /* of hardware */
856 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */ 930 struct Scsi_Host *scsi_host; /* Pointer to mid-layer */
857 unsigned short scsi_host_no; /* Assigned host number */ 931 unsigned short scsi_host_no; /* Assigned host number */
858 unsigned char name[9]; 932 unsigned char name[9];
@@ -889,11 +963,18 @@ struct zfcp_adapter {
889 u32 erp_low_mem_count; /* nr of erp actions waiting 963 u32 erp_low_mem_count; /* nr of erp actions waiting
890 for memory */ 964 for memory */
891 struct zfcp_port *nameserver_port; /* adapter's nameserver */ 965 struct zfcp_port *nameserver_port; /* adapter's nameserver */
892 debug_info_t *erp_dbf; /* S/390 debug features */ 966 debug_info_t *erp_dbf;
893 debug_info_t *abort_dbf; 967 debug_info_t *hba_dbf;
894 debug_info_t *in_els_dbf; 968 debug_info_t *san_dbf; /* debug feature areas */
895 debug_info_t *cmd_dbf; 969 debug_info_t *scsi_dbf;
896 spinlock_t dbf_lock; 970 spinlock_t erp_dbf_lock;
971 spinlock_t hba_dbf_lock;
972 spinlock_t san_dbf_lock;
973 spinlock_t scsi_dbf_lock;
974 struct zfcp_erp_dbf_record erp_dbf_buf;
975 struct zfcp_hba_dbf_record hba_dbf_buf;
976 struct zfcp_san_dbf_record san_dbf_buf;
977 struct zfcp_scsi_dbf_record scsi_dbf_buf;
897 struct zfcp_adapter_mempool pool; /* Adapter memory pools */ 978 struct zfcp_adapter_mempool pool; /* Adapter memory pools */
898 struct qdio_initialize qdio_init_data; /* for qdio_establish */ 979 struct qdio_initialize qdio_init_data; /* for qdio_establish */
899 struct device generic_services; /* directory for WKA ports */ 980 struct device generic_services; /* directory for WKA ports */
@@ -919,7 +1000,7 @@ struct zfcp_port {
919 atomic_t status; /* status of this remote port */ 1000 atomic_t status; /* status of this remote port */
920 wwn_t wwnn; /* WWNN if known */ 1001 wwn_t wwnn; /* WWNN if known */
921 wwn_t wwpn; /* WWPN */ 1002 wwn_t wwpn; /* WWPN */
922 fc_id_t d_id; /* D_ID */ 1003 u32 d_id; /* D_ID */
923 u32 handle; /* handle assigned by FSF */ 1004 u32 handle; /* handle assigned by FSF */
924 struct zfcp_erp_action erp_action; /* pending error recovery */ 1005 struct zfcp_erp_action erp_action; /* pending error recovery */
925 atomic_t erp_counter; 1006 atomic_t erp_counter;
@@ -963,11 +1044,13 @@ struct zfcp_fsf_req {
963 u32 fsf_command; /* FSF Command copy */ 1044 u32 fsf_command; /* FSF Command copy */
964 struct fsf_qtcb *qtcb; /* address of associated QTCB */ 1045 struct fsf_qtcb *qtcb; /* address of associated QTCB */
965 u32 seq_no; /* Sequence number of request */ 1046 u32 seq_no; /* Sequence number of request */
966 union zfcp_req_data data; /* Info fields of request */ 1047 unsigned long data; /* private data of request */
967 struct zfcp_erp_action *erp_action; /* used if this request is 1048 struct zfcp_erp_action *erp_action; /* used if this request is
968 issued on behalf of erp */ 1049 issued on behalf of erp */
969 mempool_t *pool; /* used if request was allocated 1050 mempool_t *pool; /* used if request was allocated
970 from emergency pool */ 1051 from emergency pool */
1052 unsigned long long issued; /* request sent time (STCK) */
1053 struct zfcp_unit *unit;
971}; 1054};
972 1055
973typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*); 1056typedef void zfcp_fsf_req_handler_t(struct zfcp_fsf_req*);
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index cb4f612550ba..023f4e558ae4 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -82,6 +82,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
82static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); 82static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
83static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); 83static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
84static int zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *); 84static int zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *);
85static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *);
85static int zfcp_erp_adapter_strategy_open_fsf_statusread( 86static int zfcp_erp_adapter_strategy_open_fsf_statusread(
86 struct zfcp_erp_action *); 87 struct zfcp_erp_action *);
87 88
@@ -345,13 +346,13 @@ zfcp_erp_adisc(struct zfcp_port *port)
345 346
346 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports 347 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
347 without FC-AL-2 capability, so we don't set it */ 348 without FC-AL-2 capability, so we don't set it */
348 adisc->wwpn = adapter->wwpn; 349 adisc->wwpn = fc_host_port_name(adapter->scsi_host);
349 adisc->wwnn = adapter->wwnn; 350 adisc->wwnn = fc_host_node_name(adapter->scsi_host);
350 adisc->nport_id = adapter->s_id; 351 adisc->nport_id = fc_host_port_id(adapter->scsi_host);
351 ZFCP_LOG_INFO("ADISC request from s_id 0x%08x to d_id 0x%08x " 352 ZFCP_LOG_INFO("ADISC request from s_id 0x%08x to d_id 0x%08x "
352 "(wwpn=0x%016Lx, wwnn=0x%016Lx, " 353 "(wwpn=0x%016Lx, wwnn=0x%016Lx, "
353 "hard_nport_id=0x%08x, nport_id=0x%08x)\n", 354 "hard_nport_id=0x%08x, nport_id=0x%08x)\n",
354 adapter->s_id, send_els->d_id, (wwn_t) adisc->wwpn, 355 adisc->nport_id, send_els->d_id, (wwn_t) adisc->wwpn,
355 (wwn_t) adisc->wwnn, adisc->hard_nport_id, 356 (wwn_t) adisc->wwnn, adisc->hard_nport_id,
356 adisc->nport_id); 357 adisc->nport_id);
357 358
@@ -404,7 +405,7 @@ zfcp_erp_adisc_handler(unsigned long data)
404 struct zfcp_send_els *send_els; 405 struct zfcp_send_els *send_els;
405 struct zfcp_port *port; 406 struct zfcp_port *port;
406 struct zfcp_adapter *adapter; 407 struct zfcp_adapter *adapter;
407 fc_id_t d_id; 408 u32 d_id;
408 struct zfcp_ls_adisc_acc *adisc; 409 struct zfcp_ls_adisc_acc *adisc;
409 410
410 send_els = (struct zfcp_send_els *) data; 411 send_els = (struct zfcp_send_els *) data;
@@ -435,9 +436,9 @@ zfcp_erp_adisc_handler(unsigned long data)
435 ZFCP_LOG_INFO("ADISC response from d_id 0x%08x to s_id " 436 ZFCP_LOG_INFO("ADISC response from d_id 0x%08x to s_id "
436 "0x%08x (wwpn=0x%016Lx, wwnn=0x%016Lx, " 437 "0x%08x (wwpn=0x%016Lx, wwnn=0x%016Lx, "
437 "hard_nport_id=0x%08x, nport_id=0x%08x)\n", 438 "hard_nport_id=0x%08x, nport_id=0x%08x)\n",
438 d_id, adapter->s_id, (wwn_t) adisc->wwpn, 439 d_id, fc_host_port_id(adapter->scsi_host),
439 (wwn_t) adisc->wwnn, adisc->hard_nport_id, 440 (wwn_t) adisc->wwpn, (wwn_t) adisc->wwnn,
440 adisc->nport_id); 441 adisc->hard_nport_id, adisc->nport_id);
441 442
442 /* set wwnn for port */ 443 /* set wwnn for port */
443 if (port->wwnn == 0) 444 if (port->wwnn == 0)
@@ -886,7 +887,7 @@ static int
886zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) 887zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
887{ 888{
888 int retval = 0; 889 int retval = 0;
889 struct zfcp_fsf_req *fsf_req; 890 struct zfcp_fsf_req *fsf_req = NULL;
890 struct zfcp_adapter *adapter = erp_action->adapter; 891 struct zfcp_adapter *adapter = erp_action->adapter;
891 892
892 if (erp_action->fsf_req) { 893 if (erp_action->fsf_req) {
@@ -896,7 +897,7 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
896 list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list) 897 list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list)
897 if (fsf_req == erp_action->fsf_req) 898 if (fsf_req == erp_action->fsf_req)
898 break; 899 break;
899 if (fsf_req == erp_action->fsf_req) { 900 if (fsf_req && (fsf_req->erp_action == erp_action)) {
900 /* fsf_req still exists */ 901 /* fsf_req still exists */
901 debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); 902 debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
902 debug_event(adapter->erp_dbf, 3, &fsf_req, 903 debug_event(adapter->erp_dbf, 3, &fsf_req,
@@ -2258,16 +2259,21 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2258static int 2259static int
2259zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action) 2260zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *erp_action)
2260{ 2261{
2261 int retval; 2262 int xconfig, xport;
2263
2264 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
2265 &erp_action->adapter->status)) {
2266 zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
2267 atomic_set(&erp_action->adapter->erp_counter, 0);
2268 return ZFCP_ERP_FAILED;
2269 }
2262 2270
2263 /* do 'exchange configuration data' */ 2271 xconfig = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action);
2264 retval = zfcp_erp_adapter_strategy_open_fsf_xconfig(erp_action); 2272 xport = zfcp_erp_adapter_strategy_open_fsf_xport(erp_action);
2265 if (retval == ZFCP_ERP_FAILED) 2273 if ((xconfig == ZFCP_ERP_FAILED) || (xport == ZFCP_ERP_FAILED))
2266 return retval; 2274 return ZFCP_ERP_FAILED;
2267 2275
2268 /* start the desired number of Status Reads */ 2276 return zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
2269 retval = zfcp_erp_adapter_strategy_open_fsf_statusread(erp_action);
2270 return retval;
2271} 2277}
2272 2278
2273/* 2279/*
@@ -2291,7 +2297,9 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2291 atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 2297 atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
2292 &adapter->status); 2298 &adapter->status);
2293 ZFCP_LOG_DEBUG("Doing exchange config data\n"); 2299 ZFCP_LOG_DEBUG("Doing exchange config data\n");
2300 write_lock(&adapter->erp_lock);
2294 zfcp_erp_action_to_running(erp_action); 2301 zfcp_erp_action_to_running(erp_action);
2302 write_unlock(&adapter->erp_lock);
2295 zfcp_erp_timeout_init(erp_action); 2303 zfcp_erp_timeout_init(erp_action);
2296 if (zfcp_fsf_exchange_config_data(erp_action)) { 2304 if (zfcp_fsf_exchange_config_data(erp_action)) {
2297 retval = ZFCP_ERP_FAILED; 2305 retval = ZFCP_ERP_FAILED;
@@ -2348,6 +2356,76 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action)
2348 return retval; 2356 return retval;
2349} 2357}
2350 2358
2359static int
2360zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2361{
2362 int retval = ZFCP_ERP_SUCCEEDED;
2363 int retries;
2364 int sleep;
2365 struct zfcp_adapter *adapter = erp_action->adapter;
2366
2367 atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2368
2369 for (retries = 0; ; retries++) {
2370 ZFCP_LOG_DEBUG("Doing exchange port data\n");
2371 zfcp_erp_action_to_running(erp_action);
2372 zfcp_erp_timeout_init(erp_action);
2373 if (zfcp_fsf_exchange_port_data(erp_action, adapter, NULL)) {
2374 retval = ZFCP_ERP_FAILED;
2375 debug_text_event(adapter->erp_dbf, 5, "a_fstx_xf");
2376 ZFCP_LOG_INFO("error: initiation of exchange of "
2377 "port data failed for adapter %s\n",
2378 zfcp_get_busid_by_adapter(adapter));
2379 break;
2380 }
2381 debug_text_event(adapter->erp_dbf, 6, "a_fstx_xok");
2382 ZFCP_LOG_DEBUG("Xchange underway\n");
2383
2384 /*
2385 * Why this works:
2386 * Both the normal completion handler and the timeout
2387 * handler do an 'up' when the 'exchange port data'
2388 * request completes or times out, so the signal to go on
2389 * cannot be lost while waiting on this semaphore.
2390 * Furthermore, this 'adapter_reopen' action is
2391 * guaranteed to be the only pending action (it is the
2392 * highest-level action and prevents others from being created).
2393 * Consequently, the wake-up received here
2394 * _must_ be the one belonging to the 'exchange port
2395 * data' request.
2396 */
2397 down(&adapter->erp_ready_sem);
2398 if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) {
2399 ZFCP_LOG_INFO("error: exchange of port data "
2400 "for adapter %s timed out\n",
2401 zfcp_get_busid_by_adapter(adapter));
2402 break;
2403 }
2404
2405 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
2406 &adapter->status))
2407 break;
2408
2409 ZFCP_LOG_DEBUG("host connection still initialising... "
2410 "waiting and retrying...\n");
2411 /* sleep a little bit before retry */
2412 sleep = retries < ZFCP_EXCHANGE_PORT_DATA_SHORT_RETRIES ?
2413 ZFCP_EXCHANGE_PORT_DATA_SHORT_SLEEP :
2414 ZFCP_EXCHANGE_PORT_DATA_LONG_SLEEP;
2415 msleep(jiffies_to_msecs(sleep));
2416 }
2417
2418 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
2419 &adapter->status)) {
2420 ZFCP_LOG_INFO("error: exchange of port data for "
2421 "adapter %s failed\n",
2422 zfcp_get_busid_by_adapter(adapter));
2423 retval = ZFCP_ERP_FAILED;
2424 }
2425
2426 return retval;
2427}
2428
2351/* 2429/*
2352 * function: 2430 * function:
2353 * 2431 *
@@ -3194,11 +3272,19 @@ zfcp_erp_action_enqueue(int action,
3194 /* fall through !!! */ 3272 /* fall through !!! */
3195 3273
3196 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 3274 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
3197 if (atomic_test_mask 3275 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
3198 (ZFCP_STATUS_COMMON_ERP_INUSE, &port->status) 3276 &port->status)) {
3199 && port->erp_action.action == 3277 if (port->erp_action.action !=
3200 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) { 3278 ZFCP_ERP_ACTION_REOPEN_PORT_FORCED) {
3201 debug_text_event(adapter->erp_dbf, 4, "pf_actenq_drp"); 3279 ZFCP_LOG_INFO("dropped erp action %i (port "
3280 "0x%016Lx, action in use: %i)\n",
3281 action, port->wwpn,
3282 port->erp_action.action);
3283 debug_text_event(adapter->erp_dbf, 4,
3284 "pf_actenq_drp");
3285 } else
3286 debug_text_event(adapter->erp_dbf, 4,
3287 "pf_actenq_drpcp");
3202 debug_event(adapter->erp_dbf, 4, &port->wwpn, 3288 debug_event(adapter->erp_dbf, 4, &port->wwpn,
3203 sizeof (wwn_t)); 3289 sizeof (wwn_t));
3204 goto out; 3290 goto out;
@@ -3589,6 +3675,9 @@ zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter)
3589 struct zfcp_port *port; 3675 struct zfcp_port *port;
3590 unsigned long flags; 3676 unsigned long flags;
3591 3677
3678 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
3679 return;
3680
3592 debug_text_event(adapter->erp_dbf, 3, "a_access_recover"); 3681 debug_text_event(adapter->erp_dbf, 3, "a_access_recover");
3593 debug_event(adapter->erp_dbf, 3, &adapter->name, 8); 3682 debug_event(adapter->erp_dbf, 3, &adapter->name, 8);
3594 3683
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index cd98a2de9f8f..c3782261cb5c 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -96,7 +96,8 @@ extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
96extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); 96extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
97 97
98extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); 98extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
99extern int zfcp_fsf_exchange_port_data(struct zfcp_adapter *, 99extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *,
100 struct zfcp_adapter *,
100 struct fsf_qtcb_bottom_port *); 101 struct fsf_qtcb_bottom_port *);
101extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **, 102extern int zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_req **,
102 u32, u32, struct zfcp_sg_list *); 103 u32, u32, struct zfcp_sg_list *);
@@ -109,7 +110,6 @@ extern int zfcp_fsf_req_create(struct zfcp_adapter *, u32, int, mempool_t *,
109extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, 110extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
110 struct zfcp_erp_action *); 111 struct zfcp_erp_action *);
111extern int zfcp_fsf_send_els(struct zfcp_send_els *); 112extern int zfcp_fsf_send_els(struct zfcp_send_els *);
112extern int zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *, int, u32 *);
113extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *, 113extern int zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *,
114 struct zfcp_unit *, 114 struct zfcp_unit *,
115 struct scsi_cmnd *, 115 struct scsi_cmnd *,
@@ -182,9 +182,25 @@ extern void zfcp_erp_port_access_changed(struct zfcp_port *);
182extern void zfcp_erp_unit_access_changed(struct zfcp_unit *); 182extern void zfcp_erp_unit_access_changed(struct zfcp_unit *);
183 183
184/******************************** AUX ****************************************/ 184/******************************** AUX ****************************************/
185extern void zfcp_cmd_dbf_event_fsf(const char *, struct zfcp_fsf_req *, 185extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
186 void *, int); 186extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
187extern void zfcp_cmd_dbf_event_scsi(const char *, struct scsi_cmnd *); 187 struct fsf_status_read_buffer *);
188extern void zfcp_in_els_dbf_event(struct zfcp_adapter *, const char *, 188extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *,
189 struct fsf_status_read_buffer *, int); 189 unsigned int, unsigned int, unsigned int,
190 int, int);
191
192extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
193extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
194extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
195extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *);
196extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
197
198extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
199 struct scsi_cmnd *);
200extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
201 struct scsi_cmnd *,
202 struct zfcp_fsf_req *);
203extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
204 struct scsi_cmnd *);
205
190 206 #endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index c007b6424e74..3b0fc1163f5f 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -59,6 +59,8 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *, struct timer_list *);
59 59 static int zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *);
60 60 static int zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *);
61 61 static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
62static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *,
63 struct fsf_link_down_info *);
62 64 static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
63 65 static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *);
64 66
@@ -285,51 +287,51 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
285 287 {
286 288 	int retval = 0;
287 289 	struct zfcp_adapter *adapter = fsf_req->adapter;
290 struct fsf_qtcb *qtcb = fsf_req->qtcb;
291 union fsf_prot_status_qual *prot_status_qual =
292 &qtcb->prefix.prot_status_qual;
288 293
289 	ZFCP_LOG_DEBUG("QTCB is at %p\n", fsf_req->qtcb);
294 	zfcp_hba_dbf_event_fsf_response(fsf_req);
290 295
291 296 	if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
292 297 		ZFCP_LOG_DEBUG("fsf_req 0x%lx has been dismissed\n",
293 298 			       (unsigned long) fsf_req);
294 299 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR |
295 300 			ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
296 		zfcp_cmd_dbf_event_fsf("dismiss", fsf_req, NULL, 0);
297 301 		goto skip_protstatus;
298 302 	}
299 303
300 304 	/* log additional information provided by FSF (if any) */
301 	if (unlikely(fsf_req->qtcb->header.log_length)) {
305 	if (unlikely(qtcb->header.log_length)) {
302 306 		/* do not trust them ;-) */
303 		if (fsf_req->qtcb->header.log_start > sizeof(struct fsf_qtcb)) {
307 		if (qtcb->header.log_start > sizeof(struct fsf_qtcb)) {
304 308 			ZFCP_LOG_NORMAL
305 309 			    ("bug: ULP (FSF logging) log data starts "
306 310 			     "beyond end of packet header. Ignored. "
307 311 			     "(start=%i, size=%li)\n",
308 			     fsf_req->qtcb->header.log_start,
312 			     qtcb->header.log_start,
309 313 			     sizeof(struct fsf_qtcb));
310 314 			goto forget_log;
311 315 		}
312 		if ((size_t) (fsf_req->qtcb->header.log_start +
313 			      fsf_req->qtcb->header.log_length)
316 		if ((size_t) (qtcb->header.log_start + qtcb->header.log_length)
314 317 		    > sizeof(struct fsf_qtcb)) {
315 318 			ZFCP_LOG_NORMAL("bug: ULP (FSF logging) log data ends "
316 319 					"beyond end of packet header. Ignored. "
317 320 					"(start=%i, length=%i, size=%li)\n",
318 					fsf_req->qtcb->header.log_start,
319 					fsf_req->qtcb->header.log_length,
321 					qtcb->header.log_start,
322 					qtcb->header.log_length,
320 323 					sizeof(struct fsf_qtcb));
321 324 			goto forget_log;
322 325 		}
323 326 		ZFCP_LOG_TRACE("ULP log data: \n");
324 327 		ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
325 			      (char *) fsf_req->qtcb +
326 			      fsf_req->qtcb->header.log_start,
327 			      fsf_req->qtcb->header.log_length);
328 			      (char *) qtcb + qtcb->header.log_start,
329 			      qtcb->header.log_length);
328 330 	}
329 331  forget_log:
330 332
331 333 	/* evaluate FSF Protocol Status */
332 	switch (fsf_req->qtcb->prefix.prot_status) {
334 	switch (qtcb->prefix.prot_status) {
333 335
334 336 	case FSF_PROT_GOOD:
335 337 	case FSF_PROT_FSF_STATUS_PRESENTED:
@@ -340,14 +342,9 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
340 342 			       "microcode of version 0x%x, the device driver "
341 343 			       "only supports 0x%x. Aborting.\n",
342 344 			       zfcp_get_busid_by_adapter(adapter),
343 			       fsf_req->qtcb->prefix.prot_status_qual.
344 			       version_error.fsf_version, ZFCP_QTCB_VERSION);
345 			       prot_status_qual->version_error.fsf_version,
346 			       ZFCP_QTCB_VERSION);
345 /* stop operation for this adapter */
346 debug_text_exception(adapter->erp_dbf, 0, "prot_ver_err");
347 347 		zfcp_erp_adapter_shutdown(adapter, 0);
348 zfcp_cmd_dbf_event_fsf("qverserr", fsf_req,
349 &fsf_req->qtcb->prefix.prot_status_qual,
350 sizeof (union fsf_prot_status_qual));
351 348 		fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
352 349 		break;
353 350
@@ -355,16 +352,10 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
355 ZFCP_LOG_NORMAL("bug: Sequence number mismatch between " 352 ZFCP_LOG_NORMAL("bug: Sequence number mismatch between "
356 "driver (0x%x) and adapter %s (0x%x). " 353 "driver (0x%x) and adapter %s (0x%x). "
357 "Restarting all operations on this adapter.\n", 354 "Restarting all operations on this adapter.\n",
358 fsf_req->qtcb->prefix.req_seq_no, 355 qtcb->prefix.req_seq_no,
359 zfcp_get_busid_by_adapter(adapter), 356 zfcp_get_busid_by_adapter(adapter),
360 fsf_req->qtcb->prefix.prot_status_qual. 357 prot_status_qual->sequence_error.exp_req_seq_no);
361 sequence_error.exp_req_seq_no);
362 debug_text_exception(adapter->erp_dbf, 0, "prot_seq_err");
363 /* restart operation on this adapter */
364 zfcp_erp_adapter_reopen(adapter, 0); 358 zfcp_erp_adapter_reopen(adapter, 0);
365 zfcp_cmd_dbf_event_fsf("seqnoerr", fsf_req,
366 &fsf_req->qtcb->prefix.prot_status_qual,
367 sizeof (union fsf_prot_status_qual));
368 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY; 359 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
369 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 360 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
370 break; 361 break;
@@ -375,116 +366,35 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
375 "that used on adapter %s. " 366 "that used on adapter %s. "
376 "Stopping all operations on this adapter.\n", 367 "Stopping all operations on this adapter.\n",
377 zfcp_get_busid_by_adapter(adapter)); 368 zfcp_get_busid_by_adapter(adapter));
378 debug_text_exception(adapter->erp_dbf, 0, "prot_unsup_qtcb");
379 zfcp_erp_adapter_shutdown(adapter, 0); 369 zfcp_erp_adapter_shutdown(adapter, 0);
380 zfcp_cmd_dbf_event_fsf("unsqtcbt", fsf_req,
381 &fsf_req->qtcb->prefix.prot_status_qual,
382 sizeof (union fsf_prot_status_qual));
383 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 370 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
384 break; 371 break;
385 372
386 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 373 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
387 zfcp_cmd_dbf_event_fsf("hconinit", fsf_req,
388 &fsf_req->qtcb->prefix.prot_status_qual,
389 sizeof (union fsf_prot_status_qual));
390 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 374 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
391 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 375 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
392 &(adapter->status)); 376 &(adapter->status));
393 debug_text_event(adapter->erp_dbf, 3, "prot_con_init");
394 break; 377 break;
395 378
396 case FSF_PROT_DUPLICATE_REQUEST_ID: 379 case FSF_PROT_DUPLICATE_REQUEST_ID:
397 if (fsf_req->qtcb) {
398 ZFCP_LOG_NORMAL("bug: The request identifier 0x%Lx " 380 ZFCP_LOG_NORMAL("bug: The request identifier 0x%Lx "
399 "to the adapter %s is ambiguous. " 381 "to the adapter %s is ambiguous. "
400 "Stopping all operations on this " 382 "Stopping all operations on this adapter.\n",
401 "adapter.\n", 383 *(unsigned long long*)
402 *(unsigned long long *) 384 (&qtcb->bottom.support.req_handle),
403 (&fsf_req->qtcb->bottom.support.
404 req_handle),
405 zfcp_get_busid_by_adapter(adapter));
406 } else {
407 ZFCP_LOG_NORMAL("bug: The request identifier %p "
408 "to the adapter %s is ambiguous. "
409 "Stopping all operations on this "
410 "adapter. "
411 "(bug: got this for an unsolicited "
412 "status read request)\n",
413 fsf_req,
414 zfcp_get_busid_by_adapter(adapter)); 385 zfcp_get_busid_by_adapter(adapter));
415 }
416 debug_text_exception(adapter->erp_dbf, 0, "prot_dup_id");
417 zfcp_erp_adapter_shutdown(adapter, 0); 386 zfcp_erp_adapter_shutdown(adapter, 0);
418 zfcp_cmd_dbf_event_fsf("dupreqid", fsf_req,
419 &fsf_req->qtcb->prefix.prot_status_qual,
420 sizeof (union fsf_prot_status_qual));
421 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 387 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
422 break; 388 break;
423 389
424 case FSF_PROT_LINK_DOWN: 390 case FSF_PROT_LINK_DOWN:
425 /* 391 zfcp_fsf_link_down_info_eval(adapter,
426 * 'test and set' is not atomic here - 392 &prot_status_qual->link_down_info);
427 * it's ok as long as calls to our response queue handler
428 * (and thus execution of this code here) are serialized
429 * by the qdio module
430 */
431 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
432 &adapter->status)) {
433 switch (fsf_req->qtcb->prefix.prot_status_qual.
434 locallink_error.code) {
435 case FSF_PSQ_LINK_NOLIGHT:
436 ZFCP_LOG_INFO("The local link to adapter %s "
437 "is down (no light detected).\n",
438 zfcp_get_busid_by_adapter(
439 adapter));
440 break;
441 case FSF_PSQ_LINK_WRAPPLUG:
442 ZFCP_LOG_INFO("The local link to adapter %s "
443 "is down (wrap plug detected).\n",
444 zfcp_get_busid_by_adapter(
445 adapter));
446 break;
447 case FSF_PSQ_LINK_NOFCP:
448 ZFCP_LOG_INFO("The local link to adapter %s "
449 "is down (adjacent node on "
450 "link does not support FCP).\n",
451 zfcp_get_busid_by_adapter(
452 adapter));
453 break;
454 default:
455 ZFCP_LOG_INFO("The local link to adapter %s "
456 "is down "
457 "(warning: unknown reason "
458 "code).\n",
459 zfcp_get_busid_by_adapter(
460 adapter));
461 break;
462
463 }
464 /*
465 * Due to the 'erp failed' flag the adapter won't
466 * be recovered but will be just set to 'blocked'
467 * state. All subordinary devices will have state
468 * 'blocked' and 'erp failed', too.
469 * Thus the adapter is still able to provide
470 * 'link up' status without being flooded with
471 * requests.
472 * (note: even 'close port' is not permitted)
473 */
474 ZFCP_LOG_INFO("Stopping all operations for adapter "
475 "%s.\n",
476 zfcp_get_busid_by_adapter(adapter));
477 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
478 ZFCP_STATUS_COMMON_ERP_FAILED,
479 &adapter->status);
480 zfcp_erp_adapter_reopen(adapter, 0);
481 }
482 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 393 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
483 break; 394 break;
484 395
485 case FSF_PROT_REEST_QUEUE: 396 case FSF_PROT_REEST_QUEUE:
486 debug_text_event(adapter->erp_dbf, 1, "prot_reest_queue"); 397 ZFCP_LOG_NORMAL("The local link to adapter with "
487 ZFCP_LOG_INFO("The local link to adapter with "
488 "%s was re-plugged. " 398 "%s was re-plugged. "
489 "Re-starting operations on this adapter.\n", 399 "Re-starting operations on this adapter.\n",
490 zfcp_get_busid_by_adapter(adapter)); 400 zfcp_get_busid_by_adapter(adapter));
@@ -495,9 +405,6 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
495 zfcp_erp_adapter_reopen(adapter, 405 zfcp_erp_adapter_reopen(adapter,
496 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 406 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED
497 | ZFCP_STATUS_COMMON_ERP_FAILED); 407 | ZFCP_STATUS_COMMON_ERP_FAILED);
498 zfcp_cmd_dbf_event_fsf("reestque", fsf_req,
499 &fsf_req->qtcb->prefix.prot_status_qual,
500 sizeof (union fsf_prot_status_qual));
501 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 408 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
502 break; 409 break;
503 410
@@ -507,12 +414,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
507 "Restarting all operations on this " 414 "Restarting all operations on this "
508 "adapter.\n", 415 "adapter.\n",
509 zfcp_get_busid_by_adapter(adapter)); 416 zfcp_get_busid_by_adapter(adapter));
510 debug_text_event(adapter->erp_dbf, 0, "prot_err_sta");
511 /* restart operation on this adapter */
512 zfcp_erp_adapter_reopen(adapter, 0); 417 zfcp_erp_adapter_reopen(adapter, 0);
513 zfcp_cmd_dbf_event_fsf("proterrs", fsf_req,
514 &fsf_req->qtcb->prefix.prot_status_qual,
515 sizeof (union fsf_prot_status_qual));
516 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY; 418 fsf_req->status |= ZFCP_STATUS_FSFREQ_RETRY;
517 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 419 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
518 break; 420 break;
@@ -524,11 +426,7 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
524 "Stopping all operations on this adapter. " 426 "Stopping all operations on this adapter. "
525 "(debug info 0x%x).\n", 427 "(debug info 0x%x).\n",
526 zfcp_get_busid_by_adapter(adapter), 428 zfcp_get_busid_by_adapter(adapter),
527 fsf_req->qtcb->prefix.prot_status); 429 qtcb->prefix.prot_status);
528 debug_text_event(adapter->erp_dbf, 0, "prot_inval:");
529 debug_exception(adapter->erp_dbf, 0,
530 &fsf_req->qtcb->prefix.prot_status,
531 sizeof (u32));
532 zfcp_erp_adapter_shutdown(adapter, 0); 430 zfcp_erp_adapter_shutdown(adapter, 0);
533 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 431 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
534 } 432 }
@@ -568,28 +466,18 @@ zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *fsf_req)
568 "(debug info 0x%x).\n", 466 "(debug info 0x%x).\n",
569 zfcp_get_busid_by_adapter(fsf_req->adapter), 467 zfcp_get_busid_by_adapter(fsf_req->adapter),
570 fsf_req->qtcb->header.fsf_command); 468 fsf_req->qtcb->header.fsf_command);
571 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
572 "fsf_s_unknown");
573 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0); 469 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
574 zfcp_cmd_dbf_event_fsf("unknownc", fsf_req,
575 &fsf_req->qtcb->header.fsf_status_qual,
576 sizeof (union fsf_status_qual));
577 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 470 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
578 break; 471 break;
579 472
580 case FSF_FCP_RSP_AVAILABLE: 473 case FSF_FCP_RSP_AVAILABLE:
581 ZFCP_LOG_DEBUG("FCP Sense data will be presented to the " 474 ZFCP_LOG_DEBUG("FCP Sense data will be presented to the "
582 "SCSI stack.\n"); 475 "SCSI stack.\n");
583 debug_text_event(fsf_req->adapter->erp_dbf, 3, "fsf_s_rsp");
584 break; 476 break;
585 477
586 case FSF_ADAPTER_STATUS_AVAILABLE: 478 case FSF_ADAPTER_STATUS_AVAILABLE:
587 debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_s_astatus");
588 zfcp_fsf_fsfstatus_qual_eval(fsf_req); 479 zfcp_fsf_fsfstatus_qual_eval(fsf_req);
589 break; 480 break;
590
591 default:
592 break;
593 } 481 }
594 482
595 skip_fsfstatus: 483 skip_fsfstatus:
@@ -617,44 +505,28 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
617 505
618 switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) { 506 switch (fsf_req->qtcb->header.fsf_status_qual.word[0]) {
619 case FSF_SQ_FCP_RSP_AVAILABLE: 507 case FSF_SQ_FCP_RSP_AVAILABLE:
620 debug_text_event(fsf_req->adapter->erp_dbf, 4, "fsf_sq_rsp");
621 break; 508 break;
622 case FSF_SQ_RETRY_IF_POSSIBLE: 509 case FSF_SQ_RETRY_IF_POSSIBLE:
623 /* The SCSI-stack may now issue retries or escalate */ 510 /* The SCSI-stack may now issue retries or escalate */
624 debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_sq_retry");
625 zfcp_cmd_dbf_event_fsf("sqretry", fsf_req,
626 &fsf_req->qtcb->header.fsf_status_qual,
627 sizeof (union fsf_status_qual));
628 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 511 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
629 break; 512 break;
630 case FSF_SQ_COMMAND_ABORTED: 513 case FSF_SQ_COMMAND_ABORTED:
631 /* Carry the aborted state on to upper layer */ 514 /* Carry the aborted state on to upper layer */
632 debug_text_event(fsf_req->adapter->erp_dbf, 2, "fsf_sq_abort");
633 zfcp_cmd_dbf_event_fsf("sqabort", fsf_req,
634 &fsf_req->qtcb->header.fsf_status_qual,
635 sizeof (union fsf_status_qual));
636 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTED; 515 fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
637 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 516 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
638 break; 517 break;
639 case FSF_SQ_NO_RECOM: 518 case FSF_SQ_NO_RECOM:
640 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
641 "fsf_sq_no_rec");
642 ZFCP_LOG_NORMAL("bug: No recommendation could be given for a" 519 ZFCP_LOG_NORMAL("bug: No recommendation could be given for a"
643 "problem on the adapter %s " 520 "problem on the adapter %s "
644 "Stopping all operations on this adapter. ", 521 "Stopping all operations on this adapter. ",
645 zfcp_get_busid_by_adapter(fsf_req->adapter)); 522 zfcp_get_busid_by_adapter(fsf_req->adapter));
646 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0); 523 zfcp_erp_adapter_shutdown(fsf_req->adapter, 0);
647 zfcp_cmd_dbf_event_fsf("sqnrecom", fsf_req,
648 &fsf_req->qtcb->header.fsf_status_qual,
649 sizeof (union fsf_status_qual));
650 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 524 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
651 break; 525 break;
652 case FSF_SQ_ULP_PROGRAMMING_ERROR: 526 case FSF_SQ_ULP_PROGRAMMING_ERROR:
653 ZFCP_LOG_NORMAL("error: not enough SBALs for data transfer " 527 ZFCP_LOG_NORMAL("error: not enough SBALs for data transfer "
654 "(adapter %s)\n", 528 "(adapter %s)\n",
655 zfcp_get_busid_by_adapter(fsf_req->adapter)); 529 zfcp_get_busid_by_adapter(fsf_req->adapter));
656 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
657 "fsf_sq_ulp_err");
658 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 530 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
659 break; 531 break;
660 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 532 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
@@ -668,13 +540,6 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
668 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, 540 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
669 (char *) &fsf_req->qtcb->header.fsf_status_qual, 541 (char *) &fsf_req->qtcb->header.fsf_status_qual,
670 sizeof (union fsf_status_qual)); 542 sizeof (union fsf_status_qual));
671 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf_sq_inval:");
672 debug_exception(fsf_req->adapter->erp_dbf, 0,
673 &fsf_req->qtcb->header.fsf_status_qual.word[0],
674 sizeof (u32));
675 zfcp_cmd_dbf_event_fsf("squndef", fsf_req,
676 &fsf_req->qtcb->header.fsf_status_qual,
677 sizeof (union fsf_status_qual));
678 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 543 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
679 break; 544 break;
680 } 545 }
@@ -682,6 +547,110 @@ zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *fsf_req)
682 return retval; 547 return retval;
683} 548}
684 549
550/**
551 * zfcp_fsf_link_down_info_eval - evaluate link down information block
552 */
553static void
554zfcp_fsf_link_down_info_eval(struct zfcp_adapter *adapter,
555 struct fsf_link_down_info *link_down)
556{
557 switch (link_down->error_code) {
558 case FSF_PSQ_LINK_NO_LIGHT:
559 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
560 "(no light detected)\n",
561 zfcp_get_busid_by_adapter(adapter));
562 break;
563 case FSF_PSQ_LINK_WRAP_PLUG:
564 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
565 "(wrap plug detected)\n",
566 zfcp_get_busid_by_adapter(adapter));
567 break;
568 case FSF_PSQ_LINK_NO_FCP:
569 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
570 "(adjacent node on link does not support FCP)\n",
571 zfcp_get_busid_by_adapter(adapter));
572 break;
573 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
574 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
575 "(firmware update in progress)\n",
576 zfcp_get_busid_by_adapter(adapter));
577 break;
578 case FSF_PSQ_LINK_INVALID_WWPN:
579 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
580 "(duplicate or invalid WWPN detected)\n",
581 zfcp_get_busid_by_adapter(adapter));
582 break;
583 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
584 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
585 "(no support for NPIV by Fabric)\n",
586 zfcp_get_busid_by_adapter(adapter));
587 break;
588 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
589 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
590 "(out of resource in FCP daughtercard)\n",
591 zfcp_get_busid_by_adapter(adapter));
592 break;
593 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
594 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
595 "(out of resource in Fabric)\n",
596 zfcp_get_busid_by_adapter(adapter));
597 break;
598 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
599 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
600 "(unable to Fabric login)\n",
601 zfcp_get_busid_by_adapter(adapter));
602 break;
603 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
604 ZFCP_LOG_NORMAL("WWPN assignment file corrupted on adapter %s\n",
605 zfcp_get_busid_by_adapter(adapter));
606 break;
607 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
608 ZFCP_LOG_NORMAL("Mode table corrupted on adapter %s\n",
609 zfcp_get_busid_by_adapter(adapter));
610 break;
611 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
612 ZFCP_LOG_NORMAL("No WWPN for assignment table on adapter %s\n",
613 zfcp_get_busid_by_adapter(adapter));
614 break;
615 default:
616 ZFCP_LOG_NORMAL("The local link to adapter %s is down "
617 "(warning: unknown reason code %d)\n",
618 zfcp_get_busid_by_adapter(adapter),
619 link_down->error_code);
620 }
621
622 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
623 ZFCP_LOG_DEBUG("Debug information to link down: "
624 "primary_status=0x%02x "
625 "ioerr_code=0x%02x "
626 "action_code=0x%02x "
627 "reason_code=0x%02x "
628 "explanation_code=0x%02x "
629 "vendor_specific_code=0x%02x\n",
630 link_down->primary_status,
631 link_down->ioerr_code,
632 link_down->action_code,
633 link_down->reason_code,
634 link_down->explanation_code,
635 link_down->vendor_specific_code);
636
637 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
638 &adapter->status)) {
639 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
640 &adapter->status);
641 switch (link_down->error_code) {
642 case FSF_PSQ_LINK_NO_LIGHT:
643 case FSF_PSQ_LINK_WRAP_PLUG:
644 case FSF_PSQ_LINK_NO_FCP:
645 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
646 zfcp_erp_adapter_reopen(adapter, 0);
647 break;
648 default:
649 zfcp_erp_adapter_failed(adapter);
650 }
651 }
652}
653
685/* 654/*
686 * function: zfcp_fsf_req_dispatch 655 * function: zfcp_fsf_req_dispatch
687 * 656 *
@@ -696,11 +665,6 @@ zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
696 struct zfcp_adapter *adapter = fsf_req->adapter; 665 struct zfcp_adapter *adapter = fsf_req->adapter;
697 int retval = 0; 666 int retval = 0;
698 667
699 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
700 ZFCP_LOG_TRACE("fsf_req=%p, QTCB=%p\n", fsf_req, fsf_req->qtcb);
701 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE,
702 (char *) fsf_req->qtcb, sizeof(struct fsf_qtcb));
703 }
704 668
705 switch (fsf_req->fsf_command) { 669 switch (fsf_req->fsf_command) {
706 670
@@ -760,13 +724,13 @@ zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
760 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 724 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
761 ZFCP_LOG_NORMAL("bug: Command issued by the device driver is " 725 ZFCP_LOG_NORMAL("bug: Command issued by the device driver is "
762 "not supported by the adapter %s\n", 726 "not supported by the adapter %s\n",
763 zfcp_get_busid_by_adapter(fsf_req->adapter)); 727 zfcp_get_busid_by_adapter(adapter));
764 if (fsf_req->fsf_command != fsf_req->qtcb->header.fsf_command) 728 if (fsf_req->fsf_command != fsf_req->qtcb->header.fsf_command)
765 ZFCP_LOG_NORMAL 729 ZFCP_LOG_NORMAL
766 ("bug: Command issued by the device driver differs " 730 ("bug: Command issued by the device driver differs "
767 "from the command returned by the adapter %s " 731 "from the command returned by the adapter %s "
768 "(debug info 0x%x, 0x%x).\n", 732 "(debug info 0x%x, 0x%x).\n",
769 zfcp_get_busid_by_adapter(fsf_req->adapter), 733 zfcp_get_busid_by_adapter(adapter),
770 fsf_req->fsf_command, 734 fsf_req->fsf_command,
771 fsf_req->qtcb->header.fsf_command); 735 fsf_req->qtcb->header.fsf_command);
772 } 736 }
@@ -774,8 +738,6 @@ zfcp_fsf_req_dispatch(struct zfcp_fsf_req *fsf_req)
774 if (!erp_action) 738 if (!erp_action)
775 return retval; 739 return retval;
776 740
777 debug_text_event(adapter->erp_dbf, 3, "a_frh");
778 debug_event(adapter->erp_dbf, 3, &erp_action->action, sizeof (int));
779 zfcp_erp_async_handler(erp_action, 0); 741 zfcp_erp_async_handler(erp_action, 0);
780 742
781 return retval; 743 return retval;
@@ -821,7 +783,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
821 goto failed_buf; 783 goto failed_buf;
822 } 784 }
823 memset(status_buffer, 0, sizeof (struct fsf_status_read_buffer)); 785 memset(status_buffer, 0, sizeof (struct fsf_status_read_buffer));
824 fsf_req->data.status_read.buffer = status_buffer; 786 fsf_req->data = (unsigned long) status_buffer;
825 787
826 /* insert pointer to respective buffer */ 788 /* insert pointer to respective buffer */
827 sbale = zfcp_qdio_sbale_curr(fsf_req); 789 sbale = zfcp_qdio_sbale_curr(fsf_req);
@@ -846,6 +808,7 @@ zfcp_fsf_status_read(struct zfcp_adapter *adapter, int req_flags)
846 failed_buf: 808 failed_buf:
847 zfcp_fsf_req_free(fsf_req); 809 zfcp_fsf_req_free(fsf_req);
848 failed_req_create: 810 failed_req_create:
811 zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
849 out: 812 out:
850 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags); 813 write_unlock_irqrestore(&adapter->request_queue.queue_lock, lock_flags);
851 return retval; 814 return retval;
@@ -859,7 +822,7 @@ zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *fsf_req)
859 struct zfcp_port *port; 822 struct zfcp_port *port;
860 unsigned long flags; 823 unsigned long flags;
861 824
862 status_buffer = fsf_req->data.status_read.buffer; 825 status_buffer = (struct fsf_status_read_buffer *) fsf_req->data;
863 adapter = fsf_req->adapter; 826 adapter = fsf_req->adapter;
864 827
865 read_lock_irqsave(&zfcp_data.config_lock, flags); 828 read_lock_irqsave(&zfcp_data.config_lock, flags);
@@ -918,38 +881,33 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
918 int retval = 0; 881 int retval = 0;
919 struct zfcp_adapter *adapter = fsf_req->adapter; 882 struct zfcp_adapter *adapter = fsf_req->adapter;
920 struct fsf_status_read_buffer *status_buffer = 883 struct fsf_status_read_buffer *status_buffer =
921 fsf_req->data.status_read.buffer; 884 (struct fsf_status_read_buffer *) fsf_req->data;
922 885
923 if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 886 if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
887 zfcp_hba_dbf_event_fsf_unsol("dism", adapter, status_buffer);
924 mempool_free(status_buffer, adapter->pool.data_status_read); 888 mempool_free(status_buffer, adapter->pool.data_status_read);
925 zfcp_fsf_req_free(fsf_req); 889 zfcp_fsf_req_free(fsf_req);
926 goto out; 890 goto out;
927 } 891 }
928 892
893 zfcp_hba_dbf_event_fsf_unsol("read", adapter, status_buffer);
894
929 switch (status_buffer->status_type) { 895 switch (status_buffer->status_type) {
930 896
931 case FSF_STATUS_READ_PORT_CLOSED: 897 case FSF_STATUS_READ_PORT_CLOSED:
932 debug_text_event(adapter->erp_dbf, 3, "unsol_pclosed:");
933 debug_event(adapter->erp_dbf, 3,
934 &status_buffer->d_id, sizeof (u32));
935 zfcp_fsf_status_read_port_closed(fsf_req); 898 zfcp_fsf_status_read_port_closed(fsf_req);
936 break; 899 break;
937 900
938 case FSF_STATUS_READ_INCOMING_ELS: 901 case FSF_STATUS_READ_INCOMING_ELS:
939 debug_text_event(adapter->erp_dbf, 3, "unsol_els:");
940 zfcp_fsf_incoming_els(fsf_req); 902 zfcp_fsf_incoming_els(fsf_req);
941 break; 903 break;
942 904
943 case FSF_STATUS_READ_SENSE_DATA_AVAIL: 905 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
944 debug_text_event(adapter->erp_dbf, 3, "unsol_sense:");
945 ZFCP_LOG_INFO("unsolicited sense data received (adapter %s)\n", 906 ZFCP_LOG_INFO("unsolicited sense data received (adapter %s)\n",
946 zfcp_get_busid_by_adapter(adapter)); 907 zfcp_get_busid_by_adapter(adapter));
947 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, (char *) status_buffer,
948 sizeof(struct fsf_status_read_buffer));
949 break; 908 break;
950 909
951 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: 910 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
952 debug_text_event(adapter->erp_dbf, 3, "unsol_bit_err:");
953 ZFCP_LOG_NORMAL("Bit error threshold data received:\n"); 911 ZFCP_LOG_NORMAL("Bit error threshold data received:\n");
954 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL, 912 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
955 (char *) status_buffer, 913 (char *) status_buffer,
@@ -957,17 +915,32 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
957 break; 915 break;
958 916
959 case FSF_STATUS_READ_LINK_DOWN: 917 case FSF_STATUS_READ_LINK_DOWN:
960 debug_text_event(adapter->erp_dbf, 0, "unsol_link_down:"); 918 switch (status_buffer->status_subtype) {
961 ZFCP_LOG_INFO("Local link to adapter %s is down\n", 919 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
920 ZFCP_LOG_INFO("Physical link to adapter %s is down\n",
921 zfcp_get_busid_by_adapter(adapter));
922 break;
923 case FSF_STATUS_READ_SUB_FDISC_FAILED:
924 ZFCP_LOG_INFO("Local link to adapter %s is down "
925 "due to failed FDISC login\n",
962 zfcp_get_busid_by_adapter(adapter)); 926 zfcp_get_busid_by_adapter(adapter));
963 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 927 break;
964 &adapter->status); 928 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
965 zfcp_erp_adapter_failed(adapter); 929 ZFCP_LOG_INFO("Local link to adapter %s is down "
930 "due to firmware update on adapter\n",
931 zfcp_get_busid_by_adapter(adapter));
932 break;
933 default:
934 ZFCP_LOG_INFO("Local link to adapter %s is down "
935 "due to unknown reason\n",
936 zfcp_get_busid_by_adapter(adapter));
937 };
938 zfcp_fsf_link_down_info_eval(adapter,
939 (struct fsf_link_down_info *) &status_buffer->payload);
966 break; 940 break;
967 941
968 case FSF_STATUS_READ_LINK_UP: 942 case FSF_STATUS_READ_LINK_UP:
969 debug_text_event(adapter->erp_dbf, 2, "unsol_link_up:"); 943 ZFCP_LOG_NORMAL("Local link to adapter %s was replugged. "
970 ZFCP_LOG_INFO("Local link to adapter %s was replugged. "
971 "Restarting operations on this adapter\n", 944 "Restarting operations on this adapter\n",
972 zfcp_get_busid_by_adapter(adapter)); 945 zfcp_get_busid_by_adapter(adapter));
973 /* All ports should be marked as ready to run again */ 946 /* All ports should be marked as ready to run again */
@@ -980,35 +953,40 @@ zfcp_fsf_status_read_handler(struct zfcp_fsf_req *fsf_req)
980 break; 953 break;
981 954
982 case FSF_STATUS_READ_CFDC_UPDATED: 955 case FSF_STATUS_READ_CFDC_UPDATED:
983 debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_update:"); 956 ZFCP_LOG_NORMAL("CFDC has been updated on the adapter %s\n",
984 ZFCP_LOG_INFO("CFDC has been updated on the adapter %s\n",
985 zfcp_get_busid_by_adapter(adapter)); 957 zfcp_get_busid_by_adapter(adapter));
986 zfcp_erp_adapter_access_changed(adapter); 958 zfcp_erp_adapter_access_changed(adapter);
987 break; 959 break;
988 960
989 case FSF_STATUS_READ_CFDC_HARDENED: 961 case FSF_STATUS_READ_CFDC_HARDENED:
990 debug_text_event(adapter->erp_dbf, 2, "unsol_cfdc_harden:");
991 switch (status_buffer->status_subtype) { 962 switch (status_buffer->status_subtype) {
992 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE: 963 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE:
993 ZFCP_LOG_INFO("CFDC of adapter %s saved on SE\n", 964 ZFCP_LOG_NORMAL("CFDC of adapter %s saved on SE\n",
994 zfcp_get_busid_by_adapter(adapter)); 965 zfcp_get_busid_by_adapter(adapter));
995 break; 966 break;
996 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2: 967 case FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2:
997 ZFCP_LOG_INFO("CFDC of adapter %s has been copied " 968 ZFCP_LOG_NORMAL("CFDC of adapter %s has been copied "
998 "to the secondary SE\n", 969 "to the secondary SE\n",
999 zfcp_get_busid_by_adapter(adapter)); 970 zfcp_get_busid_by_adapter(adapter));
1000 break; 971 break;
1001 default: 972 default:
1002 ZFCP_LOG_INFO("CFDC of adapter %s has been hardened\n", 973 ZFCP_LOG_NORMAL("CFDC of adapter %s has been hardened\n",
1003 zfcp_get_busid_by_adapter(adapter)); 974 zfcp_get_busid_by_adapter(adapter));
1004 } 975 }
1005 break; 976 break;
1006 977
978 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
979 debug_text_event(adapter->erp_dbf, 2, "unsol_features:");
980 ZFCP_LOG_INFO("List of supported features on adapter %s has "
981 "been changed from 0x%08X to 0x%08X\n",
982 zfcp_get_busid_by_adapter(adapter),
983 *(u32*) (status_buffer->payload + 4),
984 *(u32*) (status_buffer->payload));
985 adapter->adapter_features = *(u32*) status_buffer->payload;
986 break;
987
1007 default: 988 default:
1008 debug_text_event(adapter->erp_dbf, 0, "unsol_unknown:"); 989 ZFCP_LOG_NORMAL("warning: An unsolicited status packet of unknown "
1009 debug_exception(adapter->erp_dbf, 0,
1010 &status_buffer->status_type, sizeof (u32));
1011 ZFCP_LOG_NORMAL("bug: An unsolicited status packet of unknown "
1012 "type was received (debug info 0x%x)\n", 990 "type was received (debug info 0x%x)\n",
1013 status_buffer->status_type); 991 status_buffer->status_type);
1014 ZFCP_LOG_DEBUG("Dump of status_read_buffer %p:\n", 992 ZFCP_LOG_DEBUG("Dump of status_read_buffer %p:\n",
@@ -1093,7 +1071,7 @@ zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
1093 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 1071 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1094 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1072 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1095 1073
1096 fsf_req->data.abort_fcp_command.unit = unit; 1074 fsf_req->data = (unsigned long) unit;
1097 1075
1098 /* set handles of unit and its parent port in QTCB */ 1076 /* set handles of unit and its parent port in QTCB */
1099 fsf_req->qtcb->header.lun_handle = unit->handle; 1077 fsf_req->qtcb->header.lun_handle = unit->handle;
@@ -1139,7 +1117,7 @@ static int
1139zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req) 1117zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
1140{ 1118{
1141 int retval = -EINVAL; 1119 int retval = -EINVAL;
1142 struct zfcp_unit *unit = new_fsf_req->data.abort_fcp_command.unit; 1120 struct zfcp_unit *unit;
1143 unsigned char status_qual = 1121 unsigned char status_qual =
1144 new_fsf_req->qtcb->header.fsf_status_qual.word[0]; 1122 new_fsf_req->qtcb->header.fsf_status_qual.word[0];
1145 1123
@@ -1150,6 +1128,8 @@ zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *new_fsf_req)
1150 goto skip_fsfstatus; 1128 goto skip_fsfstatus;
1151 } 1129 }
1152 1130
1131 unit = (struct zfcp_unit *) new_fsf_req->data;
1132
1153 /* evaluate FSF status in QTCB */ 1133 /* evaluate FSF status in QTCB */
1154 switch (new_fsf_req->qtcb->header.fsf_status) { 1134 switch (new_fsf_req->qtcb->header.fsf_status) {
1155 1135
@@ -1364,7 +1344,7 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1364 sbale[3].addr = zfcp_sg_to_address(&ct->resp[0]); 1344 sbale[3].addr = zfcp_sg_to_address(&ct->resp[0]);
1365 sbale[3].length = ct->resp[0].length; 1345 sbale[3].length = ct->resp[0].length;
1366 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; 1346 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1367 } else if (adapter->supported_features & 1347 } else if (adapter->adapter_features &
1368 FSF_FEATURE_ELS_CT_CHAINED_SBALS) { 1348 FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
1369 /* try to use chained SBALs */ 1349 /* try to use chained SBALs */
1370 bytes = zfcp_qdio_sbals_from_sg(fsf_req, 1350 bytes = zfcp_qdio_sbals_from_sg(fsf_req,
@@ -1414,7 +1394,9 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1414 fsf_req->qtcb->header.port_handle = port->handle; 1394 fsf_req->qtcb->header.port_handle = port->handle;
1415 fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class; 1395 fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
1416 fsf_req->qtcb->bottom.support.timeout = ct->timeout; 1396 fsf_req->qtcb->bottom.support.timeout = ct->timeout;
1417 fsf_req->data.send_ct = ct; 1397 fsf_req->data = (unsigned long) ct;
1398
1399 zfcp_san_dbf_event_ct_request(fsf_req);
1418 1400
1419 /* start QDIO request for this FSF request */ 1401 /* start QDIO request for this FSF request */
1420 ret = zfcp_fsf_req_send(fsf_req, ct->timer); 1402 ret = zfcp_fsf_req_send(fsf_req, ct->timer);
@@ -1445,10 +1427,10 @@ zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
1445 * zfcp_fsf_send_ct_handler - handler for Generic Service requests 1427 * zfcp_fsf_send_ct_handler - handler for Generic Service requests
1446 * @fsf_req: pointer to struct zfcp_fsf_req 1428 * @fsf_req: pointer to struct zfcp_fsf_req
1447 * 1429 *
1448 * Data specific for the Generic Service request is passed by 1430 * Data specific for the Generic Service request is passed using
1449 * fsf_req->data.send_ct 1431 * fsf_req->data. There we find the pointer to struct zfcp_send_ct.
1450 * Usually a specific handler for the request is called via 1432 * Usually a specific handler for the CT request is called which is
1451 * fsf_req->data.send_ct->handler at end of this function. 1433 * found in this structure.
1452 */ 1434 */
1453static int 1435static int
1454zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req) 1436zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
@@ -1462,7 +1444,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
1462 u16 subtable, rule, counter; 1444 u16 subtable, rule, counter;
1463 1445
1464 adapter = fsf_req->adapter; 1446 adapter = fsf_req->adapter;
1465 send_ct = fsf_req->data.send_ct; 1447 send_ct = (struct zfcp_send_ct *) fsf_req->data;
1466 port = send_ct->port; 1448 port = send_ct->port;
1467 header = &fsf_req->qtcb->header; 1449 header = &fsf_req->qtcb->header;
1468 bottom = &fsf_req->qtcb->bottom.support; 1450 bottom = &fsf_req->qtcb->bottom.support;
@@ -1474,6 +1456,7 @@ zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *fsf_req)
1474 switch (header->fsf_status) { 1456 switch (header->fsf_status) {
1475 1457
1476 case FSF_GOOD: 1458 case FSF_GOOD:
1459 zfcp_san_dbf_event_ct_response(fsf_req);
1477 retval = 0; 1460 retval = 0;
1478 break; 1461 break;
1479 1462
@@ -1634,7 +1617,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1634{ 1617{
1635 volatile struct qdio_buffer_element *sbale; 1618 volatile struct qdio_buffer_element *sbale;
1636 struct zfcp_fsf_req *fsf_req; 1619 struct zfcp_fsf_req *fsf_req;
1637 fc_id_t d_id; 1620 u32 d_id;
1638 struct zfcp_adapter *adapter; 1621 struct zfcp_adapter *adapter;
1639 unsigned long lock_flags; 1622 unsigned long lock_flags;
1640 int bytes; 1623 int bytes;
@@ -1664,7 +1647,7 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1664 sbale[3].addr = zfcp_sg_to_address(&els->resp[0]); 1647 sbale[3].addr = zfcp_sg_to_address(&els->resp[0]);
1665 sbale[3].length = els->resp[0].length; 1648 sbale[3].length = els->resp[0].length;
1666 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY; 1649 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
1667 } else if (adapter->supported_features & 1650 } else if (adapter->adapter_features &
1668 FSF_FEATURE_ELS_CT_CHAINED_SBALS) { 1651 FSF_FEATURE_ELS_CT_CHAINED_SBALS) {
1669 /* try to use chained SBALs */ 1652 /* try to use chained SBALs */
1670 bytes = zfcp_qdio_sbals_from_sg(fsf_req, 1653 bytes = zfcp_qdio_sbals_from_sg(fsf_req,
@@ -1714,10 +1697,12 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1714 fsf_req->qtcb->bottom.support.d_id = d_id; 1697 fsf_req->qtcb->bottom.support.d_id = d_id;
1715 fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class; 1698 fsf_req->qtcb->bottom.support.service_class = adapter->fc_service_class;
1716 fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT; 1699 fsf_req->qtcb->bottom.support.timeout = ZFCP_ELS_TIMEOUT;
1717 fsf_req->data.send_els = els; 1700 fsf_req->data = (unsigned long) els;
1718 1701
1719 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 1702 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
1720 1703
1704 zfcp_san_dbf_event_els_request(fsf_req);
1705
1721 /* start QDIO request for this FSF request */ 1706 /* start QDIO request for this FSF request */
1722 ret = zfcp_fsf_req_send(fsf_req, els->timer); 1707 ret = zfcp_fsf_req_send(fsf_req, els->timer);
1723 if (ret) { 1708 if (ret) {
@@ -1746,23 +1731,23 @@ zfcp_fsf_send_els(struct zfcp_send_els *els)
1746 * zfcp_fsf_send_els_handler - handler for ELS commands 1731 * zfcp_fsf_send_els_handler - handler for ELS commands
1747 * @fsf_req: pointer to struct zfcp_fsf_req 1732 * @fsf_req: pointer to struct zfcp_fsf_req
1748 * 1733 *
1749 * Data specific for the ELS command is passed by 1734 * Data specific for the ELS command is passed using
1750 * fsf_req->data.send_els 1735 * fsf_req->data. There we find the pointer to struct zfcp_send_els.
1751 * Usually a specific handler for the command is called via 1736 * Usually a specific handler for the ELS command is called which is
1752 * fsf_req->data.send_els->handler at end of this function. 1737 * found in this structure.
1753 */ 1738 */
1754static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req) 1739static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
1755{ 1740{
1756 struct zfcp_adapter *adapter; 1741 struct zfcp_adapter *adapter;
1757 struct zfcp_port *port; 1742 struct zfcp_port *port;
1758 fc_id_t d_id; 1743 u32 d_id;
1759 struct fsf_qtcb_header *header; 1744 struct fsf_qtcb_header *header;
1760 struct fsf_qtcb_bottom_support *bottom; 1745 struct fsf_qtcb_bottom_support *bottom;
1761 struct zfcp_send_els *send_els; 1746 struct zfcp_send_els *send_els;
1762 int retval = -EINVAL; 1747 int retval = -EINVAL;
1763 u16 subtable, rule, counter; 1748 u16 subtable, rule, counter;
1764 1749
1765 send_els = fsf_req->data.send_els; 1750 send_els = (struct zfcp_send_els *) fsf_req->data;
1766 adapter = send_els->adapter; 1751 adapter = send_els->adapter;
1767 port = send_els->port; 1752 port = send_els->port;
1768 d_id = send_els->d_id; 1753 d_id = send_els->d_id;
@@ -1775,6 +1760,7 @@ static int zfcp_fsf_send_els_handler(struct zfcp_fsf_req *fsf_req)
1775 switch (header->fsf_status) { 1760 switch (header->fsf_status) {
1776 1761
1777 case FSF_GOOD: 1762 case FSF_GOOD:
1763 zfcp_san_dbf_event_els_response(fsf_req);
1778 retval = 0; 1764 retval = 0;
1779 break; 1765 break;
1780 1766
@@ -1954,7 +1940,9 @@ zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1954 1940
1955 erp_action->fsf_req->erp_action = erp_action; 1941 erp_action->fsf_req->erp_action = erp_action;
1956 erp_action->fsf_req->qtcb->bottom.config.feature_selection = 1942 erp_action->fsf_req->qtcb->bottom.config.feature_selection =
1957 (FSF_FEATURE_CFDC | FSF_FEATURE_LUN_SHARING); 1943 FSF_FEATURE_CFDC |
1944 FSF_FEATURE_LUN_SHARING |
1945 FSF_FEATURE_UPDATE_ALERT;
1958 1946
1959 /* start QDIO request for this FSF request */ 1947 /* start QDIO request for this FSF request */
1960 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer); 1948 retval = zfcp_fsf_req_send(erp_action->fsf_req, &erp_action->timer);
@@ -1990,29 +1978,36 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
1990{ 1978{
1991 struct fsf_qtcb_bottom_config *bottom; 1979 struct fsf_qtcb_bottom_config *bottom;
1992 struct zfcp_adapter *adapter = fsf_req->adapter; 1980 struct zfcp_adapter *adapter = fsf_req->adapter;
1981 struct Scsi_Host *shost = adapter->scsi_host;
1993 1982
1994 bottom = &fsf_req->qtcb->bottom.config; 1983 bottom = &fsf_req->qtcb->bottom.config;
1995 ZFCP_LOG_DEBUG("low/high QTCB version 0x%x/0x%x of FSF\n", 1984 ZFCP_LOG_DEBUG("low/high QTCB version 0x%x/0x%x of FSF\n",
1996 bottom->low_qtcb_version, bottom->high_qtcb_version); 1985 bottom->low_qtcb_version, bottom->high_qtcb_version);
1997 adapter->fsf_lic_version = bottom->lic_version; 1986 adapter->fsf_lic_version = bottom->lic_version;
1998 adapter->supported_features = bottom->supported_features; 1987 adapter->adapter_features = bottom->adapter_features;
1988 adapter->connection_features = bottom->connection_features;
1999 adapter->peer_wwpn = 0; 1989 adapter->peer_wwpn = 0;
2000 adapter->peer_wwnn = 0; 1990 adapter->peer_wwnn = 0;
2001 adapter->peer_d_id = 0; 1991 adapter->peer_d_id = 0;
2002 1992
2003 if (xchg_ok) { 1993 if (xchg_ok) {
2004 adapter->wwnn = bottom->nport_serv_param.wwnn; 1994 fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
2005 adapter->wwpn = bottom->nport_serv_param.wwpn; 1995 fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
2006 adapter->s_id = bottom->s_id & ZFCP_DID_MASK; 1996 fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
1997 fc_host_speed(shost) = bottom->fc_link_speed;
1998 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
2007 adapter->fc_topology = bottom->fc_topology; 1999 adapter->fc_topology = bottom->fc_topology;
2008 adapter->fc_link_speed = bottom->fc_link_speed;
2009 adapter->hydra_version = bottom->adapter_type; 2000 adapter->hydra_version = bottom->adapter_type;
2001 if (adapter->physical_wwpn == 0)
2002 adapter->physical_wwpn = fc_host_port_name(shost);
2003 if (adapter->physical_s_id == 0)
2004 adapter->physical_s_id = fc_host_port_id(shost);
2010 } else { 2005 } else {
2011 adapter->wwnn = 0; 2006 fc_host_node_name(shost) = 0;
2012 adapter->wwpn = 0; 2007 fc_host_port_name(shost) = 0;
2013 adapter->s_id = 0; 2008 fc_host_port_id(shost) = 0;
2009 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
2014 adapter->fc_topology = 0; 2010 adapter->fc_topology = 0;
2015 adapter->fc_link_speed = 0;
2016 adapter->hydra_version = 0; 2011 adapter->hydra_version = 0;
2017 } 2012 }
2018 2013
@@ -2022,26 +2017,28 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
2022 adapter->peer_wwnn = bottom->plogi_payload.wwnn; 2017 adapter->peer_wwnn = bottom->plogi_payload.wwnn;
2023 } 2018 }
2024 2019
2025 if(adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT){ 2020 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
2026 adapter->hardware_version = bottom->hardware_version; 2021 adapter->hardware_version = bottom->hardware_version;
2027 memcpy(adapter->serial_number, bottom->serial_number, 17); 2022 memcpy(fc_host_serial_number(shost), bottom->serial_number,
2028 EBCASC(adapter->serial_number, sizeof(adapter->serial_number)); 2023 min(FC_SERIAL_NUMBER_SIZE, 17));
2024 EBCASC(fc_host_serial_number(shost),
2025 min(FC_SERIAL_NUMBER_SIZE, 17));
2029 } 2026 }
2030 2027
2031 ZFCP_LOG_NORMAL("The adapter %s reported the following characteristics:\n" 2028 ZFCP_LOG_NORMAL("The adapter %s reported the following characteristics:\n"
2032 "WWNN 0x%016Lx, " 2029 "WWNN 0x%016Lx, "
2033 "WWPN 0x%016Lx, " 2030 "WWPN 0x%016Lx, "
2034 "S_ID 0x%08x,\n" 2031 "S_ID 0x%08x,\n"
2035 "adapter version 0x%x, " 2032 "adapter version 0x%x, "
2036 "LIC version 0x%x, " 2033 "LIC version 0x%x, "
2037 "FC link speed %d Gb/s\n", 2034 "FC link speed %d Gb/s\n",
2038 zfcp_get_busid_by_adapter(adapter), 2035 zfcp_get_busid_by_adapter(adapter),
2039 adapter->wwnn, 2036 (wwn_t) fc_host_node_name(shost),
2040 adapter->wwpn, 2037 (wwn_t) fc_host_port_name(shost),
2041 (unsigned int) adapter->s_id, 2038 fc_host_port_id(shost),
2042 adapter->hydra_version, 2039 adapter->hydra_version,
2043 adapter->fsf_lic_version, 2040 adapter->fsf_lic_version,
2044 adapter->fc_link_speed); 2041 fc_host_speed(shost));
2045 if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) { 2042 if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
2046 ZFCP_LOG_NORMAL("error: the adapter %s " 2043 ZFCP_LOG_NORMAL("error: the adapter %s "
2047 "only supports newer control block " 2044 "only supports newer control block "
@@ -2062,7 +2059,6 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
2062 zfcp_erp_adapter_shutdown(adapter, 0); 2059 zfcp_erp_adapter_shutdown(adapter, 0);
2063 return -EIO; 2060 return -EIO;
2064 } 2061 }
2065 zfcp_set_fc_host_attrs(adapter);
2066 return 0; 2062 return 0;
2067} 2063}
2068 2064
@@ -2078,11 +2074,12 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2078{ 2074{
2079 struct fsf_qtcb_bottom_config *bottom; 2075 struct fsf_qtcb_bottom_config *bottom;
2080 struct zfcp_adapter *adapter = fsf_req->adapter; 2076 struct zfcp_adapter *adapter = fsf_req->adapter;
2077 struct fsf_qtcb *qtcb = fsf_req->qtcb;
2081 2078
2082 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) 2079 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2083 return -EIO; 2080 return -EIO;
2084 2081
2085 switch (fsf_req->qtcb->header.fsf_status) { 2082 switch (qtcb->header.fsf_status) {
2086 2083
2087 case FSF_GOOD: 2084 case FSF_GOOD:
2088 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 1)) 2085 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 1))
@@ -2112,7 +2109,7 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2112 zfcp_erp_adapter_shutdown(adapter, 0); 2109 zfcp_erp_adapter_shutdown(adapter, 0);
2113 return -EIO; 2110 return -EIO;
2114 case FSF_TOPO_FABRIC: 2111 case FSF_TOPO_FABRIC:
2115 ZFCP_LOG_INFO("Switched fabric fibrechannel " 2112 ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
2116 "network detected at adapter %s.\n", 2113 "network detected at adapter %s.\n",
2117 zfcp_get_busid_by_adapter(adapter)); 2114 zfcp_get_busid_by_adapter(adapter));
2118 break; 2115 break;
@@ -2130,7 +2127,7 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2130 zfcp_erp_adapter_shutdown(adapter, 0); 2127 zfcp_erp_adapter_shutdown(adapter, 0);
2131 return -EIO; 2128 return -EIO;
2132 } 2129 }
2133 bottom = &fsf_req->qtcb->bottom.config; 2130 bottom = &qtcb->bottom.config;
2134 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) { 2131 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
2135 ZFCP_LOG_NORMAL("bug: Maximum QTCB size (%d bytes) " 2132 ZFCP_LOG_NORMAL("bug: Maximum QTCB size (%d bytes) "
2136 "allowed by the adapter %s " 2133 "allowed by the adapter %s "
@@ -2155,12 +2152,10 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2155 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0)) 2152 if (zfcp_fsf_exchange_config_evaluate(fsf_req, 0))
2156 return -EIO; 2153 return -EIO;
2157 2154
2158 ZFCP_LOG_INFO("Local link to adapter %s is down\n", 2155 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
2159 zfcp_get_busid_by_adapter(adapter)); 2156
2160 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | 2157 zfcp_fsf_link_down_info_eval(adapter,
2161 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, 2158 &qtcb->header.fsf_status_qual.link_down_info);
2162 &adapter->status);
2163 zfcp_erp_adapter_failed(adapter);
2164 break; 2159 break;
2165 default: 2160 default:
2166 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf-stat-ng"); 2161 debug_text_event(fsf_req->adapter->erp_dbf, 0, "fsf-stat-ng");
@@ -2174,11 +2169,13 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
2174 2169
2175/** 2170/**
2176 * zfcp_fsf_exchange_port_data - request information about local port 2171 * zfcp_fsf_exchange_port_data - request information about local port
2172 * @erp_action: ERP action for the adapter for which port data is requested
2177 * @adapter: for which port data is requested 2173 * @adapter: for which port data is requested
2178 * @data: response to exchange port data request 2174 * @data: response to exchange port data request
2179 */ 2175 */
2180int 2176int
2181zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter, 2177zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action,
2178 struct zfcp_adapter *adapter,
2182 struct fsf_qtcb_bottom_port *data) 2179 struct fsf_qtcb_bottom_port *data)
2183{ 2180{
2184 volatile struct qdio_buffer_element *sbale; 2181 volatile struct qdio_buffer_element *sbale;
@@ -2187,7 +2184,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
2187 struct zfcp_fsf_req *fsf_req; 2184 struct zfcp_fsf_req *fsf_req;
2188 struct timer_list *timer; 2185 struct timer_list *timer;
2189 2186
2190 if(!(adapter->supported_features & FSF_FEATURE_HBAAPI_MANAGEMENT)){ 2187 if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) {
2191 ZFCP_LOG_INFO("error: exchange port data " 2188 ZFCP_LOG_INFO("error: exchange port data "
2192 "command not supported by adapter %s\n", 2189 "command not supported by adapter %s\n",
2193 zfcp_get_busid_by_adapter(adapter)); 2190 zfcp_get_busid_by_adapter(adapter));
@@ -2211,12 +2208,18 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
2211 goto out; 2208 goto out;
2212 } 2209 }
2213 2210
2211 if (erp_action) {
2212 erp_action->fsf_req = fsf_req;
2213 fsf_req->erp_action = erp_action;
2214 }
2215
2216 if (data)
2217 fsf_req->data = (unsigned long) data;
2218
2214 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 2219 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
2215 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; 2220 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2216 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2221 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2217 2222
2218 fsf_req->data.port_data = data;
2219
2220 init_timer(timer); 2223 init_timer(timer);
2221 timer->function = zfcp_fsf_request_timeout_handler; 2224 timer->function = zfcp_fsf_request_timeout_handler;
2222 timer->data = (unsigned long) adapter; 2225 timer->data = (unsigned long) adapter;
@@ -2228,6 +2231,8 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
2228 "command on the adapter %s\n", 2231 "command on the adapter %s\n",
2229 zfcp_get_busid_by_adapter(adapter)); 2232 zfcp_get_busid_by_adapter(adapter));
2230 zfcp_fsf_req_free(fsf_req); 2233 zfcp_fsf_req_free(fsf_req);
2234 if (erp_action)
2235 erp_action->fsf_req = NULL;
2231 write_unlock_irqrestore(&adapter->request_queue.queue_lock, 2236 write_unlock_irqrestore(&adapter->request_queue.queue_lock,
2232 lock_flags); 2237 lock_flags);
2233 goto out; 2238 goto out;
@@ -2256,21 +2261,42 @@ zfcp_fsf_exchange_port_data(struct zfcp_adapter *adapter,
2256static void 2261static void
2257zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req) 2262zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *fsf_req)
2258{ 2263{
2259 struct fsf_qtcb_bottom_port *bottom; 2264 struct zfcp_adapter *adapter = fsf_req->adapter;
2260 struct fsf_qtcb_bottom_port *data = fsf_req->data.port_data; 2265 struct Scsi_Host *shost = adapter->scsi_host;
2266 struct fsf_qtcb *qtcb = fsf_req->qtcb;
2267 struct fsf_qtcb_bottom_port *bottom, *data;
2261 2268
2262 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) 2269 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2263 return; 2270 return;
2264 2271
2265 switch (fsf_req->qtcb->header.fsf_status) { 2272 switch (qtcb->header.fsf_status) {
2266 case FSF_GOOD: 2273 case FSF_GOOD:
2267 bottom = &fsf_req->qtcb->bottom.port; 2274 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2268 memcpy(data, bottom, sizeof(*data)); 2275
2276 bottom = &qtcb->bottom.port;
2277 data = (struct fsf_qtcb_bottom_port*) fsf_req->data;
2278 if (data)
2279 memcpy(data, bottom, sizeof(struct fsf_qtcb_bottom_port));
2280 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
2281 adapter->physical_wwpn = bottom->wwpn;
2282 adapter->physical_s_id = bottom->fc_port_id;
2283 } else {
2284 adapter->physical_wwpn = fc_host_port_name(shost);
2285 adapter->physical_s_id = fc_host_port_id(shost);
2286 }
2287 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
2288 break;
2289
2290 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
2291 atomic_set_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status);
2292
2293 zfcp_fsf_link_down_info_eval(adapter,
2294 &qtcb->header.fsf_status_qual.link_down_info);
2269 break; 2295 break;
2270 2296
2271 default: 2297 default:
2272 debug_text_event(fsf_req->adapter->erp_dbf, 0, "xchg-port-ng"); 2298 debug_text_event(adapter->erp_dbf, 0, "xchg-port-ng");
2273 debug_event(fsf_req->adapter->erp_dbf, 0, 2299 debug_event(adapter->erp_dbf, 0,
2274 &fsf_req->qtcb->header.fsf_status, sizeof(u32)); 2300 &fsf_req->qtcb->header.fsf_status, sizeof(u32));
2275 } 2301 }
2276} 2302}
@@ -2312,7 +2338,7 @@ zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
2312 2338
2313 erp_action->fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id; 2339 erp_action->fsf_req->qtcb->bottom.support.d_id = erp_action->port->d_id;
2314 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status); 2340 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->port->status);
2315 erp_action->fsf_req->data.open_port.port = erp_action->port; 2341 erp_action->fsf_req->data = (unsigned long) erp_action->port;
2316 erp_action->fsf_req->erp_action = erp_action; 2342 erp_action->fsf_req->erp_action = erp_action;
2317 2343
2318 /* start QDIO request for this FSF request */ 2344 /* start QDIO request for this FSF request */
@@ -2353,7 +2379,7 @@ zfcp_fsf_open_port_handler(struct zfcp_fsf_req *fsf_req)
2353 struct fsf_qtcb_header *header; 2379 struct fsf_qtcb_header *header;
2354 u16 subtable, rule, counter; 2380 u16 subtable, rule, counter;
2355 2381
2356 port = fsf_req->data.open_port.port; 2382 port = (struct zfcp_port *) fsf_req->data;
2357 header = &fsf_req->qtcb->header; 2383 header = &fsf_req->qtcb->header;
2358 2384
2359 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 2385 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
@@ -2566,7 +2592,7 @@ zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
2566 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2592 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2567 2593
2568 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status); 2594 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->port->status);
2569 erp_action->fsf_req->data.close_port.port = erp_action->port; 2595 erp_action->fsf_req->data = (unsigned long) erp_action->port;
2570 erp_action->fsf_req->erp_action = erp_action; 2596 erp_action->fsf_req->erp_action = erp_action;
2571 erp_action->fsf_req->qtcb->header.port_handle = 2597 erp_action->fsf_req->qtcb->header.port_handle =
2572 erp_action->port->handle; 2598 erp_action->port->handle;
@@ -2606,7 +2632,7 @@ zfcp_fsf_close_port_handler(struct zfcp_fsf_req *fsf_req)
2606 int retval = -EINVAL; 2632 int retval = -EINVAL;
2607 struct zfcp_port *port; 2633 struct zfcp_port *port;
2608 2634
2609 port = fsf_req->data.close_port.port; 2635 port = (struct zfcp_port *) fsf_req->data;
2610 2636
2611 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 2637 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
2612 /* don't change port status in our bookkeeping */ 2638 /* don't change port status in our bookkeeping */
@@ -2703,8 +2729,8 @@ zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2703 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING, 2729 atomic_set_mask(ZFCP_STATUS_PORT_PHYS_CLOSING,
2704 &erp_action->port->status); 2730 &erp_action->port->status);
2705 /* save a pointer to this port */ 2731 /* save a pointer to this port */
2706 erp_action->fsf_req->data.close_physical_port.port = erp_action->port; 2732 erp_action->fsf_req->data = (unsigned long) erp_action->port;
2707 /* port to be closeed */ 2733 /* port to be closed */
2708 erp_action->fsf_req->qtcb->header.port_handle = 2734 erp_action->fsf_req->qtcb->header.port_handle =
2709 erp_action->port->handle; 2735 erp_action->port->handle;
2710 erp_action->fsf_req->erp_action = erp_action; 2736 erp_action->fsf_req->erp_action = erp_action;
@@ -2747,7 +2773,7 @@ zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *fsf_req)
2747 struct fsf_qtcb_header *header; 2773 struct fsf_qtcb_header *header;
2748 u16 subtable, rule, counter; 2774 u16 subtable, rule, counter;
2749 2775
2750 port = fsf_req->data.close_physical_port.port; 2776 port = (struct zfcp_port *) fsf_req->data;
2751 header = &fsf_req->qtcb->header; 2777 header = &fsf_req->qtcb->header;
2752 2778
2753 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 2779 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
@@ -2908,10 +2934,11 @@ zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
2908 erp_action->port->handle; 2934 erp_action->port->handle;
2909 erp_action->fsf_req->qtcb->bottom.support.fcp_lun = 2935 erp_action->fsf_req->qtcb->bottom.support.fcp_lun =
2910 erp_action->unit->fcp_lun; 2936 erp_action->unit->fcp_lun;
2937 if (!(erp_action->adapter->connection_features & FSF_FEATURE_NPIV_MODE))
2911 erp_action->fsf_req->qtcb->bottom.support.option = 2938 erp_action->fsf_req->qtcb->bottom.support.option =
2912 FSF_OPEN_LUN_SUPPRESS_BOXING; 2939 FSF_OPEN_LUN_SUPPRESS_BOXING;
2913 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status); 2940 atomic_set_mask(ZFCP_STATUS_COMMON_OPENING, &erp_action->unit->status);
2914 erp_action->fsf_req->data.open_unit.unit = erp_action->unit; 2941 erp_action->fsf_req->data = (unsigned long) erp_action->unit;
2915 erp_action->fsf_req->erp_action = erp_action; 2942 erp_action->fsf_req->erp_action = erp_action;
2916 2943
2917 /* start QDIO request for this FSF request */ 2944 /* start QDIO request for this FSF request */
@@ -2955,9 +2982,9 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
2955 struct fsf_qtcb_bottom_support *bottom; 2982 struct fsf_qtcb_bottom_support *bottom;
2956 struct fsf_queue_designator *queue_designator; 2983 struct fsf_queue_designator *queue_designator;
2957 u16 subtable, rule, counter; 2984 u16 subtable, rule, counter;
2958 u32 allowed, exclusive, readwrite; 2985 int exclusive, readwrite;
2959 2986
2960 unit = fsf_req->data.open_unit.unit; 2987 unit = (struct zfcp_unit *) fsf_req->data;
2961 2988
2962 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 2989 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
2963 /* don't change unit status in our bookkeeping */ 2990 /* don't change unit status in our bookkeeping */
@@ -2969,10 +2996,6 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
2969 bottom = &fsf_req->qtcb->bottom.support; 2996 bottom = &fsf_req->qtcb->bottom.support;
2970 queue_designator = &header->fsf_status_qual.fsf_queue_designator; 2997 queue_designator = &header->fsf_status_qual.fsf_queue_designator;
2971 2998
2972 allowed = bottom->lun_access_info & FSF_UNIT_ACCESS_OPEN_LUN_ALLOWED;
2973 exclusive = bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE;
2974 readwrite = bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER;
2975
2976 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 2999 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
2977 ZFCP_STATUS_UNIT_SHARED | 3000 ZFCP_STATUS_UNIT_SHARED |
2978 ZFCP_STATUS_UNIT_READONLY, 3001 ZFCP_STATUS_UNIT_READONLY,
@@ -3146,10 +3169,15 @@ zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *fsf_req)
3146 unit->handle); 3169 unit->handle);
3147 /* mark unit as open */ 3170 /* mark unit as open */
3148 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status); 3171 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
3149 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 3172
3150 ZFCP_STATUS_COMMON_ACCESS_BOXED, 3173 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
3151 &unit->status); 3174 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
3152 if (adapter->supported_features & FSF_FEATURE_LUN_SHARING){ 3175 (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) {
3176 exclusive = (bottom->lun_access_info &
3177 FSF_UNIT_ACCESS_EXCLUSIVE);
3178 readwrite = (bottom->lun_access_info &
3179 FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
3180
3153 if (!exclusive) 3181 if (!exclusive)
3154 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED, 3182 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
3155 &unit->status); 3183 &unit->status);
@@ -3242,7 +3270,7 @@ zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
3242 erp_action->port->handle; 3270 erp_action->port->handle;
3243 erp_action->fsf_req->qtcb->header.lun_handle = erp_action->unit->handle; 3271 erp_action->fsf_req->qtcb->header.lun_handle = erp_action->unit->handle;
3244 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status); 3272 atomic_set_mask(ZFCP_STATUS_COMMON_CLOSING, &erp_action->unit->status);
3245 erp_action->fsf_req->data.close_unit.unit = erp_action->unit; 3273 erp_action->fsf_req->data = (unsigned long) erp_action->unit;
3246 erp_action->fsf_req->erp_action = erp_action; 3274 erp_action->fsf_req->erp_action = erp_action;
3247 3275
3248 /* start QDIO request for this FSF request */ 3276 /* start QDIO request for this FSF request */
@@ -3281,7 +3309,7 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
3281 int retval = -EINVAL; 3309 int retval = -EINVAL;
3282 struct zfcp_unit *unit; 3310 struct zfcp_unit *unit;
3283 3311
3284 unit = fsf_req->data.close_unit.unit; /* restore unit */ 3312 unit = (struct zfcp_unit *) fsf_req->data;
3285 3313
3286 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 3314 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
3287 /* don't change unit status in our bookkeeping */ 3315 /* don't change unit status in our bookkeeping */
@@ -3305,9 +3333,6 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
3305 debug_text_event(fsf_req->adapter->erp_dbf, 1, 3333 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3306 "fsf_s_phand_nv"); 3334 "fsf_s_phand_nv");
3307 zfcp_erp_adapter_reopen(unit->port->adapter, 0); 3335 zfcp_erp_adapter_reopen(unit->port->adapter, 0);
3308 zfcp_cmd_dbf_event_fsf("porthinv", fsf_req,
3309 &fsf_req->qtcb->header.fsf_status_qual,
3310 sizeof (union fsf_status_qual));
3311 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3336 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3312 break; 3337 break;
3313 3338
@@ -3326,9 +3351,6 @@ zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *fsf_req)
3326 debug_text_event(fsf_req->adapter->erp_dbf, 1, 3351 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3327 "fsf_s_lhand_nv"); 3352 "fsf_s_lhand_nv");
3328 zfcp_erp_port_reopen(unit->port, 0); 3353 zfcp_erp_port_reopen(unit->port, 0);
3329 zfcp_cmd_dbf_event_fsf("lunhinv", fsf_req,
3330 &fsf_req->qtcb->header.fsf_status_qual,
3331 sizeof (union fsf_status_qual));
3332 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3354 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3333 break; 3355 break;
3334 3356
@@ -3436,21 +3458,14 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3436 goto failed_req_create; 3458 goto failed_req_create;
3437 } 3459 }
3438 3460
3439 /* 3461 zfcp_unit_get(unit);
3440 * associate FSF request with SCSI request 3462 fsf_req->unit = unit;
3441 * (need this for look up on abort)
3442 */
3443 fsf_req->data.send_fcp_command_task.fsf_req = fsf_req;
3444 scsi_cmnd->host_scribble = (char *) &(fsf_req->data);
3445 3463
3446 /* 3464 /* associate FSF request with SCSI request (for look up on abort) */
3447 * associate SCSI command with FSF request 3465 scsi_cmnd->host_scribble = (char *) fsf_req;
3448 * (need this for look up on normal command completion) 3466
3449 */ 3467 /* associate SCSI command with FSF request */
3450 fsf_req->data.send_fcp_command_task.scsi_cmnd = scsi_cmnd; 3468 fsf_req->data = (unsigned long) scsi_cmnd;
3451 fsf_req->data.send_fcp_command_task.start_jiffies = jiffies;
3452 fsf_req->data.send_fcp_command_task.unit = unit;
3453 ZFCP_LOG_DEBUG("unit=%p, fcp_lun=0x%016Lx\n", unit, unit->fcp_lun);
3454 3469
3455 /* set handles of unit and its parent port in QTCB */ 3470 /* set handles of unit and its parent port in QTCB */
3456 fsf_req->qtcb->header.lun_handle = unit->handle; 3471 fsf_req->qtcb->header.lun_handle = unit->handle;
@@ -3584,6 +3599,7 @@ zfcp_fsf_send_fcp_command_task(struct zfcp_adapter *adapter,
3584 send_failed: 3599 send_failed:
3585 no_fit: 3600 no_fit:
3586 failed_scsi_cmnd: 3601 failed_scsi_cmnd:
3602 zfcp_unit_put(unit);
3587 zfcp_fsf_req_free(fsf_req); 3603 zfcp_fsf_req_free(fsf_req);
3588 fsf_req = NULL; 3604 fsf_req = NULL;
3589 scsi_cmnd->host_scribble = NULL; 3605 scsi_cmnd->host_scribble = NULL;
@@ -3640,7 +3656,7 @@ zfcp_fsf_send_fcp_command_task_management(struct zfcp_adapter *adapter,
3640 * hold a pointer to the unit being target of this 3656 * hold a pointer to the unit being target of this
3641 * task management request 3657 * task management request
3642 */ 3658 */
3643 fsf_req->data.send_fcp_command_task_management.unit = unit; 3659 fsf_req->data = (unsigned long) unit;
3644 3660
3645 /* set FSF related fields in QTCB */ 3661 /* set FSF related fields in QTCB */
3646 fsf_req->qtcb->header.lun_handle = unit->handle; 3662 fsf_req->qtcb->header.lun_handle = unit->handle;
@@ -3706,9 +3722,9 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3706 header = &fsf_req->qtcb->header; 3722 header = &fsf_req->qtcb->header;
3707 3723
3708 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)) 3724 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
3709 unit = fsf_req->data.send_fcp_command_task_management.unit; 3725 unit = (struct zfcp_unit *) fsf_req->data;
3710 else 3726 else
3711 unit = fsf_req->data.send_fcp_command_task.unit; 3727 unit = fsf_req->unit;
3712 3728
3713 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 3729 if (unlikely(fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
3714 /* go directly to calls of special handlers */ 3730 /* go directly to calls of special handlers */
@@ -3765,10 +3781,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3765 debug_text_event(fsf_req->adapter->erp_dbf, 1, 3781 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3766 "fsf_s_hand_mis"); 3782 "fsf_s_hand_mis");
3767 zfcp_erp_adapter_reopen(unit->port->adapter, 0); 3783 zfcp_erp_adapter_reopen(unit->port->adapter, 0);
3768 zfcp_cmd_dbf_event_fsf("handmism",
3769 fsf_req,
3770 &header->fsf_status_qual,
3771 sizeof (union fsf_status_qual));
3772 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3784 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3773 break; 3785 break;
3774 3786
@@ -3789,10 +3801,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3789 debug_text_exception(fsf_req->adapter->erp_dbf, 0, 3801 debug_text_exception(fsf_req->adapter->erp_dbf, 0,
3790 "fsf_s_class_nsup"); 3802 "fsf_s_class_nsup");
3791 zfcp_erp_adapter_shutdown(unit->port->adapter, 0); 3803 zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
3792 zfcp_cmd_dbf_event_fsf("unsclass",
3793 fsf_req,
3794 &header->fsf_status_qual,
3795 sizeof (union fsf_status_qual));
3796 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3804 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3797 break; 3805 break;
3798 3806
@@ -3811,10 +3819,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3811 debug_text_event(fsf_req->adapter->erp_dbf, 1, 3819 debug_text_event(fsf_req->adapter->erp_dbf, 1,
3812 "fsf_s_fcp_lun_nv"); 3820 "fsf_s_fcp_lun_nv");
3813 zfcp_erp_port_reopen(unit->port, 0); 3821 zfcp_erp_port_reopen(unit->port, 0);
3814 zfcp_cmd_dbf_event_fsf("fluninv",
3815 fsf_req,
3816 &header->fsf_status_qual,
3817 sizeof (union fsf_status_qual));
3818 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3822 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3819 break; 3823 break;
3820 3824
@@ -3853,10 +3857,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3853 debug_text_event(fsf_req->adapter->erp_dbf, 0, 3857 debug_text_event(fsf_req->adapter->erp_dbf, 0,
3854 "fsf_s_dir_ind_nv"); 3858 "fsf_s_dir_ind_nv");
3855 zfcp_erp_adapter_shutdown(unit->port->adapter, 0); 3859 zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
3856 zfcp_cmd_dbf_event_fsf("dirinv",
3857 fsf_req,
3858 &header->fsf_status_qual,
3859 sizeof (union fsf_status_qual));
3860 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3860 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3861 break; 3861 break;
3862 3862
@@ -3872,10 +3872,6 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3872 debug_text_event(fsf_req->adapter->erp_dbf, 0, 3872 debug_text_event(fsf_req->adapter->erp_dbf, 0,
3873 "fsf_s_cmd_len_nv"); 3873 "fsf_s_cmd_len_nv");
3874 zfcp_erp_adapter_shutdown(unit->port->adapter, 0); 3874 zfcp_erp_adapter_shutdown(unit->port->adapter, 0);
3875 zfcp_cmd_dbf_event_fsf("cleninv",
3876 fsf_req,
3877 &header->fsf_status_qual,
3878 sizeof (union fsf_status_qual));
3879 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR; 3875 fsf_req->status |= ZFCP_STATUS_FSFREQ_ERROR;
3880 break; 3876 break;
3881 3877
@@ -3947,6 +3943,8 @@ zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *fsf_req)
3947 zfcp_fsf_send_fcp_command_task_management_handler(fsf_req); 3943 zfcp_fsf_send_fcp_command_task_management_handler(fsf_req);
3948 } else { 3944 } else {
3949 retval = zfcp_fsf_send_fcp_command_task_handler(fsf_req); 3945 retval = zfcp_fsf_send_fcp_command_task_handler(fsf_req);
3946 fsf_req->unit = NULL;
3947 zfcp_unit_put(unit);
3950 } 3948 }
3951 return retval; 3949 return retval;
3952} 3950}
@@ -3970,10 +3968,10 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
3970 u32 sns_len; 3968 u32 sns_len;
3971 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu); 3969 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
3972 unsigned long flags; 3970 unsigned long flags;
3973 struct zfcp_unit *unit = fsf_req->data.send_fcp_command_task.unit; 3971 struct zfcp_unit *unit = fsf_req->unit;
3974 3972
3975 read_lock_irqsave(&fsf_req->adapter->abort_lock, flags); 3973 read_lock_irqsave(&fsf_req->adapter->abort_lock, flags);
3976 scpnt = fsf_req->data.send_fcp_command_task.scsi_cmnd; 3974 scpnt = (struct scsi_cmnd *) fsf_req->data;
3977 if (unlikely(!scpnt)) { 3975 if (unlikely(!scpnt)) {
3978 ZFCP_LOG_DEBUG 3976 ZFCP_LOG_DEBUG
3979 ("Command with fsf_req %p is not associated to " 3977 ("Command with fsf_req %p is not associated to "
@@ -4043,7 +4041,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4043 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, 4041 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4044 (char *) &fsf_req->qtcb-> 4042 (char *) &fsf_req->qtcb->
4045 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); 4043 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4046 zfcp_cmd_dbf_event_fsf("clenmis", fsf_req, NULL, 0);
4047 set_host_byte(&scpnt->result, DID_ERROR); 4044 set_host_byte(&scpnt->result, DID_ERROR);
4048 goto skip_fsfstatus; 4045 goto skip_fsfstatus;
4049 case RSP_CODE_FIELD_INVALID: 4046 case RSP_CODE_FIELD_INVALID:
@@ -4062,7 +4059,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4062 (char *) &fsf_req->qtcb-> 4059 (char *) &fsf_req->qtcb->
4063 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); 4060 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4064 set_host_byte(&scpnt->result, DID_ERROR); 4061 set_host_byte(&scpnt->result, DID_ERROR);
4065 zfcp_cmd_dbf_event_fsf("codeinv", fsf_req, NULL, 0);
4066 goto skip_fsfstatus; 4062 goto skip_fsfstatus;
4067 case RSP_CODE_RO_MISMATCH: 4063 case RSP_CODE_RO_MISMATCH:
4068 /* hardware bug */ 4064 /* hardware bug */
@@ -4079,7 +4075,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4079 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, 4075 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4080 (char *) &fsf_req->qtcb-> 4076 (char *) &fsf_req->qtcb->
4081 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); 4077 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4082 zfcp_cmd_dbf_event_fsf("codemism", fsf_req, NULL, 0);
4083 set_host_byte(&scpnt->result, DID_ERROR); 4078 set_host_byte(&scpnt->result, DID_ERROR);
4084 goto skip_fsfstatus; 4079 goto skip_fsfstatus;
4085 default: 4080 default:
@@ -4096,7 +4091,6 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4096 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG, 4091 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_DEBUG,
4097 (char *) &fsf_req->qtcb-> 4092 (char *) &fsf_req->qtcb->
4098 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE); 4093 bottom.io.fcp_cmnd, FSF_FCP_CMND_SIZE);
4099 zfcp_cmd_dbf_event_fsf("undeffcp", fsf_req, NULL, 0);
4100 set_host_byte(&scpnt->result, DID_ERROR); 4094 set_host_byte(&scpnt->result, DID_ERROR);
4101 goto skip_fsfstatus; 4095 goto skip_fsfstatus;
4102 } 4096 }
@@ -4158,19 +4152,17 @@ zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *fsf_req)
4158 skip_fsfstatus: 4152 skip_fsfstatus:
4159 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result); 4153 ZFCP_LOG_DEBUG("scpnt->result =0x%x\n", scpnt->result);
4160 4154
4161 zfcp_cmd_dbf_event_scsi("response", scpnt); 4155 if (scpnt->result != 0)
4156 zfcp_scsi_dbf_event_result("erro", 3, fsf_req->adapter, scpnt);
4157 else if (scpnt->retries > 0)
4158 zfcp_scsi_dbf_event_result("retr", 4, fsf_req->adapter, scpnt);
4159 else
4160 zfcp_scsi_dbf_event_result("norm", 6, fsf_req->adapter, scpnt);
4162 4161
4163 /* cleanup pointer (need this especially for abort) */ 4162 /* cleanup pointer (need this especially for abort) */
4164 scpnt->host_scribble = NULL; 4163 scpnt->host_scribble = NULL;
4165 4164
4166 /*
4167 * NOTE:
4168 * according to the outcome of a discussion on linux-scsi we
4169 * don't need to grab the io_request_lock here since we use
4170 * the new eh
4171 */
4172 /* always call back */ 4165 /* always call back */
4173
4174 (scpnt->scsi_done) (scpnt); 4166 (scpnt->scsi_done) (scpnt);
4175 4167
4176 /* 4168 /*
@@ -4198,8 +4190,7 @@ zfcp_fsf_send_fcp_command_task_management_handler(struct zfcp_fsf_req *fsf_req)
4198 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) 4190 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *)
4199 &(fsf_req->qtcb->bottom.io.fcp_rsp); 4191 &(fsf_req->qtcb->bottom.io.fcp_rsp);
4200 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu); 4192 char *fcp_rsp_info = zfcp_get_fcp_rsp_info_ptr(fcp_rsp_iu);
4201 struct zfcp_unit *unit = 4193 struct zfcp_unit *unit = (struct zfcp_unit *) fsf_req->data;
4202 fsf_req->data.send_fcp_command_task_management.unit;
4203 4194
4204 del_timer(&fsf_req->adapter->scsi_er_timer); 4195 del_timer(&fsf_req->adapter->scsi_er_timer);
4205 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) { 4196 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) {
@@ -4276,7 +4267,7 @@ zfcp_fsf_control_file(struct zfcp_adapter *adapter,
4276 int direction; 4267 int direction;
4277 int retval = 0; 4268 int retval = 0;
4278 4269
4279 if (!(adapter->supported_features & FSF_FEATURE_CFDC)) { 4270 if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) {
4280 ZFCP_LOG_INFO("cfdc not supported (adapter %s)\n", 4271 ZFCP_LOG_INFO("cfdc not supported (adapter %s)\n",
4281 zfcp_get_busid_by_adapter(adapter)); 4272 zfcp_get_busid_by_adapter(adapter));
4282 retval = -EOPNOTSUPP; 4273 retval = -EOPNOTSUPP;
@@ -4549,52 +4540,6 @@ skip_fsfstatus:
4549 return retval; 4540 return retval;
4550} 4541}
4551 4542
4552
4553/*
4554 * function: zfcp_fsf_req_wait_and_cleanup
4555 *
4556 * purpose:
4557 *
4558 * FIXME(design): signal seems to be <0 !!!
4559 * returns: 0 - request completed (*status is valid), cleanup succ.
4560 * <0 - request completed (*status is valid), cleanup failed
4561 * >0 - signal which interrupted waiting (*status invalid),
4562 * request not completed, no cleanup
4563 *
4564 * *status is a copy of status of completed fsf_req
4565 */
4566int
4567zfcp_fsf_req_wait_and_cleanup(struct zfcp_fsf_req *fsf_req,
4568 int interruptible, u32 * status)
4569{
4570 int retval = 0;
4571 int signal = 0;
4572
4573 if (interruptible) {
4574 __wait_event_interruptible(fsf_req->completion_wq,
4575 fsf_req->status &
4576 ZFCP_STATUS_FSFREQ_COMPLETED,
4577 signal);
4578 if (signal) {
4579 ZFCP_LOG_DEBUG("Caught signal %i while waiting for the "
4580 "completion of the request at %p\n",
4581 signal, fsf_req);
4582 retval = signal;
4583 goto out;
4584 }
4585 } else {
4586 __wait_event(fsf_req->completion_wq,
4587 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
4588 }
4589
4590 *status = fsf_req->status;
4591
4592 /* cleanup request */
4593 zfcp_fsf_req_free(fsf_req);
4594 out:
4595 return retval;
4596}
4597
4598static inline int 4543static inline int
4599zfcp_fsf_req_sbal_check(unsigned long *flags, 4544zfcp_fsf_req_sbal_check(unsigned long *flags,
4600 struct zfcp_qdio_queue *queue, int needed) 4545 struct zfcp_qdio_queue *queue, int needed)
@@ -4610,15 +4555,16 @@ zfcp_fsf_req_sbal_check(unsigned long *flags,
4610 * set qtcb pointer in fsf_req and initialize QTCB 4555 * set qtcb pointer in fsf_req and initialize QTCB
4611 */ 4556 */
4612static inline void 4557static inline void
4613zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req, u32 fsf_cmd) 4558zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
4614{ 4559{
4615 if (likely(fsf_req->qtcb != NULL)) { 4560 if (likely(fsf_req->qtcb != NULL)) {
4561 fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no;
4616 fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req; 4562 fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req;
4617 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; 4563 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION;
4618 fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd]; 4564 fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command];
4619 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; 4565 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
4620 fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req; 4566 fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req;
4621 fsf_req->qtcb->header.fsf_command = fsf_cmd; 4567 fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command;
4622 } 4568 }
4623} 4569}
4624 4570
@@ -4686,7 +4632,10 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4686 goto failed_fsf_req; 4632 goto failed_fsf_req;
4687 } 4633 }
4688 4634
4689 zfcp_fsf_req_qtcb_init(fsf_req, fsf_cmd); 4635 fsf_req->adapter = adapter;
4636 fsf_req->fsf_command = fsf_cmd;
4637
4638 zfcp_fsf_req_qtcb_init(fsf_req);
4690 4639
4691 /* initialize waitqueue which may be used to wait on 4640 /* initialize waitqueue which may be used to wait on
4692 this request completion */ 4641 this request completion */
@@ -4708,8 +4657,10 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4708 goto failed_sbals; 4657 goto failed_sbals;
4709 } 4658 }
4710 4659
4711 fsf_req->adapter = adapter; /* pointer to "parent" adapter */ 4660 if (fsf_req->qtcb) {
4712 fsf_req->fsf_command = fsf_cmd; 4661 fsf_req->seq_no = adapter->fsf_req_seq_no;
4662 fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
4663 }
4713 fsf_req->sbal_number = 1; 4664 fsf_req->sbal_number = 1;
4714 fsf_req->sbal_first = req_queue->free_index; 4665 fsf_req->sbal_first = req_queue->free_index;
4715 fsf_req->sbal_curr = req_queue->free_index; 4666 fsf_req->sbal_curr = req_queue->free_index;
@@ -4760,9 +4711,9 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4760 struct zfcp_adapter *adapter; 4711 struct zfcp_adapter *adapter;
4761 struct zfcp_qdio_queue *req_queue; 4712 struct zfcp_qdio_queue *req_queue;
4762 volatile struct qdio_buffer_element *sbale; 4713 volatile struct qdio_buffer_element *sbale;
4714 int inc_seq_no;
4763 int new_distance_from_int; 4715 int new_distance_from_int;
4764 unsigned long flags; 4716 unsigned long flags;
4765 int inc_seq_no = 1;
4766 int retval = 0; 4717 int retval = 0;
4767 4718
4768 adapter = fsf_req->adapter; 4719 adapter = fsf_req->adapter;
@@ -4776,23 +4727,13 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4776 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, 4727 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr,
4777 sbale[1].length); 4728 sbale[1].length);
4778 4729
4779 /* set sequence counter in QTCB */
4780 if (likely(fsf_req->qtcb)) {
4781 fsf_req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
4782 fsf_req->seq_no = adapter->fsf_req_seq_no;
4783 ZFCP_LOG_TRACE("FSF request %p of adapter %s gets "
4784 "FSF sequence counter value of %i\n",
4785 fsf_req,
4786 zfcp_get_busid_by_adapter(adapter),
4787 fsf_req->qtcb->prefix.req_seq_no);
4788 } else
4789 inc_seq_no = 0;
4790
4791 /* put allocated FSF request at list tail */ 4730 /* put allocated FSF request at list tail */
4792 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 4731 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
4793 list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head); 4732 list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head);
4794 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 4733 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
4795 4734
4735 inc_seq_no = (fsf_req->qtcb != NULL);
4736
4796 /* figure out expiration time of timeout and start timeout */ 4737 /* figure out expiration time of timeout and start timeout */
4797 if (unlikely(timer)) { 4738 if (unlikely(timer)) {
4798 timer->expires += jiffies; 4739 timer->expires += jiffies;
@@ -4822,6 +4763,8 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4822 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap if needed */ 4763 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap if needed */
4823 new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req); 4764 new_distance_from_int = zfcp_qdio_determine_pci(req_queue, fsf_req);
4824 4765
4766 fsf_req->issued = get_clock();
4767
4825 retval = do_QDIO(adapter->ccw_device, 4768 retval = do_QDIO(adapter->ccw_device,
4826 QDIO_FLAG_SYNC_OUTPUT, 4769 QDIO_FLAG_SYNC_OUTPUT,
4827 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); 4770 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
@@ -4860,15 +4803,11 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4860 * routines resulting in missing sequence counter values 4803 * routines resulting in missing sequence counter values
4861 * otherwise, 4804 * otherwise,
4862 */ 4805 */
4806
4863 /* Don't increase for unsolicited status */ 4807 /* Don't increase for unsolicited status */
4864 if (likely(inc_seq_no)) { 4808 if (inc_seq_no)
4865 adapter->fsf_req_seq_no++; 4809 adapter->fsf_req_seq_no++;
4866 ZFCP_LOG_TRACE 4810
4867 ("FSF sequence counter value of adapter %s "
4868 "increased to %i\n",
4869 zfcp_get_busid_by_adapter(adapter),
4870 adapter->fsf_req_seq_no);
4871 }
4872 /* count FSF requests pending */ 4811 /* count FSF requests pending */
4873 atomic_inc(&adapter->fsf_reqs_active); 4812 atomic_inc(&adapter->fsf_reqs_active);
4874 } 4813 }
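The zfcp_fsf.c hunks above replace the per-request-type union (fsf_req->data.open_port.port, .close_unit.unit, and so on) with one opaque field: the caller stores whatever context pointer it needs as an unsigned long, and each completion handler casts it back to the expected type. What follows is a minimal, self-contained sketch of that pattern only; struct port and struct fsf_req here are simplified stand-ins, not the driver's structures.

/*
 * Stand-alone illustration of the "unsigned long data" context pattern
 * introduced by the patch above.  Names and fields are hypothetical.
 */
#include <stdio.h>

struct port {                        /* stand-in for struct zfcp_port */
	unsigned long long wwpn;
};

struct fsf_req {                     /* stand-in for struct zfcp_fsf_req */
	unsigned long data;          /* one opaque context field for all request types */
};

static void open_port(struct fsf_req *req, struct port *p)
{
	req->data = (unsigned long) p;                /* stash the context */
}

static void open_port_handler(struct fsf_req *req)
{
	struct port *p = (struct port *) req->data;   /* recover it on completion */

	printf("completed open for port 0x%016llx\n", p->wwpn);
}

int main(void)
{
	struct port p = { .wwpn = 0x5005076300c693cfULL };
	struct fsf_req req;

	open_port(&req, &p);
	open_port_handler(&req);
	return 0;
}

The same idea carries the scsi_cmnd pointer for FCP commands, while the unit pointer moves into its own fsf_req->unit member so both survive an abort.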
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 07140dfda2a7..48719f055952 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -116,6 +116,7 @@
116#define FSF_INVALID_COMMAND_OPTION 0x000000E5 116#define FSF_INVALID_COMMAND_OPTION 0x000000E5
117/* #define FSF_ERROR 0x000000FF */ 117/* #define FSF_ERROR 0x000000FF */
118 118
119#define FSF_PROT_STATUS_QUAL_SIZE 16
119#define FSF_STATUS_QUALIFIER_SIZE 16 120#define FSF_STATUS_QUALIFIER_SIZE 16
120 121
121/* FSF status qualifier, recommendations */ 122/* FSF status qualifier, recommendations */
@@ -139,9 +140,18 @@
139#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004 140#define FSF_SQ_CFDC_SUBTABLE_LUN 0x0004
140 141
141/* FSF status qualifier (most significant 4 bytes), local link down */ 142/* FSF status qualifier (most significant 4 bytes), local link down */
142#define FSF_PSQ_LINK_NOLIGHT 0x00000004 143#define FSF_PSQ_LINK_NO_LIGHT 0x00000004
143#define FSF_PSQ_LINK_WRAPPLUG 0x00000008 144#define FSF_PSQ_LINK_WRAP_PLUG 0x00000008
144#define FSF_PSQ_LINK_NOFCP 0x00000010 145#define FSF_PSQ_LINK_NO_FCP 0x00000010
146#define FSF_PSQ_LINK_FIRMWARE_UPDATE 0x00000020
147#define FSF_PSQ_LINK_INVALID_WWPN 0x00000100
148#define FSF_PSQ_LINK_NO_NPIV_SUPPORT 0x00000200
149#define FSF_PSQ_LINK_NO_FCP_RESOURCES 0x00000400
150#define FSF_PSQ_LINK_NO_FABRIC_RESOURCES 0x00000800
151#define FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE 0x00001000
152#define FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED 0x00002000
153#define FSF_PSQ_LINK_MODE_TABLE_CURRUPTED 0x00004000
154#define FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT 0x00008000
145 155
146/* payload size in status read buffer */ 156/* payload size in status read buffer */
147#define FSF_STATUS_READ_PAYLOAD_SIZE 4032 157#define FSF_STATUS_READ_PAYLOAD_SIZE 4032
@@ -154,15 +164,21 @@
154#define FSF_STATUS_READ_INCOMING_ELS 0x00000002 164#define FSF_STATUS_READ_INCOMING_ELS 0x00000002
155#define FSF_STATUS_READ_SENSE_DATA_AVAIL 0x00000003 165#define FSF_STATUS_READ_SENSE_DATA_AVAIL 0x00000003
156#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD 0x00000004 166#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD 0x00000004
157#define FSF_STATUS_READ_LINK_DOWN 0x00000005 /* FIXME: really? */ 167#define FSF_STATUS_READ_LINK_DOWN 0x00000005
158#define FSF_STATUS_READ_LINK_UP 0x00000006 168#define FSF_STATUS_READ_LINK_UP 0x00000006
159#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A 169#define FSF_STATUS_READ_CFDC_UPDATED 0x0000000A
160#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B 170#define FSF_STATUS_READ_CFDC_HARDENED 0x0000000B
171#define FSF_STATUS_READ_FEATURE_UPDATE_ALERT 0x0000000C
161 172
162/* status subtypes in status read buffer */ 173/* status subtypes in status read buffer */
163#define FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT 0x00000001 174#define FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT 0x00000001
164#define FSF_STATUS_READ_SUB_ERROR_PORT 0x00000002 175#define FSF_STATUS_READ_SUB_ERROR_PORT 0x00000002
165 176
177/* status subtypes for link down */
178#define FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK 0x00000000
179#define FSF_STATUS_READ_SUB_FDISC_FAILED 0x00000001
180#define FSF_STATUS_READ_SUB_FIRMWARE_UPDATE 0x00000002
181
166/* status subtypes for CFDC */ 182/* status subtypes for CFDC */
167#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002 183#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE 0x00000002
168#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F 184#define FSF_STATUS_READ_SUB_CFDC_HARDENED_ON_SE2 0x0000000F
@@ -193,11 +209,15 @@
193#define FSF_QTCB_LOG_SIZE 1024 209#define FSF_QTCB_LOG_SIZE 1024
194 210
195/* channel features */ 211/* channel features */
196#define FSF_FEATURE_QTCB_SUPPRESSION 0x00000001
197#define FSF_FEATURE_CFDC 0x00000002 212#define FSF_FEATURE_CFDC 0x00000002
198#define FSF_FEATURE_LUN_SHARING 0x00000004 213#define FSF_FEATURE_LUN_SHARING 0x00000004
199#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010 214#define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
200#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020 215#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
216#define FSF_FEATURE_UPDATE_ALERT 0x00000100
217
218/* host connection features */
219#define FSF_FEATURE_NPIV_MODE 0x00000001
220#define FSF_FEATURE_VM_ASSIGNED_WWPN 0x00000002
201 221
202/* option */ 222/* option */
203#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001 223#define FSF_OPEN_LUN_SUPPRESS_BOXING 0x00000001
@@ -305,15 +325,23 @@ struct fsf_qual_sequence_error {
305 u32 res1[3]; 325 u32 res1[3];
306} __attribute__ ((packed)); 326} __attribute__ ((packed));
307 327
308struct fsf_qual_locallink_error { 328struct fsf_link_down_info {
309 u32 code; 329 u32 error_code;
310 u32 res1[3]; 330 u32 res1;
331 u8 res2[2];
332 u8 primary_status;
333 u8 ioerr_code;
334 u8 action_code;
335 u8 reason_code;
336 u8 explanation_code;
337 u8 vendor_specific_code;
311} __attribute__ ((packed)); 338} __attribute__ ((packed));
312 339
313union fsf_prot_status_qual { 340union fsf_prot_status_qual {
341 u64 doubleword[FSF_PROT_STATUS_QUAL_SIZE / sizeof(u64)];
314 struct fsf_qual_version_error version_error; 342 struct fsf_qual_version_error version_error;
315 struct fsf_qual_sequence_error sequence_error; 343 struct fsf_qual_sequence_error sequence_error;
316 struct fsf_qual_locallink_error locallink_error; 344 struct fsf_link_down_info link_down_info;
317} __attribute__ ((packed)); 345} __attribute__ ((packed));
318 346
319struct fsf_qtcb_prefix { 347struct fsf_qtcb_prefix {
@@ -331,7 +359,9 @@ union fsf_status_qual {
331 u8 byte[FSF_STATUS_QUALIFIER_SIZE]; 359 u8 byte[FSF_STATUS_QUALIFIER_SIZE];
332 u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)]; 360 u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)];
333 u32 word[FSF_STATUS_QUALIFIER_SIZE / sizeof (u32)]; 361 u32 word[FSF_STATUS_QUALIFIER_SIZE / sizeof (u32)];
362 u64 doubleword[FSF_STATUS_QUALIFIER_SIZE / sizeof(u64)];
334 struct fsf_queue_designator fsf_queue_designator; 363 struct fsf_queue_designator fsf_queue_designator;
364 struct fsf_link_down_info link_down_info;
335} __attribute__ ((packed)); 365} __attribute__ ((packed));
336 366
337struct fsf_qtcb_header { 367struct fsf_qtcb_header {
@@ -406,8 +436,8 @@ struct fsf_qtcb_bottom_config {
406 u32 low_qtcb_version; 436 u32 low_qtcb_version;
407 u32 max_qtcb_size; 437 u32 max_qtcb_size;
408 u32 max_data_transfer_size; 438 u32 max_data_transfer_size;
409 u32 supported_features; 439 u32 adapter_features;
410 u8 res1[4]; 440 u32 connection_features;
411 u32 fc_topology; 441 u32 fc_topology;
412 u32 fc_link_speed; 442 u32 fc_link_speed;
413 u32 adapter_type; 443 u32 adapter_type;
@@ -425,7 +455,7 @@ struct fsf_qtcb_bottom_config {
425} __attribute__ ((packed)); 455} __attribute__ ((packed));
426 456
427struct fsf_qtcb_bottom_port { 457struct fsf_qtcb_bottom_port {
428 u8 res1[8]; 458 u64 wwpn;
429 u32 fc_port_id; 459 u32 fc_port_id;
430 u32 port_type; 460 u32 port_type;
431 u32 port_state; 461 u32 port_state;
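The zfcp_fsf.h changes let the same 16 status-qualifier bytes be read either as raw words/doublewords or as the new fsf_link_down_info layout that the link-down handling above consumes. A small stand-alone sketch of that overlay, assuming GCC-style packed attributes and using stdint types in place of the kernel's u8/u32/u64:

/*
 * Overlay sketch: the struct view and the raw views cover the same
 * 16 bytes, mirroring the union layout added in the hunk above.
 */
#include <stdint.h>
#include <stdio.h>

#define QUAL_SIZE 16

struct link_down_info {
	uint32_t error_code;
	uint32_t res1;
	uint8_t  res2[2];
	uint8_t  primary_status;
	uint8_t  ioerr_code;
	uint8_t  action_code;
	uint8_t  reason_code;
	uint8_t  explanation_code;
	uint8_t  vendor_specific_code;
} __attribute__((packed));

union status_qual {
	uint8_t  byte[QUAL_SIZE];
	uint32_t word[QUAL_SIZE / sizeof(uint32_t)];
	uint64_t doubleword[QUAL_SIZE / sizeof(uint64_t)];
	struct link_down_info link_down_info;
} __attribute__((packed));

int main(void)
{
	/* both sizes print 16: every view addresses the same bytes */
	printf("union size: %zu, link_down_info size: %zu\n",
	       sizeof(union status_qual), sizeof(struct link_down_info));
	return 0;
}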
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 24e16ec331d9..d719f66a29a4 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -54,8 +54,7 @@ static inline int zfcp_qdio_sbals_from_buffer
54static qdio_handler_t zfcp_qdio_request_handler; 54static qdio_handler_t zfcp_qdio_request_handler;
55static qdio_handler_t zfcp_qdio_response_handler; 55static qdio_handler_t zfcp_qdio_response_handler;
56static int zfcp_qdio_handler_error_check(struct zfcp_adapter *, 56static int zfcp_qdio_handler_error_check(struct zfcp_adapter *,
57 unsigned int, 57 unsigned int, unsigned int, unsigned int, int, int);
58 unsigned int, unsigned int);
59 58
60#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO 59#define ZFCP_LOG_AREA ZFCP_LOG_AREA_QDIO
61 60
@@ -214,22 +213,12 @@ zfcp_qdio_allocate(struct zfcp_adapter *adapter)
214 * 213 *
215 */ 214 */
216static inline int 215static inline int
217zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, 216zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
218 unsigned int status, 217 unsigned int qdio_error, unsigned int siga_error,
219 unsigned int qdio_error, unsigned int siga_error) 218 int first_element, int elements_processed)
220{ 219{
221 int retval = 0; 220 int retval = 0;
222 221
223 if (ZFCP_LOG_CHECK(ZFCP_LOG_LEVEL_TRACE)) {
224 if (status & QDIO_STATUS_INBOUND_INT) {
225 ZFCP_LOG_TRACE("status is"
226 " QDIO_STATUS_INBOUND_INT \n");
227 }
228 if (status & QDIO_STATUS_OUTBOUND_INT) {
229 ZFCP_LOG_TRACE("status is"
230 " QDIO_STATUS_OUTBOUND_INT \n");
231 }
232 }
233 if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) { 222 if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
234 retval = -EIO; 223 retval = -EIO;
235 224
@@ -237,9 +226,10 @@ zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter,
237 "qdio_error=0x%x, siga_error=0x%x)\n", 226 "qdio_error=0x%x, siga_error=0x%x)\n",
238 status, qdio_error, siga_error); 227 status, qdio_error, siga_error);
239 228
240 /* Restarting IO on the failed adapter from scratch */ 229 zfcp_hba_dbf_event_qdio(adapter, status, qdio_error, siga_error,
241 debug_text_event(adapter->erp_dbf, 1, "qdio_err"); 230 first_element, elements_processed);
242 /* 231 /*
232 * Restarting IO on the failed adapter from scratch.
243 * Since we have been using this adapter, it is save to assume 233 * Since we have been using this adapter, it is save to assume
244 * that it is not failed but recoverable. The card seems to 234 * that it is not failed but recoverable. The card seems to
245 * report link-up events by self-initiated queue shutdown. 235 * report link-up events by self-initiated queue shutdown.
@@ -282,7 +272,8 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device,
282 first_element, elements_processed); 272 first_element, elements_processed);
283 273
284 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error, 274 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
285 siga_error))) 275 siga_error, first_element,
276 elements_processed)))
286 goto out; 277 goto out;
287 /* 278 /*
288 * we stored address of struct zfcp_adapter data structure 279 * we stored address of struct zfcp_adapter data structure
@@ -334,7 +325,8 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
334 queue = &adapter->response_queue; 325 queue = &adapter->response_queue;
335 326
336 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error, 327 if (unlikely(zfcp_qdio_handler_error_check(adapter, status, qdio_error,
337 siga_error))) 328 siga_error, first_element,
329 elements_processed)))
338 goto out; 330 goto out;
339 331
340 /* 332 /*
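In zfcp_qdio.c the error check now also receives the SBAL window (first_element, elements_processed) so the new trace call can record which buffers were in flight when the error was seen. A rough, self-contained illustration of the widened signature; the trace sink and the status-flag value below are placeholders, not the driver's zfcp_hba_dbf_event_qdio() or the real QDIO constants:

#include <stdio.h>

struct adapter { const char *bus_id; };

/* placeholder trace sink; the real driver logs into an s390 debug feature */
static void trace_qdio_error(struct adapter *a, unsigned int status,
			     unsigned int qdio_error, unsigned int siga_error,
			     int first_element, int elements_processed)
{
	fprintf(stderr, "%s: status=0x%x qdio=0x%x siga=0x%x sbals %d..%d\n",
		a->bus_id, status, qdio_error, siga_error,
		first_element, first_element + elements_processed - 1);
}

static int handler_error_check(struct adapter *a, unsigned int status,
			       unsigned int qdio_error, unsigned int siga_error,
			       int first_element, int elements_processed)
{
	if (!(status & 0x04 /* "look for error" bit, illustrative value only */))
		return 0;
	trace_qdio_error(a, status, qdio_error, siga_error,
			 first_element, elements_processed);
	return -5;	/* -EIO */
}

int main(void)
{
	struct adapter a = { "0.0.4711" };
	return handler_error_check(&a, 0x04, 0, 0, 3, 2) ? 1 : 0;
}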
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 31a76065cf28..3dcd1bfba3b4 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -44,7 +44,8 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
44static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); 44static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
45static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *); 45static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *);
46static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); 46static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
47static int zfcp_task_management_function(struct zfcp_unit *, u8); 47static int zfcp_task_management_function(struct zfcp_unit *, u8,
48 struct scsi_cmnd *);
48 49
49static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int, scsi_id_t, 50static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int, scsi_id_t,
50 scsi_lun_t); 51 scsi_lun_t);
@@ -242,7 +243,10 @@ static void
242zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) 243zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
243{ 244{
244 set_host_byte(&scpnt->result, result); 245 set_host_byte(&scpnt->result, result);
245 zfcp_cmd_dbf_event_scsi("failing", scpnt); 246 if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
247 zfcp_scsi_dbf_event_result("fail", 4,
248 (struct zfcp_adapter*) scpnt->device->host->hostdata[0],
249 scpnt);
246 /* return directly */ 250 /* return directly */
247 scpnt->scsi_done(scpnt); 251 scpnt->scsi_done(scpnt);
248} 252}
@@ -414,67 +418,38 @@ zfcp_port_lookup(struct zfcp_adapter *adapter, int channel, scsi_id_t id)
414 return (struct zfcp_port *) NULL; 418 return (struct zfcp_port *) NULL;
415} 419}
416 420
417/* 421/**
418 * function: zfcp_scsi_eh_abort_handler 422 * zfcp_scsi_eh_abort_handler - abort the specified SCSI command
419 * 423 * @scpnt: pointer to scsi_cmnd to be aborted
420 * purpose: tries to abort the specified (timed out) SCSI command 424 * Return: SUCCESS - command has been aborted and cleaned up in internal
421 * 425 * bookkeeping, SCSI stack won't be called for aborted command
422 * note: We do not need to care for a SCSI command which completes 426 * FAILED - otherwise
423 * normally but late during this abort routine runs.
424 * We are allowed to return late commands to the SCSI stack.
425 * It tracks the state of commands and will handle late commands.
426 * (Usually, the normal completion of late commands is ignored with
427 * respect to the running abort operation. Grep for 'done_late'
428 * in the SCSI stacks sources.)
429 * 427 *
430 * returns: SUCCESS - command has been aborted and cleaned up in internal 428 * We do not need to care for a SCSI command which completes normally
431 * bookkeeping, 429 * but late during this abort routine runs. We are allowed to return
432 * SCSI stack won't be called for aborted command 430 * late commands to the SCSI stack. It tracks the state of commands and
433 * FAILED - otherwise 431 * will handle late commands. (Usually, the normal completion of late
432 * commands is ignored with respect to the running abort operation.)
434 */ 433 */
435int 434int
436__zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 435zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
437{ 436{
437 struct Scsi_Host *scsi_host;
438 struct zfcp_adapter *adapter;
439 struct zfcp_unit *unit;
438 int retval = SUCCESS; 440 int retval = SUCCESS;
439 struct zfcp_fsf_req *new_fsf_req, *old_fsf_req; 441 struct zfcp_fsf_req *new_fsf_req = NULL;
440 struct zfcp_adapter *adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; 442 struct zfcp_fsf_req *old_fsf_req;
441 struct zfcp_unit *unit = (struct zfcp_unit *) scpnt->device->hostdata;
442 struct zfcp_port *port = unit->port;
443 struct Scsi_Host *scsi_host = scpnt->device->host;
444 union zfcp_req_data *req_data = NULL;
445 unsigned long flags; 443 unsigned long flags;
446 u32 status = 0; 444
447 445 scsi_host = scpnt->device->host;
448 /* the components of a abort_dbf record (fixed size record) */ 446 adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
449 u64 dbf_scsi_cmnd = (unsigned long) scpnt; 447 unit = (struct zfcp_unit *) scpnt->device->hostdata;
450 char dbf_opcode[ZFCP_ABORT_DBF_LENGTH];
451 wwn_t dbf_wwn = port->wwpn;
452 fcp_lun_t dbf_fcp_lun = unit->fcp_lun;
453 u64 dbf_retries = scpnt->retries;
454 u64 dbf_allowed = scpnt->allowed;
455 u64 dbf_timeout = 0;
456 u64 dbf_fsf_req = 0;
457 u64 dbf_fsf_status = 0;
458 u64 dbf_fsf_qual[2] = { 0, 0 };
459 char dbf_result[ZFCP_ABORT_DBF_LENGTH] = "##undef";
460
461 memset(dbf_opcode, 0, ZFCP_ABORT_DBF_LENGTH);
462 memcpy(dbf_opcode,
463 scpnt->cmnd,
464 min(scpnt->cmd_len, (unsigned char) ZFCP_ABORT_DBF_LENGTH));
465 448
466 ZFCP_LOG_INFO("aborting scsi_cmnd=%p on adapter %s\n", 449 ZFCP_LOG_INFO("aborting scsi_cmnd=%p on adapter %s\n",
467 scpnt, zfcp_get_busid_by_adapter(adapter)); 450 scpnt, zfcp_get_busid_by_adapter(adapter));
468 451
469 spin_unlock_irq(scsi_host->host_lock); 452 /* avoid race condition between late normal completion and abort */
470
471 /*
472 * Race condition between normal (late) completion and abort has
473 * to be avoided.
474 * The entirity of all accesses to scsi_req have to be atomic.
475 * scsi_req is usually part of the fsf_req and thus we block the
476 * release of fsf_req as long as we need to access scsi_req.
477 */
478 write_lock_irqsave(&adapter->abort_lock, flags); 453 write_lock_irqsave(&adapter->abort_lock, flags);
479 454
480 /* 455 /*
@@ -484,144 +459,47 @@ __zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
484 * this routine returns. (scpnt is parameter passed to this routine 459 * this routine returns. (scpnt is parameter passed to this routine
485 * and must not disappear during abort even on late completion.) 460 * and must not disappear during abort even on late completion.)
486 */ 461 */
487 req_data = (union zfcp_req_data *) scpnt->host_scribble; 462 old_fsf_req = (struct zfcp_fsf_req *) scpnt->host_scribble;
488 /* DEBUG */
489 ZFCP_LOG_DEBUG("req_data=%p\n", req_data);
490 if (!req_data) {
491 ZFCP_LOG_DEBUG("late command completion overtook abort\n");
492 /*
493 * That's it.
494 * Do not initiate abort but return SUCCESS.
495 */
496 write_unlock_irqrestore(&adapter->abort_lock, flags);
497 retval = SUCCESS;
498 strncpy(dbf_result, "##late1", ZFCP_ABORT_DBF_LENGTH);
499 goto out;
500 }
501
502 /* Figure out which fsf_req needs to be aborted. */
503 old_fsf_req = req_data->send_fcp_command_task.fsf_req;
504
505 dbf_fsf_req = (unsigned long) old_fsf_req;
506 dbf_timeout =
507 (jiffies - req_data->send_fcp_command_task.start_jiffies) / HZ;
508
509 ZFCP_LOG_DEBUG("old_fsf_req=%p\n", old_fsf_req);
510 if (!old_fsf_req) { 463 if (!old_fsf_req) {
511 write_unlock_irqrestore(&adapter->abort_lock, flags); 464 write_unlock_irqrestore(&adapter->abort_lock, flags);
512 ZFCP_LOG_NORMAL("bug: no old fsf request found\n"); 465 zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, new_fsf_req);
513 ZFCP_LOG_NORMAL("req_data:\n"); 466 retval = SUCCESS;
514 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
515 (char *) req_data, sizeof (union zfcp_req_data));
516 ZFCP_LOG_NORMAL("scsi_cmnd:\n");
517 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_NORMAL,
518 (char *) scpnt, sizeof (struct scsi_cmnd));
519 retval = FAILED;
520 strncpy(dbf_result, "##bug:r", ZFCP_ABORT_DBF_LENGTH);
521 goto out; 467 goto out;
522 } 468 }
523 old_fsf_req->data.send_fcp_command_task.scsi_cmnd = NULL; 469 old_fsf_req->data = 0;
524 /* mark old request as being aborted */
525 old_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING; 470 old_fsf_req->status |= ZFCP_STATUS_FSFREQ_ABORTING;
526 /*
527 * We have to collect all information (e.g. unit) needed by
528 * zfcp_fsf_abort_fcp_command before calling that routine
529 * since that routine is not allowed to access
530 * fsf_req which it is going to abort.
531 * This is because of we need to release fsf_req_list_lock
532 * before calling zfcp_fsf_abort_fcp_command.
533 * Since this lock will not be held, fsf_req may complete
534 * late and may be released meanwhile.
535 */
536 ZFCP_LOG_DEBUG("unit 0x%016Lx (%p)\n", unit->fcp_lun, unit);
537 471
538 /* 472 /* don't access old_fsf_req after releasing the abort_lock */
539 * We block (call schedule)
540 * That's why we must release the lock and enable the
541 * interrupts before.
542 * On the other hand we do not need the lock anymore since
543 * all critical accesses to scsi_req are done.
544 */
545 write_unlock_irqrestore(&adapter->abort_lock, flags); 473 write_unlock_irqrestore(&adapter->abort_lock, flags);
546 /* call FSF routine which does the abort */ 474 /* call FSF routine which does the abort */
547 new_fsf_req = zfcp_fsf_abort_fcp_command((unsigned long) old_fsf_req, 475 new_fsf_req = zfcp_fsf_abort_fcp_command((unsigned long) old_fsf_req,
548 adapter, unit, 0); 476 adapter, unit, 0);
549 ZFCP_LOG_DEBUG("new_fsf_req=%p\n", new_fsf_req);
550 if (!new_fsf_req) { 477 if (!new_fsf_req) {
478 ZFCP_LOG_INFO("error: initiation of Abort FCP Cmnd failed\n");
551 retval = FAILED; 479 retval = FAILED;
552 ZFCP_LOG_NORMAL("error: initiation of Abort FCP Cmnd "
553 "failed\n");
554 strncpy(dbf_result, "##nores", ZFCP_ABORT_DBF_LENGTH);
555 goto out; 480 goto out;
556 } 481 }
557 482
558 /* wait for completion of abort */ 483 /* wait for completion of abort */
559 ZFCP_LOG_DEBUG("waiting for cleanup...\n");
560#if 1
561 /*
562 * FIXME:
563 * copying zfcp_fsf_req_wait_and_cleanup code is not really nice
564 */
565 __wait_event(new_fsf_req->completion_wq, 484 __wait_event(new_fsf_req->completion_wq,
566 new_fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); 485 new_fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
567 status = new_fsf_req->status; 486
568 dbf_fsf_status = new_fsf_req->qtcb->header.fsf_status;
569 /*
570 * Ralphs special debug load provides timestamps in the FSF
571 * status qualifier. This might be specified later if being
572 * useful for debugging aborts.
573 */
574 dbf_fsf_qual[0] =
575 *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[0];
576 dbf_fsf_qual[1] =
577 *(u64 *) & new_fsf_req->qtcb->header.fsf_status_qual.word[2];
578 zfcp_fsf_req_free(new_fsf_req);
579#else
580 retval = zfcp_fsf_req_wait_and_cleanup(new_fsf_req,
581 ZFCP_UNINTERRUPTIBLE, &status);
582#endif
583 ZFCP_LOG_DEBUG("Waiting for cleanup complete, status=0x%x\n", status);
584 /* status should be valid since signals were not permitted */ 487 /* status should be valid since signals were not permitted */
585 if (status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) { 488 if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) {
489 zfcp_scsi_dbf_event_abort("okay", adapter, scpnt, new_fsf_req);
586 retval = SUCCESS; 490 retval = SUCCESS;
587 strncpy(dbf_result, "##succ", ZFCP_ABORT_DBF_LENGTH); 491 } else if (new_fsf_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) {
588 } else if (status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) { 492 zfcp_scsi_dbf_event_abort("lte2", adapter, scpnt, new_fsf_req);
589 retval = SUCCESS; 493 retval = SUCCESS;
590 strncpy(dbf_result, "##late2", ZFCP_ABORT_DBF_LENGTH);
591 } else { 494 } else {
495 zfcp_scsi_dbf_event_abort("fail", adapter, scpnt, new_fsf_req);
592 retval = FAILED; 496 retval = FAILED;
593 strncpy(dbf_result, "##fail", ZFCP_ABORT_DBF_LENGTH);
594 } 497 }
595 498 zfcp_fsf_req_free(new_fsf_req);
596 out: 499 out:
597 debug_event(adapter->abort_dbf, 1, &dbf_scsi_cmnd, sizeof (u64));
598 debug_event(adapter->abort_dbf, 1, &dbf_opcode, ZFCP_ABORT_DBF_LENGTH);
599 debug_event(adapter->abort_dbf, 1, &dbf_wwn, sizeof (wwn_t));
600 debug_event(adapter->abort_dbf, 1, &dbf_fcp_lun, sizeof (fcp_lun_t));
601 debug_event(adapter->abort_dbf, 1, &dbf_retries, sizeof (u64));
602 debug_event(adapter->abort_dbf, 1, &dbf_allowed, sizeof (u64));
603 debug_event(adapter->abort_dbf, 1, &dbf_timeout, sizeof (u64));
604 debug_event(adapter->abort_dbf, 1, &dbf_fsf_req, sizeof (u64));
605 debug_event(adapter->abort_dbf, 1, &dbf_fsf_status, sizeof (u64));
606 debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[0], sizeof (u64));
607 debug_event(adapter->abort_dbf, 1, &dbf_fsf_qual[1], sizeof (u64));
608 debug_text_event(adapter->abort_dbf, 1, dbf_result);
609
610 spin_lock_irq(scsi_host->host_lock);
611 return retval; 500 return retval;
612} 501}
613 502
614int
615zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
616{
617 int rc;
618 struct Scsi_Host *scsi_host = scpnt->device->host;
619 spin_lock_irq(scsi_host->host_lock);
620 rc = __zfcp_scsi_eh_abort_handler(scpnt);
621 spin_unlock_irq(scsi_host->host_lock);
622 return rc;
623}
624
625/* 503/*
626 * function: zfcp_scsi_eh_device_reset_handler 504 * function: zfcp_scsi_eh_device_reset_handler
627 * 505 *
@@ -651,8 +529,9 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
651 */ 529 */
652 if (!atomic_test_mask(ZFCP_STATUS_UNIT_NOTSUPPUNITRESET, 530 if (!atomic_test_mask(ZFCP_STATUS_UNIT_NOTSUPPUNITRESET,
653 &unit->status)) { 531 &unit->status)) {
654 retval = 532 retval = zfcp_task_management_function(unit,
655 zfcp_task_management_function(unit, FCP_LOGICAL_UNIT_RESET); 533 FCP_LOGICAL_UNIT_RESET,
534 scpnt);
656 if (retval) { 535 if (retval) {
657 ZFCP_LOG_DEBUG("unit reset failed (unit=%p)\n", unit); 536 ZFCP_LOG_DEBUG("unit reset failed (unit=%p)\n", unit);
658 if (retval == -ENOTSUPP) 537 if (retval == -ENOTSUPP)
@@ -668,7 +547,7 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
668 goto out; 547 goto out;
669 } 548 }
670 } 549 }
671 retval = zfcp_task_management_function(unit, FCP_TARGET_RESET); 550 retval = zfcp_task_management_function(unit, FCP_TARGET_RESET, scpnt);
672 if (retval) { 551 if (retval) {
673 ZFCP_LOG_DEBUG("target reset failed (unit=%p)\n", unit); 552 ZFCP_LOG_DEBUG("target reset failed (unit=%p)\n", unit);
674 retval = FAILED; 553 retval = FAILED;
@@ -681,12 +560,12 @@ zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
681} 560}
682 561
683static int 562static int
684zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags) 563zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
564 struct scsi_cmnd *scpnt)
685{ 565{
686 struct zfcp_adapter *adapter = unit->port->adapter; 566 struct zfcp_adapter *adapter = unit->port->adapter;
687 int retval;
688 int status;
689 struct zfcp_fsf_req *fsf_req; 567 struct zfcp_fsf_req *fsf_req;
568 int retval = 0;
690 569
691 /* issue task management function */ 570 /* issue task management function */
692 fsf_req = zfcp_fsf_send_fcp_command_task_management 571 fsf_req = zfcp_fsf_send_fcp_command_task_management
@@ -696,70 +575,63 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags)
696 "failed for unit 0x%016Lx on port 0x%016Lx on " 575 "failed for unit 0x%016Lx on port 0x%016Lx on "
697 "adapter %s\n", unit->fcp_lun, unit->port->wwpn, 576 "adapter %s\n", unit->fcp_lun, unit->port->wwpn,
698 zfcp_get_busid_by_adapter(adapter)); 577 zfcp_get_busid_by_adapter(adapter));
578 zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, scpnt);
699 retval = -ENOMEM; 579 retval = -ENOMEM;
700 goto out; 580 goto out;
701 } 581 }
702 582
703 retval = zfcp_fsf_req_wait_and_cleanup(fsf_req, 583 __wait_event(fsf_req->completion_wq,
704 ZFCP_UNINTERRUPTIBLE, &status); 584 fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
585
705 /* 586 /*
706 * check completion status of task management function 587 * check completion status of task management function
707 * (status should always be valid since no signals permitted)
708 */ 588 */
709 if (status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) 589 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
590 zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt);
710 retval = -EIO; 591 retval = -EIO;
711 else if (status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) 592 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
593 zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt);
712 retval = -ENOTSUPP; 594 retval = -ENOTSUPP;
713 else 595 } else
714 retval = 0; 596 zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt);
597
598 zfcp_fsf_req_free(fsf_req);
715 out: 599 out:
716 return retval; 600 return retval;
717} 601}
718 602
719/* 603/**
720 * function: zfcp_scsi_eh_bus_reset_handler 604 * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter)
721 *
722 * purpose:
723 *
724 * returns:
725 */ 605 */
726int 606int
727zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt) 607zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt)
728{ 608{
729 int retval = 0; 609 struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
730 struct zfcp_unit *unit; 610 struct zfcp_adapter *adapter = unit->port->adapter;
731 611
732 unit = (struct zfcp_unit *) scpnt->device->hostdata;
733 ZFCP_LOG_NORMAL("bus reset because of problems with " 612 ZFCP_LOG_NORMAL("bus reset because of problems with "
734 "unit 0x%016Lx\n", unit->fcp_lun); 613 "unit 0x%016Lx\n", unit->fcp_lun);
735 zfcp_erp_adapter_reopen(unit->port->adapter, 0); 614 zfcp_erp_adapter_reopen(adapter, 0);
736 zfcp_erp_wait(unit->port->adapter); 615 zfcp_erp_wait(adapter);
737 retval = SUCCESS;
738 616
739 return retval; 617 return SUCCESS;
740} 618}
741 619
742/* 620/**
743 * function: zfcp_scsi_eh_host_reset_handler 621 * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter)
744 *
745 * purpose:
746 *
747 * returns:
748 */ 622 */
749int 623int
750zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 624zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
751{ 625{
752 int retval = 0; 626 struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
753 struct zfcp_unit *unit; 627 struct zfcp_adapter *adapter = unit->port->adapter;
754 628
755 unit = (struct zfcp_unit *) scpnt->device->hostdata;
756 ZFCP_LOG_NORMAL("host reset because of problems with " 629 ZFCP_LOG_NORMAL("host reset because of problems with "
757 "unit 0x%016Lx\n", unit->fcp_lun); 630 "unit 0x%016Lx\n", unit->fcp_lun);
758 zfcp_erp_adapter_reopen(unit->port->adapter, 0); 631 zfcp_erp_adapter_reopen(adapter, 0);
759 zfcp_erp_wait(unit->port->adapter); 632 zfcp_erp_wait(adapter);
760 retval = SUCCESS;
761 633
762 return retval; 634 return SUCCESS;
763} 635}
764 636
765/* 637/*
@@ -826,10 +698,16 @@ void
826zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter) 698zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
827{ 699{
828 struct Scsi_Host *shost; 700 struct Scsi_Host *shost;
701 struct zfcp_port *port;
829 702
830 shost = adapter->scsi_host; 703 shost = adapter->scsi_host;
831 if (!shost) 704 if (!shost)
832 return; 705 return;
706 read_lock_irq(&zfcp_data.config_lock);
707 list_for_each_entry(port, &adapter->port_list_head, list)
708 if (port->rport)
709 port->rport = NULL;
710 read_unlock_irq(&zfcp_data.config_lock);
833 fc_remove_host(shost); 711 fc_remove_host(shost);
834 scsi_remove_host(shost); 712 scsi_remove_host(shost);
835 scsi_host_put(shost); 713 scsi_host_put(shost);
@@ -904,18 +782,6 @@ zfcp_get_node_name(struct scsi_target *starget)
904 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 782 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
905} 783}
906 784
907void
908zfcp_set_fc_host_attrs(struct zfcp_adapter *adapter)
909{
910 struct Scsi_Host *shost = adapter->scsi_host;
911
912 fc_host_node_name(shost) = adapter->wwnn;
913 fc_host_port_name(shost) = adapter->wwpn;
914 strncpy(fc_host_serial_number(shost), adapter->serial_number,
915 min(FC_SERIAL_NUMBER_SIZE, 32));
916 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
917}
918
919struct fc_function_template zfcp_transport_functions = { 785struct fc_function_template zfcp_transport_functions = {
920 .get_starget_port_id = zfcp_get_port_id, 786 .get_starget_port_id = zfcp_get_port_id,
921 .get_starget_port_name = zfcp_get_port_name, 787 .get_starget_port_name = zfcp_get_port_name,
@@ -927,7 +793,10 @@ struct fc_function_template zfcp_transport_functions = {
927 .show_host_node_name = 1, 793 .show_host_node_name = 1,
928 .show_host_port_name = 1, 794 .show_host_port_name = 1,
929 .show_host_supported_classes = 1, 795 .show_host_supported_classes = 1,
796 .show_host_maxframe_size = 1,
930 .show_host_serial_number = 1, 797 .show_host_serial_number = 1,
798 .show_host_speed = 1,
799 .show_host_port_id = 1,
931}; 800};
932 801
933/** 802/**
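For reference, the reworked zfcp_task_management_function above reduces to the following completion pattern (a minimal sketch; the wait queue, status bits and free routine are the ones shown in the hunk, the surrounding error handling is unchanged):

	static int zfcp_tmf_wait_sketch(struct zfcp_fsf_req *fsf_req)
	{
		int retval = 0;

		/* sleep uninterruptibly until the FSF layer flags completion */
		__wait_event(fsf_req->completion_wq,
			     fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);

		if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED)
			retval = -EIO;
		else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP)
			retval = -ENOTSUPP;

		/* the caller now frees the request itself instead of letting
		 * zfcp_fsf_req_wait_and_cleanup() do both steps */
		zfcp_fsf_req_free(fsf_req);
		return retval;
	}
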
diff --git a/drivers/s390/scsi/zfcp_sysfs_adapter.c b/drivers/s390/scsi/zfcp_sysfs_adapter.c
index e7345a74800a..0cd435280e7d 100644
--- a/drivers/s390/scsi/zfcp_sysfs_adapter.c
+++ b/drivers/s390/scsi/zfcp_sysfs_adapter.c
@@ -62,21 +62,18 @@ static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, struct devi
62static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL); 62static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_adapter_##_name##_show, NULL);
63 63
64ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status)); 64ZFCP_DEFINE_ADAPTER_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
65ZFCP_DEFINE_ADAPTER_ATTR(wwnn, "0x%016llx\n", adapter->wwnn);
66ZFCP_DEFINE_ADAPTER_ATTR(wwpn, "0x%016llx\n", adapter->wwpn);
67ZFCP_DEFINE_ADAPTER_ATTR(s_id, "0x%06x\n", adapter->s_id);
68ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn); 65ZFCP_DEFINE_ADAPTER_ATTR(peer_wwnn, "0x%016llx\n", adapter->peer_wwnn);
69ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn); 66ZFCP_DEFINE_ADAPTER_ATTR(peer_wwpn, "0x%016llx\n", adapter->peer_wwpn);
70ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id); 67ZFCP_DEFINE_ADAPTER_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
68ZFCP_DEFINE_ADAPTER_ATTR(physical_wwpn, "0x%016llx\n", adapter->physical_wwpn);
69ZFCP_DEFINE_ADAPTER_ATTR(physical_s_id, "0x%06x\n", adapter->physical_s_id);
71ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version); 70ZFCP_DEFINE_ADAPTER_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
72ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version); 71ZFCP_DEFINE_ADAPTER_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
73ZFCP_DEFINE_ADAPTER_ATTR(fc_link_speed, "%d Gb/s\n", adapter->fc_link_speed);
74ZFCP_DEFINE_ADAPTER_ATTR(fc_service_class, "%d\n", adapter->fc_service_class); 72ZFCP_DEFINE_ADAPTER_ATTR(fc_service_class, "%d\n", adapter->fc_service_class);
75ZFCP_DEFINE_ADAPTER_ATTR(fc_topology, "%s\n", 73ZFCP_DEFINE_ADAPTER_ATTR(fc_topology, "%s\n",
76 fc_topologies[adapter->fc_topology]); 74 fc_topologies[adapter->fc_topology]);
77ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n", 75ZFCP_DEFINE_ADAPTER_ATTR(hardware_version, "0x%08x\n",
78 adapter->hardware_version); 76 adapter->hardware_version);
79ZFCP_DEFINE_ADAPTER_ATTR(serial_number, "%17s\n", adapter->serial_number);
80ZFCP_DEFINE_ADAPTER_ATTR(scsi_host_no, "0x%x\n", adapter->scsi_host_no); 77ZFCP_DEFINE_ADAPTER_ATTR(scsi_host_no, "0x%x\n", adapter->scsi_host_no);
81ZFCP_DEFINE_ADAPTER_ATTR(in_recovery, "%d\n", atomic_test_mask 78ZFCP_DEFINE_ADAPTER_ATTR(in_recovery, "%d\n", atomic_test_mask
82 (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status)); 79 (ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status));
@@ -255,21 +252,18 @@ static struct attribute *zfcp_adapter_attrs[] = {
255 &dev_attr_in_recovery.attr, 252 &dev_attr_in_recovery.attr,
256 &dev_attr_port_remove.attr, 253 &dev_attr_port_remove.attr,
257 &dev_attr_port_add.attr, 254 &dev_attr_port_add.attr,
258 &dev_attr_wwnn.attr,
259 &dev_attr_wwpn.attr,
260 &dev_attr_s_id.attr,
261 &dev_attr_peer_wwnn.attr, 255 &dev_attr_peer_wwnn.attr,
262 &dev_attr_peer_wwpn.attr, 256 &dev_attr_peer_wwpn.attr,
263 &dev_attr_peer_d_id.attr, 257 &dev_attr_peer_d_id.attr,
258 &dev_attr_physical_wwpn.attr,
259 &dev_attr_physical_s_id.attr,
264 &dev_attr_card_version.attr, 260 &dev_attr_card_version.attr,
265 &dev_attr_lic_version.attr, 261 &dev_attr_lic_version.attr,
266 &dev_attr_fc_link_speed.attr,
267 &dev_attr_fc_service_class.attr, 262 &dev_attr_fc_service_class.attr,
268 &dev_attr_fc_topology.attr, 263 &dev_attr_fc_topology.attr,
269 &dev_attr_scsi_host_no.attr, 264 &dev_attr_scsi_host_no.attr,
270 &dev_attr_status.attr, 265 &dev_attr_status.attr,
271 &dev_attr_hardware_version.attr, 266 &dev_attr_hardware_version.attr,
272 &dev_attr_serial_number.attr,
273 NULL 267 NULL
274}; 268};
275 269
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index dbad7f35eb0a..24ed5893b4f0 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -14,7 +14,7 @@
14#include <linux/major.h> 14#include <linux/major.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/miscdevice.h> 16#include <linux/miscdevice.h>
17#include <linux/ioport.h> /* request_region, check_region */ 17#include <linux/ioport.h> /* request_region */
18#include <asm/atomic.h> 18#include <asm/atomic.h>
19#include <asm/ebus.h> /* EBus device */ 19#include <asm/ebus.h> /* EBus device */
20#include <asm/oplib.h> /* OpenProm Library */ 20#include <asm/oplib.h> /* OpenProm Library */
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a6ac61611f35..a748fbfb6692 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -60,6 +60,7 @@
60 Remove un-needed eh_abort handler. 60 Remove un-needed eh_abort handler.
61 Add support for embedded firmware error strings. 61 Add support for embedded firmware error strings.
62 2.26.02.003 - Correctly handle single sgl's with use_sg=1. 62 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
63 2.26.02.004 - Add support for 9550SX controllers.
63*/ 64*/
64 65
65#include <linux/module.h> 66#include <linux/module.h>
@@ -82,7 +83,7 @@
82#include "3w-9xxx.h" 83#include "3w-9xxx.h"
83 84
84/* Globals */ 85/* Globals */
85#define TW_DRIVER_VERSION "2.26.02.003" 86#define TW_DRIVER_VERSION "2.26.02.004"
86static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; 87static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
87static unsigned int twa_device_extension_count; 88static unsigned int twa_device_extension_count;
88static int twa_major = -1; 89static int twa_major = -1;
@@ -892,11 +893,6 @@ static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
892 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); 893 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
893 } 894 }
894 895
895 if (status_reg_value & TW_STATUS_SBUF_WRITE_ERROR) {
896 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "SBUF Write Error: clearing");
897 writel(TW_CONTROL_CLEAR_SBUF_WRITE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
898 }
899
900 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) { 896 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
901 if (tw_dev->reset_print == 0) { 897 if (tw_dev->reset_print == 0) {
902 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing"); 898 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
@@ -930,6 +926,36 @@ out:
930 return retval; 926 return retval;
931} /* End twa_empty_response_queue() */ 927} /* End twa_empty_response_queue() */
932 928
929/* This function will clear the pchip/response queue on 9550SX */
930static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
931{
932 u32 status_reg_value, response_que_value;
933 int count = 0, retval = 1;
934
935 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9550SX) {
936 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
937
938 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
939 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
940 if ((response_que_value & TW_9550SX_DRAIN_COMPLETED) == TW_9550SX_DRAIN_COMPLETED) {
941 /* P-chip settle time */
942 msleep(500);
943 retval = 0;
944 goto out;
945 }
946 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
947 count++;
948 }
949 if (count == TW_MAX_RESPONSE_DRAIN)
950 goto out;
951
952 retval = 0;
953 } else
954 retval = 0;
955out:
956 return retval;
957} /* End twa_empty_response_queue_large() */
958
933/* This function passes sense keys from firmware to scsi layer */ 959/* This function passes sense keys from firmware to scsi layer */
934static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host) 960static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
935{ 961{
@@ -1613,8 +1639,16 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1613 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset; 1639 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1614 1640
1615 while (tries < TW_MAX_RESET_TRIES) { 1641 while (tries < TW_MAX_RESET_TRIES) {
1616 if (do_soft_reset) 1642 if (do_soft_reset) {
1617 TW_SOFT_RESET(tw_dev); 1643 TW_SOFT_RESET(tw_dev);
1644 /* Clear pchip/response queue on 9550SX */
1645 if (twa_empty_response_queue_large(tw_dev)) {
1646 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1647 do_soft_reset = 1;
1648 tries++;
1649 continue;
1650 }
1651 }
1618 1652
1619 /* Make sure controller is in a good state */ 1653 /* Make sure controller is in a good state */
1620 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) { 1654 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
@@ -2034,7 +2068,10 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
2034 goto out_free_device_extension; 2068 goto out_free_device_extension;
2035 } 2069 }
2036 2070
2037 mem_addr = pci_resource_start(pdev, 1); 2071 if (pdev->device == PCI_DEVICE_ID_3WARE_9000)
2072 mem_addr = pci_resource_start(pdev, 1);
2073 else
2074 mem_addr = pci_resource_start(pdev, 2);
2038 2075
2039 /* Save base address */ 2076 /* Save base address */
2040 tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE); 2077 tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE);
@@ -2148,6 +2185,8 @@ static void twa_remove(struct pci_dev *pdev)
2148static struct pci_device_id twa_pci_tbl[] __devinitdata = { 2185static struct pci_device_id twa_pci_tbl[] __devinitdata = {
2149 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000, 2186 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2150 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 2187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2188 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2151 { } 2190 { }
2152}; 2191};
2153MODULE_DEVICE_TABLE(pci, twa_pci_tbl); 2192MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
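Two pieces make the new board usable: the ID table entry above lets the driver bind to the 9550SX at all, and the probe hunk then has to pick BAR 2 instead of BAR 1 for the register window. Condensed, with names taken from twa_probe() (nothing new introduced):

	unsigned long mem_addr;

	if (pdev->device == PCI_DEVICE_ID_3WARE_9000)
		mem_addr = pci_resource_start(pdev, 1);	/* 9000 series: BAR 1 */
	else
		mem_addr = pci_resource_start(pdev, 2);	/* 9550SX: BAR 2 */

	tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE);
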
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 8c8ecbed3b58..46f22cdc8298 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -267,7 +267,6 @@ static twa_message_type twa_error_table[] = {
267#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000 267#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
268#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000 268#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
269#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000 269#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
270#define TW_CONTROL_CLEAR_SBUF_WRITE_ERROR 0x00000008
271 270
272/* Status register bit definitions */ 271/* Status register bit definitions */
273#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000 272#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000
@@ -285,9 +284,8 @@ static twa_message_type twa_error_table[] = {
285#define TW_STATUS_MICROCONTROLLER_READY 0x00002000 284#define TW_STATUS_MICROCONTROLLER_READY 0x00002000
286#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000 285#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000
287#define TW_STATUS_EXPECTED_BITS 0x00002000 286#define TW_STATUS_EXPECTED_BITS 0x00002000
288#define TW_STATUS_UNEXPECTED_BITS 0x00F00008 287#define TW_STATUS_UNEXPECTED_BITS 0x00F00000
289#define TW_STATUS_SBUF_WRITE_ERROR 0x00000008 288#define TW_STATUS_VALID_INTERRUPT 0x00DF0000
290#define TW_STATUS_VALID_INTERRUPT 0x00DF0008
291 289
292/* RESPONSE QUEUE BIT DEFINITIONS */ 290/* RESPONSE QUEUE BIT DEFINITIONS */
293#define TW_RESPONSE_ID_MASK 0x00000FF0 291#define TW_RESPONSE_ID_MASK 0x00000FF0
@@ -324,9 +322,9 @@ static twa_message_type twa_error_table[] = {
324 322
325/* Compatibility defines */ 323/* Compatibility defines */
326#define TW_9000_ARCH_ID 0x5 324#define TW_9000_ARCH_ID 0x5
327#define TW_CURRENT_DRIVER_SRL 28 325#define TW_CURRENT_DRIVER_SRL 30
328#define TW_CURRENT_DRIVER_BUILD 9 326#define TW_CURRENT_DRIVER_BUILD 80
329#define TW_CURRENT_DRIVER_BRANCH 4 327#define TW_CURRENT_DRIVER_BRANCH 0
330 328
331/* Phase defines */ 329/* Phase defines */
332#define TW_PHASE_INITIAL 0 330#define TW_PHASE_INITIAL 0
@@ -334,6 +332,7 @@ static twa_message_type twa_error_table[] = {
334#define TW_PHASE_SGLIST 2 332#define TW_PHASE_SGLIST 2
335 333
336/* Misc defines */ 334/* Misc defines */
335#define TW_9550SX_DRAIN_COMPLETED 0xFFFF
337#define TW_SECTOR_SIZE 512 336#define TW_SECTOR_SIZE 512
338#define TW_ALIGNMENT_9000 4 /* 4 bytes */ 337#define TW_ALIGNMENT_9000 4 /* 4 bytes */
339#define TW_ALIGNMENT_9000_SGL 0x3 338#define TW_ALIGNMENT_9000_SGL 0x3
@@ -417,6 +416,9 @@ static twa_message_type twa_error_table[] = {
417#ifndef PCI_DEVICE_ID_3WARE_9000 416#ifndef PCI_DEVICE_ID_3WARE_9000
418#define PCI_DEVICE_ID_3WARE_9000 0x1002 417#define PCI_DEVICE_ID_3WARE_9000 0x1002
419#endif 418#endif
419#ifndef PCI_DEVICE_ID_3WARE_9550SX
420#define PCI_DEVICE_ID_3WARE_9550SX 0x1003
421#endif
420 422
421/* Bitmask macros to eliminate bitfields */ 423/* Bitmask macros to eliminate bitfields */
422 424
@@ -443,6 +445,7 @@ static twa_message_type twa_error_table[] = {
443#define TW_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0x4) 445#define TW_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0x4)
444#define TW_COMMAND_QUEUE_REG_ADDR(x) (sizeof(dma_addr_t) > 4 ? ((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8)) 446#define TW_COMMAND_QUEUE_REG_ADDR(x) (sizeof(dma_addr_t) > 4 ? ((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8))
445#define TW_RESPONSE_QUEUE_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0xC) 447#define TW_RESPONSE_QUEUE_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0xC)
448#define TW_RESPONSE_QUEUE_REG_ADDR_LARGE(x) ((unsigned char __iomem *)x->base_addr + 0x30)
446#define TW_CLEAR_ALL_INTERRUPTS(x) (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x))) 449#define TW_CLEAR_ALL_INTERRUPTS(x) (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
447#define TW_CLEAR_ATTENTION_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x))) 450#define TW_CLEAR_ATTENTION_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
448#define TW_CLEAR_HOST_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x))) 451#define TW_CLEAR_HOST_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
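Putting the new header bits together: after a soft reset the 9550SX is drained by polling the "large" response queue register at base + 0x30 until the 0xFFFF sentinel comes back, then the P-chip is given time to settle. A simplified sketch (the real twa_empty_response_queue_large() above also re-reads the status register and only runs on the 9550SX):

	u32 response;
	int tries = 0;

	do {
		response = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
		if ((response & TW_9550SX_DRAIN_COMPLETED) ==
		    TW_9550SX_DRAIN_COMPLETED) {
			msleep(500);		/* P-chip settle time */
			break;			/* drain finished */
		}
	} while (++tries < TW_MAX_RESPONSE_DRAIN);
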
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1e491ae9a7c5..e2e3d8671930 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_SCSI_DC395x) += dc395x.o
99obj-$(CONFIG_SCSI_DC390T) += tmscsim.o 99obj-$(CONFIG_SCSI_DC390T) += tmscsim.o
100obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o 100obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
101obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ 101obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
102obj-$(CONFIG_MEGARAID_SAS) += megaraid/
102obj-$(CONFIG_SCSI_ACARD) += atp870u.o 103obj-$(CONFIG_SCSI_ACARD) += atp870u.o
103obj-$(CONFIG_SCSI_SUNESP) += esp.o 104obj-$(CONFIG_SCSI_SUNESP) += esp.o
104obj-$(CONFIG_SCSI_GDTH) += gdth.o 105obj-$(CONFIG_SCSI_GDTH) += gdth.o
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a8e3dfcd0dc7..93416f760e5a 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -313,18 +313,37 @@ int aac_get_containers(struct aac_dev *dev)
313 } 313 }
314 dresp = (struct aac_mount *)fib_data(fibptr); 314 dresp = (struct aac_mount *)fib_data(fibptr);
315 315
316 if ((le32_to_cpu(dresp->status) == ST_OK) &&
317 (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
318 dinfo->command = cpu_to_le32(VM_NameServe64);
319 dinfo->count = cpu_to_le32(index);
320 dinfo->type = cpu_to_le32(FT_FILESYS);
321
322 if (fib_send(ContainerCommand,
323 fibptr,
324 sizeof(struct aac_query_mount),
325 FsaNormal,
326 1, 1,
327 NULL, NULL) < 0)
328 continue;
329 } else
330 dresp->mnt[0].capacityhigh = 0;
331
316 dprintk ((KERN_DEBUG 332 dprintk ((KERN_DEBUG
317 "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%u\n", 333 "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%llu\n",
318 (int)index, (int)le32_to_cpu(dresp->status), 334 (int)index, (int)le32_to_cpu(dresp->status),
319 (int)le32_to_cpu(dresp->mnt[0].vol), 335 (int)le32_to_cpu(dresp->mnt[0].vol),
320 (int)le32_to_cpu(dresp->mnt[0].state), 336 (int)le32_to_cpu(dresp->mnt[0].state),
321 (unsigned)le32_to_cpu(dresp->mnt[0].capacity))); 337 ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
338 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32)));
322 if ((le32_to_cpu(dresp->status) == ST_OK) && 339 if ((le32_to_cpu(dresp->status) == ST_OK) &&
323 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && 340 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
324 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { 341 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
325 fsa_dev_ptr[index].valid = 1; 342 fsa_dev_ptr[index].valid = 1;
326 fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol); 343 fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
327 fsa_dev_ptr[index].size = le32_to_cpu(dresp->mnt[0].capacity); 344 fsa_dev_ptr[index].size
345 = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
346 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
328 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) 347 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
329 fsa_dev_ptr[index].ro = 1; 348 fsa_dev_ptr[index].ro = 1;
330 } 349 }
@@ -460,7 +479,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
460 * is updated in the struct fsa_dev_info structure rather than returned. 479 * is updated in the struct fsa_dev_info structure rather than returned.
461 */ 480 */
462 481
463static int probe_container(struct aac_dev *dev, int cid) 482int probe_container(struct aac_dev *dev, int cid)
464{ 483{
465 struct fsa_dev_info *fsa_dev_ptr; 484 struct fsa_dev_info *fsa_dev_ptr;
466 int status; 485 int status;
@@ -497,11 +516,29 @@ static int probe_container(struct aac_dev *dev, int cid)
497 dresp = (struct aac_mount *) fib_data(fibptr); 516 dresp = (struct aac_mount *) fib_data(fibptr);
498 517
499 if ((le32_to_cpu(dresp->status) == ST_OK) && 518 if ((le32_to_cpu(dresp->status) == ST_OK) &&
519 (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
520 dinfo->command = cpu_to_le32(VM_NameServe64);
521 dinfo->count = cpu_to_le32(cid);
522 dinfo->type = cpu_to_le32(FT_FILESYS);
523
524 if (fib_send(ContainerCommand,
525 fibptr,
526 sizeof(struct aac_query_mount),
527 FsaNormal,
528 1, 1,
529 NULL, NULL) < 0)
530 goto error;
531 } else
532 dresp->mnt[0].capacityhigh = 0;
533
534 if ((le32_to_cpu(dresp->status) == ST_OK) &&
500 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && 535 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
501 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { 536 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
502 fsa_dev_ptr[cid].valid = 1; 537 fsa_dev_ptr[cid].valid = 1;
503 fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol); 538 fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol);
504 fsa_dev_ptr[cid].size = le32_to_cpu(dresp->mnt[0].capacity); 539 fsa_dev_ptr[cid].size
540 = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
541 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
505 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) 542 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
506 fsa_dev_ptr[cid].ro = 1; 543 fsa_dev_ptr[cid].ro = 1;
507 } 544 }
@@ -655,7 +692,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
655 fibptr, 692 fibptr,
656 sizeof(*info), 693 sizeof(*info),
657 FsaNormal, 694 FsaNormal,
658 1, 1, 695 -1, 1, /* First `interrupt' command uses special wait */
659 NULL, 696 NULL,
660 NULL); 697 NULL);
661 698
@@ -806,8 +843,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
806 if (!(dev->raw_io_interface)) { 843 if (!(dev->raw_io_interface)) {
807 dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - 844 dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
808 sizeof(struct aac_fibhdr) - 845 sizeof(struct aac_fibhdr) -
809 sizeof(struct aac_write) + sizeof(struct sgmap)) / 846 sizeof(struct aac_write) + sizeof(struct sgentry)) /
810 sizeof(struct sgmap); 847 sizeof(struct sgentry);
811 if (dev->dac_support) { 848 if (dev->dac_support) {
812 /* 849 /*
813 * 38 scatter gather elements 850 * 38 scatter gather elements
@@ -816,8 +853,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
816 (dev->max_fib_size - 853 (dev->max_fib_size -
817 sizeof(struct aac_fibhdr) - 854 sizeof(struct aac_fibhdr) -
818 sizeof(struct aac_write64) + 855 sizeof(struct aac_write64) +
819 sizeof(struct sgmap64)) / 856 sizeof(struct sgentry64)) /
820 sizeof(struct sgmap64); 857 sizeof(struct sgentry64);
821 } 858 }
822 dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; 859 dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
823 if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { 860 if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
@@ -854,7 +891,40 @@ static void io_callback(void *context, struct fib * fibptr)
854 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 891 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
855 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun); 892 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
856 893
857 dprintk((KERN_DEBUG "io_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3], jiffies)); 894 if (nblank(dprintk(x))) {
895 u64 lba;
896 switch (scsicmd->cmnd[0]) {
897 case WRITE_6:
898 case READ_6:
899 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
900 (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
901 break;
902 case WRITE_16:
903 case READ_16:
904 lba = ((u64)scsicmd->cmnd[2] << 56) |
905 ((u64)scsicmd->cmnd[3] << 48) |
906 ((u64)scsicmd->cmnd[4] << 40) |
907 ((u64)scsicmd->cmnd[5] << 32) |
908 ((u64)scsicmd->cmnd[6] << 24) |
909 (scsicmd->cmnd[7] << 16) |
910 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
911 break;
912 case WRITE_12:
913 case READ_12:
914 lba = ((u64)scsicmd->cmnd[2] << 24) |
915 (scsicmd->cmnd[3] << 16) |
916 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
917 break;
918 default:
919 lba = ((u64)scsicmd->cmnd[2] << 24) |
920 (scsicmd->cmnd[3] << 16) |
921 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
922 break;
923 }
924 printk(KERN_DEBUG
925 "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
926 smp_processor_id(), (unsigned long long)lba, jiffies);
927 }
858 928
859 if (fibptr == NULL) 929 if (fibptr == NULL)
860 BUG(); 930 BUG();
@@ -895,7 +965,7 @@ static void io_callback(void *context, struct fib * fibptr)
895 965
896static int aac_read(struct scsi_cmnd * scsicmd, int cid) 966static int aac_read(struct scsi_cmnd * scsicmd, int cid)
897{ 967{
898 u32 lba; 968 u64 lba;
899 u32 count; 969 u32 count;
900 int status; 970 int status;
901 971
@@ -907,23 +977,69 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
907 /* 977 /*
908 * Get block address and transfer length 978 * Get block address and transfer length
909 */ 979 */
910 if (scsicmd->cmnd[0] == READ_6) /* 6 byte command */ 980 switch (scsicmd->cmnd[0]) {
911 { 981 case READ_6:
912 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid)); 982 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid));
913 983
914 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; 984 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
985 (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
915 count = scsicmd->cmnd[4]; 986 count = scsicmd->cmnd[4];
916 987
917 if (count == 0) 988 if (count == 0)
918 count = 256; 989 count = 256;
919 } else { 990 break;
991 case READ_16:
992 dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", cid));
993
994 lba = ((u64)scsicmd->cmnd[2] << 56) |
995 ((u64)scsicmd->cmnd[3] << 48) |
996 ((u64)scsicmd->cmnd[4] << 40) |
997 ((u64)scsicmd->cmnd[5] << 32) |
998 ((u64)scsicmd->cmnd[6] << 24) |
999 (scsicmd->cmnd[7] << 16) |
1000 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1001 count = (scsicmd->cmnd[10] << 24) |
1002 (scsicmd->cmnd[11] << 16) |
1003 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
1004 break;
1005 case READ_12:
1006 dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", cid));
1007
1008 lba = ((u64)scsicmd->cmnd[2] << 24) |
1009 (scsicmd->cmnd[3] << 16) |
1010 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1011 count = (scsicmd->cmnd[6] << 24) |
1012 (scsicmd->cmnd[7] << 16) |
1013 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1014 break;
1015 default:
920 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid)); 1016 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid));
921 1017
922 lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 1018 lba = ((u64)scsicmd->cmnd[2] << 24) |
1019 (scsicmd->cmnd[3] << 16) |
1020 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
923 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; 1021 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
1022 break;
924 } 1023 }
925 dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n", 1024 dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
926 smp_processor_id(), (unsigned long long)lba, jiffies)); 1025 smp_processor_id(), (unsigned long long)lba, jiffies));
1026 if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) &&
1027 (lba & 0xffffffff00000000LL)) {
1028 dprintk((KERN_DEBUG "aac_read: Illegal lba\n"));
1029 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
1030 SAM_STAT_CHECK_CONDITION;
1031 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1032 HARDWARE_ERROR,
1033 SENCODE_INTERNAL_TARGET_FAILURE,
1034 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1035 0, 0);
1036 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1037 (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1038 ? sizeof(scsicmd->sense_buffer)
1039 : sizeof(dev->fsa_dev[cid].sense_data));
1040 scsicmd->scsi_done(scsicmd);
1041 return 0;
1042 }
927 /* 1043 /*
928 * Alocate and initialize a Fib 1044 * Alocate and initialize a Fib
929 */ 1045 */
@@ -936,8 +1052,8 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
936 if (dev->raw_io_interface) { 1052 if (dev->raw_io_interface) {
937 struct aac_raw_io *readcmd; 1053 struct aac_raw_io *readcmd;
938 readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext); 1054 readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
939 readcmd->block[0] = cpu_to_le32(lba); 1055 readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
940 readcmd->block[1] = 0; 1056 readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
941 readcmd->count = cpu_to_le32(count<<9); 1057 readcmd->count = cpu_to_le32(count<<9);
942 readcmd->cid = cpu_to_le16(cid); 1058 readcmd->cid = cpu_to_le16(cid);
943 readcmd->flags = cpu_to_le16(1); 1059 readcmd->flags = cpu_to_le16(1);
@@ -964,7 +1080,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
964 readcmd->command = cpu_to_le32(VM_CtHostRead64); 1080 readcmd->command = cpu_to_le32(VM_CtHostRead64);
965 readcmd->cid = cpu_to_le16(cid); 1081 readcmd->cid = cpu_to_le16(cid);
966 readcmd->sector_count = cpu_to_le16(count); 1082 readcmd->sector_count = cpu_to_le16(count);
967 readcmd->block = cpu_to_le32(lba); 1083 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
968 readcmd->pad = 0; 1084 readcmd->pad = 0;
969 readcmd->flags = 0; 1085 readcmd->flags = 0;
970 1086
@@ -989,7 +1105,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
989 readcmd = (struct aac_read *) fib_data(cmd_fibcontext); 1105 readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
990 readcmd->command = cpu_to_le32(VM_CtBlockRead); 1106 readcmd->command = cpu_to_le32(VM_CtBlockRead);
991 readcmd->cid = cpu_to_le32(cid); 1107 readcmd->cid = cpu_to_le32(cid);
992 readcmd->block = cpu_to_le32(lba); 1108 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
993 readcmd->count = cpu_to_le32(count * 512); 1109 readcmd->count = cpu_to_le32(count * 512);
994 1110
995 aac_build_sg(scsicmd, &readcmd->sg); 1111 aac_build_sg(scsicmd, &readcmd->sg);
@@ -1031,7 +1147,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1031 1147
1032static int aac_write(struct scsi_cmnd * scsicmd, int cid) 1148static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1033{ 1149{
1034 u32 lba; 1150 u64 lba;
1035 u32 count; 1151 u32 count;
1036 int status; 1152 int status;
1037 u16 fibsize; 1153 u16 fibsize;
@@ -1048,13 +1164,48 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1048 count = scsicmd->cmnd[4]; 1164 count = scsicmd->cmnd[4];
1049 if (count == 0) 1165 if (count == 0)
1050 count = 256; 1166 count = 256;
1167 } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
1168 dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", cid));
1169
1170 lba = ((u64)scsicmd->cmnd[2] << 56) |
1171 ((u64)scsicmd->cmnd[3] << 48) |
1172 ((u64)scsicmd->cmnd[4] << 40) |
1173 ((u64)scsicmd->cmnd[5] << 32) |
1174 ((u64)scsicmd->cmnd[6] << 24) |
1175 (scsicmd->cmnd[7] << 16) |
1176 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1177 count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
1178 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
1179 } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
1180 dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", cid));
1181
1182 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
1183 | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1184 count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
1185 | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1051 } else { 1186 } else {
1052 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid)); 1187 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid));
1053 lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 1188 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1054 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; 1189 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
1055 } 1190 }
1056 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", 1191 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
1057 smp_processor_id(), (unsigned long long)lba, jiffies)); 1192 smp_processor_id(), (unsigned long long)lba, jiffies));
1193 if ((!(dev->raw_io_interface) || !(dev->raw_io_64))
1194 && (lba & 0xffffffff00000000LL)) {
1195 dprintk((KERN_DEBUG "aac_write: Illegal lba\n"));
1196 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1197 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1198 HARDWARE_ERROR,
1199 SENCODE_INTERNAL_TARGET_FAILURE,
1200 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1201 0, 0);
1202 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1203 (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1204 ? sizeof(scsicmd->sense_buffer)
1205 : sizeof(dev->fsa_dev[cid].sense_data));
1206 scsicmd->scsi_done(scsicmd);
1207 return 0;
1208 }
1058 /* 1209 /*
1059 * Allocate and initialize a Fib then setup a BlockWrite command 1210 * Allocate and initialize a Fib then setup a BlockWrite command
1060 */ 1211 */
@@ -1068,8 +1219,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1068 if (dev->raw_io_interface) { 1219 if (dev->raw_io_interface) {
1069 struct aac_raw_io *writecmd; 1220 struct aac_raw_io *writecmd;
1070 writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext); 1221 writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
1071 writecmd->block[0] = cpu_to_le32(lba); 1222 writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1072 writecmd->block[1] = 0; 1223 writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1073 writecmd->count = cpu_to_le32(count<<9); 1224 writecmd->count = cpu_to_le32(count<<9);
1074 writecmd->cid = cpu_to_le16(cid); 1225 writecmd->cid = cpu_to_le16(cid);
1075 writecmd->flags = 0; 1226 writecmd->flags = 0;
@@ -1096,7 +1247,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1096 writecmd->command = cpu_to_le32(VM_CtHostWrite64); 1247 writecmd->command = cpu_to_le32(VM_CtHostWrite64);
1097 writecmd->cid = cpu_to_le16(cid); 1248 writecmd->cid = cpu_to_le16(cid);
1098 writecmd->sector_count = cpu_to_le16(count); 1249 writecmd->sector_count = cpu_to_le16(count);
1099 writecmd->block = cpu_to_le32(lba); 1250 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1100 writecmd->pad = 0; 1251 writecmd->pad = 0;
1101 writecmd->flags = 0; 1252 writecmd->flags = 0;
1102 1253
@@ -1121,7 +1272,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1121 writecmd = (struct aac_write *) fib_data(cmd_fibcontext); 1272 writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
1122 writecmd->command = cpu_to_le32(VM_CtBlockWrite); 1273 writecmd->command = cpu_to_le32(VM_CtBlockWrite);
1123 writecmd->cid = cpu_to_le32(cid); 1274 writecmd->cid = cpu_to_le32(cid);
1124 writecmd->block = cpu_to_le32(lba); 1275 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1125 writecmd->count = cpu_to_le32(count * 512); 1276 writecmd->count = cpu_to_le32(count * 512);
1126 writecmd->sg.count = cpu_to_le32(1); 1277 writecmd->sg.count = cpu_to_le32(1);
1127 /* ->stable is not used - it did mean which type of write */ 1278 /* ->stable is not used - it did mean which type of write */
@@ -1310,11 +1461,18 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1310 */ 1461 */
1311 if ((fsa_dev_ptr[cid].valid & 1) == 0) { 1462 if ((fsa_dev_ptr[cid].valid & 1) == 0) {
1312 switch (scsicmd->cmnd[0]) { 1463 switch (scsicmd->cmnd[0]) {
1464 case SERVICE_ACTION_IN:
1465 if (!(dev->raw_io_interface) ||
1466 !(dev->raw_io_64) ||
1467 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
1468 break;
1313 case INQUIRY: 1469 case INQUIRY:
1314 case READ_CAPACITY: 1470 case READ_CAPACITY:
1315 case TEST_UNIT_READY: 1471 case TEST_UNIT_READY:
1316 spin_unlock_irq(host->host_lock); 1472 spin_unlock_irq(host->host_lock);
1317 probe_container(dev, cid); 1473 probe_container(dev, cid);
1474 if ((fsa_dev_ptr[cid].valid & 1) == 0)
1475 fsa_dev_ptr[cid].valid = 0;
1318 spin_lock_irq(host->host_lock); 1476 spin_lock_irq(host->host_lock);
1319 if (fsa_dev_ptr[cid].valid == 0) { 1477 if (fsa_dev_ptr[cid].valid == 0) {
1320 scsicmd->result = DID_NO_CONNECT << 16; 1478 scsicmd->result = DID_NO_CONNECT << 16;
@@ -1375,7 +1533,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1375 memset(&inq_data, 0, sizeof (struct inquiry_data)); 1533 memset(&inq_data, 0, sizeof (struct inquiry_data));
1376 1534
1377 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */ 1535 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
1378 inq_data.inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
1379 inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */ 1536 inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
1380 inq_data.inqd_len = 31; 1537 inq_data.inqd_len = 31;
1381 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */ 1538 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
@@ -1397,13 +1554,55 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1397 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data)); 1554 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
1398 return aac_get_container_name(scsicmd, cid); 1555 return aac_get_container_name(scsicmd, cid);
1399 } 1556 }
1557 case SERVICE_ACTION_IN:
1558 if (!(dev->raw_io_interface) ||
1559 !(dev->raw_io_64) ||
1560 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
1561 break;
1562 {
1563 u64 capacity;
1564 char cp[12];
1565 unsigned int offset = 0;
1566
1567 dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
1568 capacity = fsa_dev_ptr[cid].size - 1;
1569 if (scsicmd->cmnd[13] > 12) {
1570 offset = scsicmd->cmnd[13] - 12;
1571 if (offset > sizeof(cp))
1572 break;
1573 memset(cp, 0, offset);
1574 aac_internal_transfer(scsicmd, cp, 0, offset);
1575 }
1576 cp[0] = (capacity >> 56) & 0xff;
1577 cp[1] = (capacity >> 48) & 0xff;
1578 cp[2] = (capacity >> 40) & 0xff;
1579 cp[3] = (capacity >> 32) & 0xff;
1580 cp[4] = (capacity >> 24) & 0xff;
1581 cp[5] = (capacity >> 16) & 0xff;
1582 cp[6] = (capacity >> 8) & 0xff;
1583 cp[7] = (capacity >> 0) & 0xff;
1584 cp[8] = 0;
1585 cp[9] = 0;
1586 cp[10] = 2;
1587 cp[11] = 0;
1588 aac_internal_transfer(scsicmd, cp, offset, sizeof(cp));
1589
1590 /* Do not cache partition table for arrays */
1591 scsicmd->device->removable = 1;
1592
1593 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1594 scsicmd->scsi_done(scsicmd);
1595
1596 return 0;
1597 }
1598
1400 case READ_CAPACITY: 1599 case READ_CAPACITY:
1401 { 1600 {
1402 u32 capacity; 1601 u32 capacity;
1403 char cp[8]; 1602 char cp[8];
1404 1603
1405 dprintk((KERN_DEBUG "READ CAPACITY command.\n")); 1604 dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
1406 if (fsa_dev_ptr[cid].size <= 0x100000000LL) 1605 if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
1407 capacity = fsa_dev_ptr[cid].size - 1; 1606 capacity = fsa_dev_ptr[cid].size - 1;
1408 else 1607 else
1409 capacity = (u32)-1; 1608 capacity = (u32)-1;
@@ -1417,6 +1616,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1417 cp[6] = 2; 1616 cp[6] = 2;
1418 cp[7] = 0; 1617 cp[7] = 0;
1419 aac_internal_transfer(scsicmd, cp, 0, sizeof(cp)); 1618 aac_internal_transfer(scsicmd, cp, 0, sizeof(cp));
1619 /* Do not cache partition table for arrays */
1620 scsicmd->device->removable = 1;
1420 1621
1421 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1622 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1422 scsicmd->scsi_done(scsicmd); 1623 scsicmd->scsi_done(scsicmd);
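Both capacity replies above encode the last logical block address and a 512-byte block length in big-endian order: the SERVICE ACTION IN(16) path carries the full 64-bit value, while plain READ CAPACITY saturates to 0xFFFFFFFF once the container no longer fits in 32 bits (i.e. above 2 TB with 512-byte blocks). The hand-built byte arrays are equivalent to this sketch; put_unaligned_be{32,64} are the later generic helpers from <asm/unaligned.h>, used here only for brevity, the driver itself assembles the bytes by hand:

	u32 last;

	/* READ CAPACITY(16) parameter data, bytes 0-11 as filled above */
	put_unaligned_be64(capacity, &cp[0]);	/* returned last LBA        */
	put_unaligned_be32(512, &cp[8]);	/* block length: 00 00 02 00 */

	/* READ CAPACITY(10): clamp when the container exceeds 32-bit LBAs */
	last = (fsa_dev_ptr[cid].size <= 0x100000000ULL)
		? (u32)(fsa_dev_ptr[cid].size - 1) : (u32)-1;
	put_unaligned_be32(last, &cp[0]);
	put_unaligned_be32(512, &cp[4]);
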
@@ -1497,6 +1698,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1497 { 1698 {
1498 case READ_6: 1699 case READ_6:
1499 case READ_10: 1700 case READ_10:
1701 case READ_12:
1702 case READ_16:
1500 /* 1703 /*
1501 * Hack to keep track of ordinal number of the device that 1704 * Hack to keep track of ordinal number of the device that
1502 * corresponds to a container. Needed to convert 1705 * corresponds to a container. Needed to convert
@@ -1504,17 +1707,19 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1504 */ 1707 */
1505 1708
1506 spin_unlock_irq(host->host_lock); 1709 spin_unlock_irq(host->host_lock);
1507 if (scsicmd->request->rq_disk) 1710 if (scsicmd->request->rq_disk)
1508 memcpy(fsa_dev_ptr[cid].devname, 1711 strlcpy(fsa_dev_ptr[cid].devname,
1509 scsicmd->request->rq_disk->disk_name, 1712 scsicmd->request->rq_disk->disk_name,
1510 8); 1713 min(sizeof(fsa_dev_ptr[cid].devname),
1511 1714 sizeof(scsicmd->request->rq_disk->disk_name) + 1));
1512 ret = aac_read(scsicmd, cid); 1715 ret = aac_read(scsicmd, cid);
1513 spin_lock_irq(host->host_lock); 1716 spin_lock_irq(host->host_lock);
1514 return ret; 1717 return ret;
1515 1718
1516 case WRITE_6: 1719 case WRITE_6:
1517 case WRITE_10: 1720 case WRITE_10:
1721 case WRITE_12:
1722 case WRITE_16:
1518 spin_unlock_irq(host->host_lock); 1723 spin_unlock_irq(host->host_lock);
1519 ret = aac_write(scsicmd, cid); 1724 ret = aac_write(scsicmd, cid);
1520 spin_lock_irq(host->host_lock); 1725 spin_lock_irq(host->host_lock);
@@ -1745,6 +1950,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
1745 case WRITE_10: 1950 case WRITE_10:
1746 case READ_12: 1951 case READ_12:
1747 case WRITE_12: 1952 case WRITE_12:
1953 case READ_16:
1954 case WRITE_16:
1748 if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) { 1955 if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
1749 printk(KERN_WARNING"aacraid: SCSI CMD underflow\n"); 1956 printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
1750 } else { 1957 } else {
@@ -1850,8 +2057,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
1850 sizeof(scsicmd->sense_buffer) : 2057 sizeof(scsicmd->sense_buffer) :
1851 le32_to_cpu(srbreply->sense_data_size); 2058 le32_to_cpu(srbreply->sense_data_size);
1852#ifdef AAC_DETAILED_STATUS_INFO 2059#ifdef AAC_DETAILED_STATUS_INFO
1853 dprintk((KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", 2060 printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
1854 le32_to_cpu(srbreply->status), len)); 2061 le32_to_cpu(srbreply->status), len);
1855#endif 2062#endif
1856 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len); 2063 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
1857 2064
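The 16-byte CDB decoding repeated in io_callback(), aac_read() and aac_write() above is a plain big-endian field extraction: the starting LBA sits in CDB bytes 2-9 and the transfer length in bytes 10-13. The explicit (u64) casts in the hunks matter, since without them the high bytes would be shifted in 32-bit arithmetic. A compact equivalent, shown only to make the layout obvious (get_unaligned_be{64,32} are the later generic helpers, not what the driver uses):

	static void aac_decode_rw16(const u8 *cdb, u64 *lba, u32 *count)
	{
		*lba   = get_unaligned_be64(&cdb[2]);	/* CDB bytes 2..9   */
		*count = get_unaligned_be32(&cdb[10]);	/* CDB bytes 10..13 */
	}
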
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index e40528185d48..4a99d2f000f4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1,6 +1,10 @@
1#if (!defined(dprintk)) 1#if (!defined(dprintk))
2# define dprintk(x) 2# define dprintk(x)
3#endif 3#endif
4/* eg: if (nblank(dprintk(x))) */
5#define _nblank(x) #x
6#define nblank(x) _nblank(x)[0]
7
4 8
5/*------------------------------------------------------------------------------ 9/*------------------------------------------------------------------------------
6 * D E F I N E S 10 * D E F I N E S
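The nblank() helper added here stringizes its argument and looks at the first character, so nblank(dprintk(x)) is a compile-time constant that is false exactly when dprintk() expands to nothing; the io_callback() change in aachba.c uses it to compile the debug-only CDB decoding out of non-debug builds. A standalone illustration in ordinary user-space C (no assumptions beyond the two macros shown):

	#include <stdio.h>

	#define _nblank(x) #x
	#define nblank(x)  _nblank(x)[0]

	#define dprintk(x)		/* debugging off: expands to nothing */

	int main(void)
	{
		if (nblank(dprintk(printf("dbg\n"))))	/* "" -> '\0' -> false */
			printf("debug block compiled in\n");
		else
			printf("debug block optimised away\n");
		return 0;
	}
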
@@ -302,7 +306,6 @@ enum aac_queue_types {
302 */ 306 */
303 307
304#define FsaNormal 1 308#define FsaNormal 1
305#define FsaHigh 2
306 309
307/* 310/*
308 * Define the FIB. The FIB is the where all the requested data and 311 * Define the FIB. The FIB is the where all the requested data and
@@ -546,8 +549,6 @@ struct aac_queue {
546 /* This is only valid for adapter to host command queues. */ 549 /* This is only valid for adapter to host command queues. */
547 spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */ 550 spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */
548 spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */ 551 spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
549 unsigned long SavedIrql; /* Previous IRQL when the spin lock is taken */
550 u32 padding; /* Padding - FIXME - can remove I believe */
551 struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */ 552 struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */
552 /* only valid for command queues which receive entries from the adapter. */ 553 /* only valid for command queues which receive entries from the adapter. */
553 struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */ 554 struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */
@@ -776,7 +777,9 @@ struct fsa_dev_info {
776 u64 last; 777 u64 last;
777 u64 size; 778 u64 size;
778 u32 type; 779 u32 type;
780 u32 config_waiting_on;
779 u16 queue_depth; 781 u16 queue_depth;
782 u8 config_needed;
780 u8 valid; 783 u8 valid;
781 u8 ro; 784 u8 ro;
782 u8 locked; 785 u8 locked;
@@ -1012,6 +1015,7 @@ struct aac_dev
1012 /* macro side-effects BEWARE */ 1015 /* macro side-effects BEWARE */
1013# define raw_io_interface \ 1016# define raw_io_interface \
1014 init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4) 1017 init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
1018 u8 raw_io_64;
1015 u8 printf_enabled; 1019 u8 printf_enabled;
1016}; 1020};
1017 1021
@@ -1362,8 +1366,10 @@ struct aac_srb_reply
1362#define VM_CtBlockVerify64 18 1366#define VM_CtBlockVerify64 18
1363#define VM_CtHostRead64 19 1367#define VM_CtHostRead64 19
1364#define VM_CtHostWrite64 20 1368#define VM_CtHostWrite64 20
1369#define VM_DrvErrTblLog 21
1370#define VM_NameServe64 22
1365 1371
1366#define MAX_VMCOMMAND_NUM 21 /* used for sizing stats array - leave last */ 1372#define MAX_VMCOMMAND_NUM 23 /* used for sizing stats array - leave last */
1367 1373
1368/* 1374/*
1369 * Descriptive information (eg, vital stats) 1375 * Descriptive information (eg, vital stats)
@@ -1472,6 +1478,7 @@ struct aac_mntent {
1472 manager (eg, filesystem) */ 1478 manager (eg, filesystem) */
1473 __le32 altoid; /* != oid <==> snapshot or 1479 __le32 altoid; /* != oid <==> snapshot or
1474 broken mirror exists */ 1480 broken mirror exists */
1481 __le32 capacityhigh;
1475}; 1482};
1476 1483
1477#define FSCS_NOTCLEAN 0x0001 /* fsck is neccessary before mounting */ 1484#define FSCS_NOTCLEAN 0x0001 /* fsck is neccessary before mounting */
@@ -1707,6 +1714,7 @@ extern struct aac_common aac_config;
1707#define AifCmdJobProgress 2 /* Progress report */ 1714#define AifCmdJobProgress 2 /* Progress report */
1708#define AifJobCtrZero 101 /* Array Zero progress */ 1715#define AifJobCtrZero 101 /* Array Zero progress */
1709#define AifJobStsSuccess 1 /* Job completes */ 1716#define AifJobStsSuccess 1 /* Job completes */
1717#define AifJobStsRunning 102 /* Job running */
1710#define AifCmdAPIReport 3 /* Report from other user of API */ 1718#define AifCmdAPIReport 3 /* Report from other user of API */
1711#define AifCmdDriverNotify 4 /* Notify host driver of event */ 1719#define AifCmdDriverNotify 4 /* Notify host driver of event */
1712#define AifDenMorphComplete 200 /* A morph operation completed */ 1720#define AifDenMorphComplete 200 /* A morph operation completed */
@@ -1777,6 +1785,7 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size);
1777struct aac_driver_ident* aac_get_driver_ident(int devtype); 1785struct aac_driver_ident* aac_get_driver_ident(int devtype);
1778int aac_get_adapter_info(struct aac_dev* dev); 1786int aac_get_adapter_info(struct aac_dev* dev);
1779int aac_send_shutdown(struct aac_dev *dev); 1787int aac_send_shutdown(struct aac_dev *dev);
1788int probe_container(struct aac_dev *dev, int cid);
1780extern int numacb; 1789extern int numacb;
1781extern int acbsize; 1790extern int acbsize;
1782extern char aac_driver_version[]; 1791extern char aac_driver_version[];
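The new capacityhigh member pairs with the VM_NameServe64 command: firmware that understands it reports the upper 32 bits of a container's size, and firmware that does not gets capacityhigh zeroed by the driver before use. The assembly used in both aachba.c probe paths is simply:

	fsa_dev_ptr[cid].size =
		(u64)le32_to_cpu(dresp->mnt[0].capacity) +
		((u64)le32_to_cpu(dresp->mnt[0].capacityhigh) << 32);
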
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 75abd0453289..59a341b2aedc 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -195,7 +195,7 @@ int aac_send_shutdown(struct aac_dev * dev)
195 fibctx, 195 fibctx,
196 sizeof(struct aac_close), 196 sizeof(struct aac_close),
197 FsaNormal, 197 FsaNormal,
198 1, 1, 198 -2 /* Timeout silently */, 1,
199 NULL, NULL); 199 NULL, NULL);
200 200
201 if (status == 0) 201 if (status == 0)
@@ -313,8 +313,15 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
313 dev->max_fib_size = sizeof(struct hw_fib); 313 dev->max_fib_size = sizeof(struct hw_fib);
314 dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size 314 dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
315 - sizeof(struct aac_fibhdr) 315 - sizeof(struct aac_fibhdr)
316 - sizeof(struct aac_write) + sizeof(struct sgmap)) 316 - sizeof(struct aac_write) + sizeof(struct sgentry))
317 / sizeof(struct sgmap); 317 / sizeof(struct sgentry);
318 dev->raw_io_64 = 0;
319 if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
320 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
321 (status[0] == 0x00000001)) {
322 if (status[1] & AAC_OPT_NEW_COMM_64)
323 dev->raw_io_64 = 1;
324 }
318 if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS, 325 if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
319 0, 0, 0, 0, 0, 0, 326 0, 0, 0, 0, 0, 0,
320 status+0, status+1, status+2, status+3, status+4)) 327 status+0, status+1, status+2, status+3, status+4))
@@ -342,8 +349,8 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
342 dev->max_fib_size = 512; 349 dev->max_fib_size = 512;
343 dev->sg_tablesize = host->sg_tablesize 350 dev->sg_tablesize = host->sg_tablesize
344 = (512 - sizeof(struct aac_fibhdr) 351 = (512 - sizeof(struct aac_fibhdr)
345 - sizeof(struct aac_write) + sizeof(struct sgmap)) 352 - sizeof(struct aac_write) + sizeof(struct sgentry))
346 / sizeof(struct sgmap); 353 / sizeof(struct sgentry);
347 host->can_queue = AAC_NUM_IO_FIB; 354 host->can_queue = AAC_NUM_IO_FIB;
348 } else if (acbsize == 2048) { 355 } else if (acbsize == 2048) {
349 host->max_sectors = 512; 356 host->max_sectors = 512;
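The sgmap -> sgentry change in the sg_tablesize formulas above makes the division use the size of a single scatter-gather element rather than the sgmap wrapper, which also carries the element count; the "+ sizeof(struct sgentry)" gives back the one entry already embedded in the write command. Purely illustrative numbers to show the shape of the calculation (not the driver's real structure sizes):

	/* all sizes below are made-up placeholders */
	enum { FIB = 512, FIBHDR = 32, WRITE_CMD = 64, SGENTRY = 8 };
	int sg_tablesize = (FIB - FIBHDR - WRITE_CMD + SGENTRY) / SGENTRY; /* = 53 */
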
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a1d303f03480..e4d543a474ae 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -39,7 +39,9 @@
39#include <linux/completion.h> 39#include <linux/completion.h>
40#include <linux/blkdev.h> 40#include <linux/blkdev.h>
41#include <scsi/scsi_host.h> 41#include <scsi/scsi_host.h>
42#include <scsi/scsi_device.h>
42#include <asm/semaphore.h> 43#include <asm/semaphore.h>
44#include <asm/delay.h>
43 45
44#include "aacraid.h" 46#include "aacraid.h"
45 47
@@ -269,40 +271,22 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
269 /* Interrupt Moderation, only interrupt for first two entries */ 271 /* Interrupt Moderation, only interrupt for first two entries */
270 if (idx != le32_to_cpu(*(q->headers.consumer))) { 272 if (idx != le32_to_cpu(*(q->headers.consumer))) {
271 if (--idx == 0) { 273 if (--idx == 0) {
272 if (qid == AdapHighCmdQueue) 274 if (qid == AdapNormCmdQueue)
273 idx = ADAP_HIGH_CMD_ENTRIES;
274 else if (qid == AdapNormCmdQueue)
275 idx = ADAP_NORM_CMD_ENTRIES; 275 idx = ADAP_NORM_CMD_ENTRIES;
276 else if (qid == AdapHighRespQueue) 276 else
277 idx = ADAP_HIGH_RESP_ENTRIES;
278 else if (qid == AdapNormRespQueue)
279 idx = ADAP_NORM_RESP_ENTRIES; 277 idx = ADAP_NORM_RESP_ENTRIES;
280 } 278 }
281 if (idx != le32_to_cpu(*(q->headers.consumer))) 279 if (idx != le32_to_cpu(*(q->headers.consumer)))
282 *nonotify = 1; 280 *nonotify = 1;
283 } 281 }
284 282
285 if (qid == AdapHighCmdQueue) { 283 if (qid == AdapNormCmdQueue) {
286 if (*index >= ADAP_HIGH_CMD_ENTRIES)
287 *index = 0;
288 } else if (qid == AdapNormCmdQueue) {
289 if (*index >= ADAP_NORM_CMD_ENTRIES) 284 if (*index >= ADAP_NORM_CMD_ENTRIES)
290 *index = 0; /* Wrap to front of the Producer Queue. */ 285 *index = 0; /* Wrap to front of the Producer Queue. */
291 } 286 } else {
292 else if (qid == AdapHighRespQueue)
293 {
294 if (*index >= ADAP_HIGH_RESP_ENTRIES)
295 *index = 0;
296 }
297 else if (qid == AdapNormRespQueue)
298 {
299 if (*index >= ADAP_NORM_RESP_ENTRIES) 287 if (*index >= ADAP_NORM_RESP_ENTRIES)
300 *index = 0; /* Wrap to front of the Producer Queue. */ 288 *index = 0; /* Wrap to front of the Producer Queue. */
301 } 289 }
302 else {
303 printk("aacraid: invalid qid\n");
304 BUG();
305 }
306 290
307 if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */ 291 if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
308 printk(KERN_WARNING "Queue %d full, %u outstanding.\n", 292 printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
@@ -334,12 +318,8 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
334{ 318{
335 struct aac_entry * entry = NULL; 319 struct aac_entry * entry = NULL;
336 int map = 0; 320 int map = 0;
337 struct aac_queue * q = &dev->queues->queue[qid];
338
339 spin_lock_irqsave(q->lock, q->SavedIrql);
340 321
341 if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue) 322 if (qid == AdapNormCmdQueue) {
342 {
343 /* if no entries wait for some if caller wants to */ 323 /* if no entries wait for some if caller wants to */
344 while (!aac_get_entry(dev, qid, &entry, index, nonotify)) 324 while (!aac_get_entry(dev, qid, &entry, index, nonotify))
345 { 325 {
@@ -350,9 +330,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
350 */ 330 */
351 entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size)); 331 entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
352 map = 1; 332 map = 1;
353 } 333 } else {
354 else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
355 {
356 while(!aac_get_entry(dev, qid, &entry, index, nonotify)) 334 while(!aac_get_entry(dev, qid, &entry, index, nonotify))
357 { 335 {
358 /* if no entries wait for some if caller wants to */ 336 /* if no entries wait for some if caller wants to */
@@ -375,42 +353,6 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
375 return 0; 353 return 0;
376} 354}
377 355
378
379/**
380 * aac_insert_entry - insert a queue entry
381 * @dev: Adapter
382 * @index: Index of entry to insert
383 * @qid: Queue number
384 * @nonotify: Suppress adapter notification
385 *
386 * Gets the next free QE off the requested priorty adapter command
387 * queue and associates the Fib with the QE. The QE represented by
388 * index is ready to insert on the queue when this routine returns
389 * success.
390 */
391
392static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
393{
394 struct aac_queue * q = &dev->queues->queue[qid];
395
396 if(q == NULL)
397 BUG();
398 *(q->headers.producer) = cpu_to_le32(index + 1);
399 spin_unlock_irqrestore(q->lock, q->SavedIrql);
400
401 if (qid == AdapHighCmdQueue ||
402 qid == AdapNormCmdQueue ||
403 qid == AdapHighRespQueue ||
404 qid == AdapNormRespQueue)
405 {
406 if (!nonotify)
407 aac_adapter_notify(dev, qid);
408 }
409 else
410 printk("Suprise insert!\n");
411 return 0;
412}
413
414/* 356/*
415 * Define the highest level of host to adapter communication routines. 357 * Define the highest level of host to adapter communication routines.
416 * These routines will support host to adapter FS commuication. These 358 * These routines will support host to adapter FS commuication. These
@@ -439,12 +381,13 @@ static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned l
439int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data) 381int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
440{ 382{
441 u32 index; 383 u32 index;
442 u32 qid;
443 struct aac_dev * dev = fibptr->dev; 384 struct aac_dev * dev = fibptr->dev;
444 unsigned long nointr = 0; 385 unsigned long nointr = 0;
445 struct hw_fib * hw_fib = fibptr->hw_fib; 386 struct hw_fib * hw_fib = fibptr->hw_fib;
446 struct aac_queue * q; 387 struct aac_queue * q;
447 unsigned long flags = 0; 388 unsigned long flags = 0;
389 unsigned long qflags;
390
448 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) 391 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
449 return -EBUSY; 392 return -EBUSY;
450 /* 393 /*
@@ -497,26 +440,8 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
497 * Get a queue entry connect the FIB to it and send an notify 440 * Get a queue entry connect the FIB to it and send an notify
498 * the adapter a command is ready. 441 * the adapter a command is ready.
499 */ 442 */
500 if (priority == FsaHigh) { 443 hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
501 hw_fib->header.XferState |= cpu_to_le32(HighPriority);
502 qid = AdapHighCmdQueue;
503 } else {
504 hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
505 qid = AdapNormCmdQueue;
506 }
507 q = &dev->queues->queue[qid];
508 444
509 if(wait)
510 spin_lock_irqsave(&fibptr->event_lock, flags);
511 if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
512 return -EWOULDBLOCK;
513 dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
514 dprintk((KERN_DEBUG "Fib contents:.\n"));
515 dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
516 dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
517 dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
518 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
519 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
520 /* 445 /*
521 * Fill in the Callback and CallbackContext if we are not 446 * Fill in the Callback and CallbackContext if we are not
522 * going to wait. 447 * going to wait.
@@ -525,22 +450,67 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
525 fibptr->callback = callback; 450 fibptr->callback = callback;
526 fibptr->callback_data = callback_data; 451 fibptr->callback_data = callback_data;
527 } 452 }
528 FIB_COUNTER_INCREMENT(aac_config.FibsSent);
529 list_add_tail(&fibptr->queue, &q->pendingq);
530 q->numpending++;
531 453
532 fibptr->done = 0; 454 fibptr->done = 0;
533 fibptr->flags = 0; 455 fibptr->flags = 0;
534 456
535 if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0) 457 FIB_COUNTER_INCREMENT(aac_config.FibsSent);
536 return -EWOULDBLOCK; 458
459 dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
460 dprintk((KERN_DEBUG "Fib contents:.\n"));
461 dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
462 dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
463 dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
464 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
465 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
466
467 q = &dev->queues->queue[AdapNormCmdQueue];
468
469 if(wait)
470 spin_lock_irqsave(&fibptr->event_lock, flags);
471 spin_lock_irqsave(q->lock, qflags);
472 aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
473
474 list_add_tail(&fibptr->queue, &q->pendingq);
475 q->numpending++;
476 *(q->headers.producer) = cpu_to_le32(index + 1);
477 spin_unlock_irqrestore(q->lock, qflags);
478 if (!(nointr & aac_config.irq_mod))
479 aac_adapter_notify(dev, AdapNormCmdQueue);
537 /* 480 /*
538 * If the caller wanted us to wait for response wait now. 481 * If the caller wanted us to wait for response wait now.
539 */ 482 */
540 483
541 if (wait) { 484 if (wait) {
542 spin_unlock_irqrestore(&fibptr->event_lock, flags); 485 spin_unlock_irqrestore(&fibptr->event_lock, flags);
543 down(&fibptr->event_wait); 486 /* Only set for first known interruptable command */
487 if (wait < 0) {
488 /*
489 * *VERY* Dangerous to time out a command, the
490 * assumption is made that we have no hope of
491 * functioning because an interrupt routing or other
492 * hardware failure has occurred.
493 */
494 unsigned long count = 36000000L; /* 3 minutes */
495 unsigned long qflags;
496 while (down_trylock(&fibptr->event_wait)) {
497 if (--count == 0) {
498 spin_lock_irqsave(q->lock, qflags);
499 q->numpending--;
500 list_del(&fibptr->queue);
501 spin_unlock_irqrestore(q->lock, qflags);
502 if (wait == -1) {
503 printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n"
504 "Usually a result of a PCI interrupt routing problem;\n"
505 "update mother board BIOS or consider utilizing one of\n"
506 "the SAFE mode kernel options (acpi, apic etc)\n");
507 }
508 return -ETIMEDOUT;
509 }
510 udelay(5);
511 }
512 } else
513 down(&fibptr->event_wait);
544 if(fibptr->done == 0) 514 if(fibptr->done == 0)
545 BUG(); 515 BUG();
546 516
@@ -622,15 +592,9 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
622 case HostNormCmdQueue: 592 case HostNormCmdQueue:
623 notify = HostNormCmdNotFull; 593 notify = HostNormCmdNotFull;
624 break; 594 break;
625 case HostHighCmdQueue:
626 notify = HostHighCmdNotFull;
627 break;
628 case HostNormRespQueue: 595 case HostNormRespQueue:
629 notify = HostNormRespNotFull; 596 notify = HostNormRespNotFull;
630 break; 597 break;
631 case HostHighRespQueue:
632 notify = HostHighRespNotFull;
633 break;
634 default: 598 default:
635 BUG(); 599 BUG();
636 return; 600 return;
@@ -652,9 +616,13 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
652{ 616{
653 struct hw_fib * hw_fib = fibptr->hw_fib; 617 struct hw_fib * hw_fib = fibptr->hw_fib;
654 struct aac_dev * dev = fibptr->dev; 618 struct aac_dev * dev = fibptr->dev;
619 struct aac_queue * q;
655 unsigned long nointr = 0; 620 unsigned long nointr = 0;
656 if (hw_fib->header.XferState == 0) 621 unsigned long qflags;
622
623 if (hw_fib->header.XferState == 0) {
657 return 0; 624 return 0;
625 }
658 /* 626 /*
659 * If we plan to do anything check the structure type first. 627 * If we plan to do anything check the structure type first.
660 */ 628 */
@@ -669,37 +637,21 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
669 * send the completed cdb to the adapter. 637 * send the completed cdb to the adapter.
670 */ 638 */
671 if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) { 639 if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
640 u32 index;
672 hw_fib->header.XferState |= cpu_to_le32(HostProcessed); 641 hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
673 if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) { 642 if (size) {
674 u32 index; 643 size += sizeof(struct aac_fibhdr);
675 if (size) 644 if (size > le16_to_cpu(hw_fib->header.SenderSize))
676 { 645 return -EMSGSIZE;
677 size += sizeof(struct aac_fibhdr); 646 hw_fib->header.Size = cpu_to_le16(size);
678 if (size > le16_to_cpu(hw_fib->header.SenderSize))
679 return -EMSGSIZE;
680 hw_fib->header.Size = cpu_to_le16(size);
681 }
682 if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
683 return -EWOULDBLOCK;
684 }
685 if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
686 }
687 } else if (hw_fib->header.XferState &
688 cpu_to_le32(NormalPriority)) {
689 u32 index;
690
691 if (size) {
692 size += sizeof(struct aac_fibhdr);
693 if (size > le16_to_cpu(hw_fib->header.SenderSize))
694 return -EMSGSIZE;
695 hw_fib->header.Size = cpu_to_le16(size);
696 }
697 if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
698 return -EWOULDBLOCK;
699 if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0)
700 {
701 }
702 } 647 }
648 q = &dev->queues->queue[AdapNormRespQueue];
649 spin_lock_irqsave(q->lock, qflags);
650 aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
651 *(q->headers.producer) = cpu_to_le32(index + 1);
652 spin_unlock_irqrestore(q->lock, qflags);
653 if (!(nointr & (int)aac_config.irq_mod))
654 aac_adapter_notify(dev, AdapNormRespQueue);
703 } 655 }
704 else 656 else
705 { 657 {
@@ -791,6 +743,268 @@ void aac_printf(struct aac_dev *dev, u32 val)
791 memset(cp, 0, 256); 743 memset(cp, 0, 256);
792} 744}
793 745
746
747/**
748 * aac_handle_aif - Handle a message from the firmware
749 * @dev: Which adapter this fib is from
750 * @fibptr: Pointer to fibptr from adapter
751 *
752 * This routine handles a driver notify fib from the adapter and
753 * dispatches it to the appropriate routine for handling.
754 */
755
756static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
757{
758 struct hw_fib * hw_fib = fibptr->hw_fib;
759 struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
760 int busy;
761 u32 container;
762 struct scsi_device *device;
763 enum {
764 NOTHING,
765 DELETE,
766 ADD,
767 CHANGE
768 } device_config_needed;
769
770 /* Sniff for container changes */
771
772 if (!dev)
773 return;
774 container = (u32)-1;
775
776 /*
777 * We have set this up to try and minimize the number of
778 * re-configures that take place. As a result of this when
779 * certain AIF's come in we will set a flag waiting for another
780 * type of AIF before setting the re-config flag.
781 */
782 switch (le32_to_cpu(aifcmd->command)) {
783 case AifCmdDriverNotify:
784 switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
785 /*
786 * Morph or Expand complete
787 */
788 case AifDenMorphComplete:
789 case AifDenVolumeExtendComplete:
790 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
791 if (container >= dev->maximum_num_containers)
792 break;
793
794 /*
795 * Find the Scsi_Device associated with the SCSI
796 * address. Make sure we have the right array, and if
797 * so set the flag to initiate a new re-config once we
798 * see an AifEnConfigChange AIF come through.
799 */
800
801 if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
802 device = scsi_device_lookup(dev->scsi_host_ptr,
803 CONTAINER_TO_CHANNEL(container),
804 CONTAINER_TO_ID(container),
805 CONTAINER_TO_LUN(container));
806 if (device) {
807 dev->fsa_dev[container].config_needed = CHANGE;
808 dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
809 scsi_device_put(device);
810 }
811 }
812 }
813
814 /*
815 * If we are waiting on something and this happens to be
816 * that thing then set the re-configure flag.
817 */
818 if (container != (u32)-1) {
819 if (container >= dev->maximum_num_containers)
820 break;
821 if (dev->fsa_dev[container].config_waiting_on ==
822 le32_to_cpu(*(u32 *)aifcmd->data))
823 dev->fsa_dev[container].config_waiting_on = 0;
824 } else for (container = 0;
825 container < dev->maximum_num_containers; ++container) {
826 if (dev->fsa_dev[container].config_waiting_on ==
827 le32_to_cpu(*(u32 *)aifcmd->data))
828 dev->fsa_dev[container].config_waiting_on = 0;
829 }
830 break;
831
832 case AifCmdEventNotify:
833 switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
834 /*
835 * Add an Array.
836 */
837 case AifEnAddContainer:
838 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
839 if (container >= dev->maximum_num_containers)
840 break;
841 dev->fsa_dev[container].config_needed = ADD;
842 dev->fsa_dev[container].config_waiting_on =
843 AifEnConfigChange;
844 break;
845
846 /*
847 * Delete an Array.
848 */
849 case AifEnDeleteContainer:
850 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
851 if (container >= dev->maximum_num_containers)
852 break;
853 dev->fsa_dev[container].config_needed = DELETE;
854 dev->fsa_dev[container].config_waiting_on =
855 AifEnConfigChange;
856 break;
857
858 /*
859 * Container change detected. If we currently are not
860 * waiting on something else, setup to wait on a Config Change.
861 */
862 case AifEnContainerChange:
863 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
864 if (container >= dev->maximum_num_containers)
865 break;
866 if (dev->fsa_dev[container].config_waiting_on)
867 break;
868 dev->fsa_dev[container].config_needed = CHANGE;
869 dev->fsa_dev[container].config_waiting_on =
870 AifEnConfigChange;
871 break;
872
873 case AifEnConfigChange:
874 break;
875
876 }
877
878 /*
879 * If we are waiting on something and this happens to be
880 * that thing then set the re-configure flag.
881 */
882 if (container != (u32)-1) {
883 if (container >= dev->maximum_num_containers)
884 break;
885 if (dev->fsa_dev[container].config_waiting_on ==
886 le32_to_cpu(*(u32 *)aifcmd->data))
887 dev->fsa_dev[container].config_waiting_on = 0;
888 } else for (container = 0;
889 container < dev->maximum_num_containers; ++container) {
890 if (dev->fsa_dev[container].config_waiting_on ==
891 le32_to_cpu(*(u32 *)aifcmd->data))
892 dev->fsa_dev[container].config_waiting_on = 0;
893 }
894 break;
895
896 case AifCmdJobProgress:
897 /*
898 * These are job progress AIF's. When a Clear is being
899 * done on a container it is initially created then hidden from
900 * the OS. When the clear completes we don't get a config
901 * change so we monitor the job status complete on a clear then
902 * wait for a container change.
903 */
904
905 if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
906 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
907 || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
908 for (container = 0;
909 container < dev->maximum_num_containers;
910 ++container) {
911 /*
912 * Stomp on all config sequencing for all
913 * containers?
914 */
915 dev->fsa_dev[container].config_waiting_on =
916 AifEnContainerChange;
917 dev->fsa_dev[container].config_needed = ADD;
918 }
919 }
920 if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
921 && (((u32 *)aifcmd->data)[6] == 0)
922 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
923 for (container = 0;
924 container < dev->maximum_num_containers;
925 ++container) {
926 /*
927 * Stomp on all config sequencing for all
928 * containers?
929 */
930 dev->fsa_dev[container].config_waiting_on =
931 AifEnContainerChange;
932 dev->fsa_dev[container].config_needed = DELETE;
933 }
934 }
935 break;
936 }
937
938 device_config_needed = NOTHING;
939 for (container = 0; container < dev->maximum_num_containers;
940 ++container) {
941 if ((dev->fsa_dev[container].config_waiting_on == 0)
942 && (dev->fsa_dev[container].config_needed != NOTHING)) {
943 device_config_needed =
944 dev->fsa_dev[container].config_needed;
945 dev->fsa_dev[container].config_needed = NOTHING;
946 break;
947 }
948 }
949 if (device_config_needed == NOTHING)
950 return;
951
952 /*
953 * If we decided that a re-configuration needs to be done,
954 * schedule it here on the way out the door, please close the door
955 * behind you.
956 */
957
958 busy = 0;
959
960
961 /*
962 * Find the Scsi_Device associated with the SCSI address,
963 * and mark it as changed, invalidating the cache. This deals
964 * with changes to existing device IDs.
965 */
966
967 if (!dev || !dev->scsi_host_ptr)
968 return;
969 /*
970 * force reload of disk info via probe_container
971 */
972 if ((device_config_needed == CHANGE)
973 && (dev->fsa_dev[container].valid == 1))
974 dev->fsa_dev[container].valid = 2;
975 if ((device_config_needed == CHANGE) ||
976 (device_config_needed == ADD))
977 probe_container(dev, container);
978 device = scsi_device_lookup(dev->scsi_host_ptr,
979 CONTAINER_TO_CHANNEL(container),
980 CONTAINER_TO_ID(container),
981 CONTAINER_TO_LUN(container));
982 if (device) {
983 switch (device_config_needed) {
984 case DELETE:
985 scsi_remove_device(device);
986 break;
987 case CHANGE:
988 if (!dev->fsa_dev[container].valid) {
989 scsi_remove_device(device);
990 break;
991 }
992 scsi_rescan_device(&device->sdev_gendev);
993
994 default:
995 break;
996 }
997 scsi_device_put(device);
998 }
999 if (device_config_needed == ADD) {
1000 scsi_add_device(dev->scsi_host_ptr,
1001 CONTAINER_TO_CHANNEL(container),
1002 CONTAINER_TO_ID(container),
1003 CONTAINER_TO_LUN(container));
1004 }
1005
1006}
1007
794/** 1008/**
795 * aac_command_thread - command processing thread 1009 * aac_command_thread - command processing thread
796 * @dev: Adapter to monitor 1010 * @dev: Adapter to monitor
@@ -805,7 +1019,6 @@ int aac_command_thread(struct aac_dev * dev)
805{ 1019{
806 struct hw_fib *hw_fib, *hw_newfib; 1020 struct hw_fib *hw_fib, *hw_newfib;
807 struct fib *fib, *newfib; 1021 struct fib *fib, *newfib;
808 struct aac_queue_block *queues = dev->queues;
809 struct aac_fib_context *fibctx; 1022 struct aac_fib_context *fibctx;
810 unsigned long flags; 1023 unsigned long flags;
811 DECLARE_WAITQUEUE(wait, current); 1024 DECLARE_WAITQUEUE(wait, current);
@@ -825,21 +1038,22 @@ int aac_command_thread(struct aac_dev * dev)
825 * Let the DPC know it has a place to send the AIF's to. 1038 * Let the DPC know it has a place to send the AIF's to.
826 */ 1039 */
827 dev->aif_thread = 1; 1040 dev->aif_thread = 1;
828 add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait); 1041 add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
829 set_current_state(TASK_INTERRUPTIBLE); 1042 set_current_state(TASK_INTERRUPTIBLE);
1043 dprintk ((KERN_INFO "aac_command_thread start\n"));
830 while(1) 1044 while(1)
831 { 1045 {
832 spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags); 1046 spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
833 while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) { 1047 while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
834 struct list_head *entry; 1048 struct list_head *entry;
835 struct aac_aifcmd * aifcmd; 1049 struct aac_aifcmd * aifcmd;
836 1050
837 set_current_state(TASK_RUNNING); 1051 set_current_state(TASK_RUNNING);
838 1052
839 entry = queues->queue[HostNormCmdQueue].cmdq.next; 1053 entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
840 list_del(entry); 1054 list_del(entry);
841 1055
842 spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags); 1056 spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
843 fib = list_entry(entry, struct fib, fiblink); 1057 fib = list_entry(entry, struct fib, fiblink);
844 /* 1058 /*
845 * We will process the FIB here or pass it to a 1059 * We will process the FIB here or pass it to a
@@ -860,6 +1074,7 @@ int aac_command_thread(struct aac_dev * dev)
860 aifcmd = (struct aac_aifcmd *) hw_fib->data; 1074 aifcmd = (struct aac_aifcmd *) hw_fib->data;
861 if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) { 1075 if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
862 /* Handle Driver Notify Events */ 1076 /* Handle Driver Notify Events */
1077 aac_handle_aif(dev, fib);
863 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1078 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
864 fib_adapter_complete(fib, (u16)sizeof(u32)); 1079 fib_adapter_complete(fib, (u16)sizeof(u32));
865 } else { 1080 } else {
@@ -869,9 +1084,62 @@ int aac_command_thread(struct aac_dev * dev)
869 1084
870 u32 time_now, time_last; 1085 u32 time_now, time_last;
871 unsigned long flagv; 1086 unsigned long flagv;
872 1087 unsigned num;
1088 struct hw_fib ** hw_fib_pool, ** hw_fib_p;
1089 struct fib ** fib_pool, ** fib_p;
1090
1091 /* Sniff events */
1092 if ((aifcmd->command ==
1093 cpu_to_le32(AifCmdEventNotify)) ||
1094 (aifcmd->command ==
1095 cpu_to_le32(AifCmdJobProgress))) {
1096 aac_handle_aif(dev, fib);
1097 }
1098
873 time_now = jiffies/HZ; 1099 time_now = jiffies/HZ;
874 1100
1101 /*
1102 * Warning: no sleep allowed while
1103 * holding spinlock. We take the estimate
1104 * and pre-allocate a set of fibs outside the
1105 * lock.
1106 */
1107 num = le32_to_cpu(dev->init->AdapterFibsSize)
1108 / sizeof(struct hw_fib); /* some extra */
1109 spin_lock_irqsave(&dev->fib_lock, flagv);
1110 entry = dev->fib_list.next;
1111 while (entry != &dev->fib_list) {
1112 entry = entry->next;
1113 ++num;
1114 }
1115 spin_unlock_irqrestore(&dev->fib_lock, flagv);
1116 hw_fib_pool = NULL;
1117 fib_pool = NULL;
1118 if (num
1119 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
1120 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
1121 hw_fib_p = hw_fib_pool;
1122 fib_p = fib_pool;
1123 while (hw_fib_p < &hw_fib_pool[num]) {
1124 if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
1125 --hw_fib_p;
1126 break;
1127 }
1128 if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
1129 kfree(*(--hw_fib_p));
1130 break;
1131 }
1132 }
1133 if ((num = hw_fib_p - hw_fib_pool) == 0) {
1134 kfree(fib_pool);
1135 fib_pool = NULL;
1136 kfree(hw_fib_pool);
1137 hw_fib_pool = NULL;
1138 }
1139 } else if (hw_fib_pool) {
1140 kfree(hw_fib_pool);
1141 hw_fib_pool = NULL;
1142 }
875 spin_lock_irqsave(&dev->fib_lock, flagv); 1143 spin_lock_irqsave(&dev->fib_lock, flagv);
876 entry = dev->fib_list.next; 1144 entry = dev->fib_list.next;
877 /* 1145 /*
@@ -880,6 +1148,8 @@ int aac_command_thread(struct aac_dev * dev)
880 * fib, and then set the event to wake up the 1148 * fib, and then set the event to wake up the
881 * thread that is waiting for it. 1149 * thread that is waiting for it.
882 */ 1150 */
1151 hw_fib_p = hw_fib_pool;
1152 fib_p = fib_pool;
883 while (entry != &dev->fib_list) { 1153 while (entry != &dev->fib_list) {
884 /* 1154 /*
885 * Extract the fibctx 1155 * Extract the fibctx
@@ -912,9 +1182,11 @@ int aac_command_thread(struct aac_dev * dev)
912 * Warning: no sleep allowed while 1182 * Warning: no sleep allowed while
913 * holding spinlock 1183 * holding spinlock
914 */ 1184 */
915 hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC); 1185 if (hw_fib_p < &hw_fib_pool[num]) {
916 newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC); 1186 hw_newfib = *hw_fib_p;
917 if (newfib && hw_newfib) { 1187 *(hw_fib_p++) = NULL;
1188 newfib = *fib_p;
1189 *(fib_p++) = NULL;
918 /* 1190 /*
919 * Make the copy of the FIB 1191 * Make the copy of the FIB
920 */ 1192 */
@@ -929,15 +1201,11 @@ int aac_command_thread(struct aac_dev * dev)
929 fibctx->count++; 1201 fibctx->count++;
930 /* 1202 /*
931 * Set the event to wake up the 1203 * Set the event to wake up the
932 * thread that will waiting. 1204 * thread that is waiting.
933 */ 1205 */
934 up(&fibctx->wait_sem); 1206 up(&fibctx->wait_sem);
935 } else { 1207 } else {
936 printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); 1208 printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
937 if(newfib)
938 kfree(newfib);
939 if(hw_newfib)
940 kfree(hw_newfib);
941 } 1209 }
942 entry = entry->next; 1210 entry = entry->next;
943 } 1211 }
@@ -947,21 +1215,38 @@ int aac_command_thread(struct aac_dev * dev)
947 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1215 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
948 fib_adapter_complete(fib, sizeof(u32)); 1216 fib_adapter_complete(fib, sizeof(u32));
949 spin_unlock_irqrestore(&dev->fib_lock, flagv); 1217 spin_unlock_irqrestore(&dev->fib_lock, flagv);
1218 /* Free up the remaining resources */
1219 hw_fib_p = hw_fib_pool;
1220 fib_p = fib_pool;
1221 while (hw_fib_p < &hw_fib_pool[num]) {
1222 if (*hw_fib_p)
1223 kfree(*hw_fib_p);
1224 if (*fib_p)
1225 kfree(*fib_p);
1226 ++fib_p;
1227 ++hw_fib_p;
1228 }
1229 if (hw_fib_pool)
1230 kfree(hw_fib_pool);
1231 if (fib_pool)
1232 kfree(fib_pool);
950 } 1233 }
951 spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
952 kfree(fib); 1234 kfree(fib);
1235 spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
953 } 1236 }
954 /* 1237 /*
955 * There are no more AIF's 1238 * There are no more AIF's
956 */ 1239 */
957 spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags); 1240 spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
958 schedule(); 1241 schedule();
959 1242
960 if(signal_pending(current)) 1243 if(signal_pending(current))
961 break; 1244 break;
962 set_current_state(TASK_INTERRUPTIBLE); 1245 set_current_state(TASK_INTERRUPTIBLE);
963 } 1246 }
964 remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait); 1247 if (dev->queues)
1248 remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
965 dev->aif_thread = 0; 1249 dev->aif_thread = 0;
966 complete_and_exit(&dev->aif_completion, 0); 1250 complete_and_exit(&dev->aif_completion, 0);
1251 return 0;
967} 1252}
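
The fib_send() change above replaces an unconditional down() for the first "interruptable" command (wait < 0) with a bounded trylock-and-delay loop that gives up after roughly three minutes and returns -ETIMEDOUT, letting the driver report a likely interrupt-routing problem instead of hanging. A minimal user-space sketch of the same pattern, using POSIX sem_trywait() in place of the kernel semaphore; the function names, spin budget and delay are illustrative, not the driver's:

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

/* Poll a semaphore with a bounded busy-wait instead of blocking forever.
 * Returns 0 once the semaphore is taken, -ETIMEDOUT if the budget expires. */
static int wait_bounded(sem_t *done, unsigned long spins, unsigned int delay_us)
{
	while (sem_trywait(done) != 0) {
		if (errno != EAGAIN)
			return -errno;          /* real failure, not "still busy" */
		if (--spins == 0)
			return -ETIMEDOUT;      /* give up; caller unwinds its state */
		usleep(delay_us);               /* analogous to the udelay(5) above */
	}
	return 0;
}

int main(void)
{
	sem_t done;

	sem_init(&done, 0, 0);                  /* never posted: force the timeout */
	printf("wait_bounded: %d\n", wait_bounded(&done, 1000, 100));
	sem_destroy(&done);
	return 0;
}

As in the hunk, the caller is expected to remove the pending request from its queue under the queue lock before returning the timeout error.
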
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4ff29d7f5825..de8490a92831 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -748,7 +748,8 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
748 unique_id++; 748 unique_id++;
749 } 749 }
750 750
751 if (pci_enable_device(pdev)) 751 error = pci_enable_device(pdev);
752 if (error)
752 goto out; 753 goto out;
753 754
754 if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) || 755 if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) ||
@@ -772,6 +773,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
772 shost->irq = pdev->irq; 773 shost->irq = pdev->irq;
773 shost->base = pci_resource_start(pdev, 0); 774 shost->base = pci_resource_start(pdev, 0);
774 shost->unique_id = unique_id; 775 shost->unique_id = unique_id;
776 shost->max_cmd_len = 16;
775 777
776 aac = (struct aac_dev *)shost->hostdata; 778 aac = (struct aac_dev *)shost->hostdata;
777 aac->scsi_host_ptr = shost; 779 aac->scsi_host_ptr = shost;
@@ -799,7 +801,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
799 goto out_free_fibs; 801 goto out_free_fibs;
800 802
801 aac->maximum_num_channels = aac_drivers[index].channels; 803 aac->maximum_num_channels = aac_drivers[index].channels;
802 aac_get_adapter_info(aac); 804 error = aac_get_adapter_info(aac);
805 if (error < 0)
806 goto out_deinit;
803 807
804 /* 808 /*
805 * Lets override negotiations and drop the maximum SG limit to 34 809 * Lets override negotiations and drop the maximum SG limit to 34
@@ -927,8 +931,8 @@ static int __init aac_init(void)
927 printk(KERN_INFO "Adaptec %s driver (%s)\n", 931 printk(KERN_INFO "Adaptec %s driver (%s)\n",
928 AAC_DRIVERNAME, aac_driver_version); 932 AAC_DRIVERNAME, aac_driver_version);
929 933
930 error = pci_module_init(&aac_pci_driver); 934 error = pci_register_driver(&aac_pci_driver);
931 if (error) 935 if (error < 0)
932 return error; 936 return error;
933 937
934 aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops); 938 aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
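
The aac_probe_one() hunks above stop discarding return codes: pci_enable_device()'s result is stored, and a failing aac_get_adapter_info() now unwinds through the existing error labels. A small sketch of that goto-unwind idiom; the helpers are stand-ins, not the driver's functions, and get_info() fakes the failure the hunk now checks for:

#include <stdio.h>

static int enable(void)    { puts("enable");   return 0; }
static void disable(void)  { puts("disable"); }
static int init(void)      { puts("init");     return 0; }
static void deinit(void)   { puts("deinit"); }
static int get_info(void)  { puts("get_info"); return -5; }

static int probe_one(void)
{
	int error;

	error = enable();
	if (error)
		goto out;

	error = init();
	if (error)
		goto out_disable;

	error = get_info();
	if (error < 0)
		goto out_deinit;        /* the check the hunk adds */

	return 0;                       /* fully initialised */

out_deinit:
	deinit();                       /* undo init() only */
out_disable:
	disable();                      /* undo enable() only */
out:
	return error;
}

int main(void)
{
	printf("probe_one() = %d\n", probe_one());
	return 0;
}

Each label unwinds exactly the steps completed before the failing one, which is why the new error checks can reuse the labels already present in the probe path.
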
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
index 70c5fb59c9ea..d754b3267863 100644
--- a/drivers/scsi/aic7xxx/aic7770_osm.c
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -112,6 +112,9 @@ aic7770_remove(struct device *dev)
112 struct ahc_softc *ahc = dev_get_drvdata(dev); 112 struct ahc_softc *ahc = dev_get_drvdata(dev);
113 u_long s; 113 u_long s;
114 114
115 if (ahc->platform_data && ahc->platform_data->host)
116 scsi_remove_host(ahc->platform_data->host);
117
115 ahc_lock(ahc, &s); 118 ahc_lock(ahc, &s);
116 ahc_intr_enable(ahc, FALSE); 119 ahc_intr_enable(ahc, FALSE);
117 ahc_unlock(ahc, &s); 120 ahc_unlock(ahc, &s);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 6b6d4e287793..95c285cc83e4 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -1192,11 +1192,6 @@ ahd_platform_free(struct ahd_softc *ahd)
1192 int i, j; 1192 int i, j;
1193 1193
1194 if (ahd->platform_data != NULL) { 1194 if (ahd->platform_data != NULL) {
1195 if (ahd->platform_data->host != NULL) {
1196 scsi_remove_host(ahd->platform_data->host);
1197 scsi_host_put(ahd->platform_data->host);
1198 }
1199
1200 /* destroy all of the device and target objects */ 1195 /* destroy all of the device and target objects */
1201 for (i = 0; i < AHD_NUM_TARGETS; i++) { 1196 for (i = 0; i < AHD_NUM_TARGETS; i++) {
1202 starget = ahd->platform_data->starget[i]; 1197 starget = ahd->platform_data->starget[i];
@@ -1226,6 +1221,9 @@ ahd_platform_free(struct ahd_softc *ahd)
1226 release_mem_region(ahd->platform_data->mem_busaddr, 1221 release_mem_region(ahd->platform_data->mem_busaddr,
1227 0x1000); 1222 0x1000);
1228 } 1223 }
1224 if (ahd->platform_data->host)
1225 scsi_host_put(ahd->platform_data->host);
1226
1229 free(ahd->platform_data, M_DEVBUF); 1227 free(ahd->platform_data, M_DEVBUF);
1230 } 1228 }
1231} 1229}
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 390b53852d4b..bf360ae021ab 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -95,6 +95,9 @@ ahd_linux_pci_dev_remove(struct pci_dev *pdev)
95 struct ahd_softc *ahd = pci_get_drvdata(pdev); 95 struct ahd_softc *ahd = pci_get_drvdata(pdev);
96 u_long s; 96 u_long s;
97 97
98 if (ahd->platform_data && ahd->platform_data->host)
99 scsi_remove_host(ahd->platform_data->host);
100
98 ahd_lock(ahd, &s); 101 ahd_lock(ahd, &s);
99 ahd_intr_enable(ahd, FALSE); 102 ahd_intr_enable(ahd, FALSE);
100 ahd_unlock(ahd, &s); 103 ahd_unlock(ahd, &s);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index c932b3b94490..6ee1435d37fa 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1109,15 +1109,6 @@ ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *templa
1109 return (0); 1109 return (0);
1110} 1110}
1111 1111
1112uint64_t
1113ahc_linux_get_memsize(void)
1114{
1115 struct sysinfo si;
1116
1117 si_meminfo(&si);
1118 return ((uint64_t)si.totalram << PAGE_SHIFT);
1119}
1120
1121/* 1112/*
1122 * Place the SCSI bus into a known state by either resetting it, 1113 * Place the SCSI bus into a known state by either resetting it,
1123 * or forcing transfer negotiations on the next command to any 1114 * or forcing transfer negotiations on the next command to any
@@ -1218,11 +1209,6 @@ ahc_platform_free(struct ahc_softc *ahc)
1218 int i, j; 1209 int i, j;
1219 1210
1220 if (ahc->platform_data != NULL) { 1211 if (ahc->platform_data != NULL) {
1221 if (ahc->platform_data->host != NULL) {
1222 scsi_remove_host(ahc->platform_data->host);
1223 scsi_host_put(ahc->platform_data->host);
1224 }
1225
1226 /* destroy all of the device and target objects */ 1212 /* destroy all of the device and target objects */
1227 for (i = 0; i < AHC_NUM_TARGETS; i++) { 1213 for (i = 0; i < AHC_NUM_TARGETS; i++) {
1228 starget = ahc->platform_data->starget[i]; 1214 starget = ahc->platform_data->starget[i];
@@ -1251,6 +1237,9 @@ ahc_platform_free(struct ahc_softc *ahc)
1251 0x1000); 1237 0x1000);
1252 } 1238 }
1253 1239
1240 if (ahc->platform_data->host)
1241 scsi_host_put(ahc->platform_data->host);
1242
1254 free(ahc->platform_data, M_DEVBUF); 1243 free(ahc->platform_data, M_DEVBUF);
1255 } 1244 }
1256} 1245}
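
The aic7xxx/aic79xx changes above move scsi_remove_host() out of ahc_platform_free()/ahd_platform_free() and into the bus remove callbacks, and defer scsi_host_put() until after the per-target objects are destroyed. The ordering being enforced is: unpublish the host from the upper layer first, free the dependent state, and drop the final reference last. A refcounting sketch of that ordering; the struct and helpers are illustrative, not the SCSI midlayer API:

#include <stdio.h>
#include <stdlib.h>

struct host {
	int refs;
	int published;
};

static struct host *host_get(struct host *h) { h->refs++; return h; }

static void host_put(struct host *h)
{
	if (--h->refs == 0) {
		printf("host freed\n");
		free(h);
	}
}

static void host_remove(struct host *h)
{
	h->published = 0;               /* upper layer can no longer reach it */
	printf("host unpublished\n");
}

int main(void)
{
	struct host *h = calloc(1, sizeof(*h));

	h->refs = 1;                    /* creation reference */
	h->published = 1;
	host_get(h);                    /* driver's own reference */

	host_remove(h);                 /* step 1: before tearing anything down */
	printf("free per-target objects\n");    /* step 2 */
	host_put(h);                    /* drop the driver reference */
	host_put(h);                    /* step 3: last put actually frees */
	return 0;
}
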
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index c52996269240..be9edbe26dbe 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -494,8 +494,6 @@ ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
494int ahc_linux_register_host(struct ahc_softc *, 494int ahc_linux_register_host(struct ahc_softc *,
495 struct scsi_host_template *); 495 struct scsi_host_template *);
496 496
497uint64_t ahc_linux_get_memsize(void);
498
499/*************************** Pretty Printing **********************************/ 497/*************************** Pretty Printing **********************************/
500struct info_str { 498struct info_str {
501 char *buffer; 499 char *buffer;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 0d44a6907dd2..cb30d9c1153d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -143,6 +143,9 @@ ahc_linux_pci_dev_remove(struct pci_dev *pdev)
143 struct ahc_softc *ahc = pci_get_drvdata(pdev); 143 struct ahc_softc *ahc = pci_get_drvdata(pdev);
144 u_long s; 144 u_long s;
145 145
146 if (ahc->platform_data && ahc->platform_data->host)
147 scsi_remove_host(ahc->platform_data->host);
148
146 ahc_lock(ahc, &s); 149 ahc_lock(ahc, &s);
147 ahc_intr_enable(ahc, FALSE); 150 ahc_intr_enable(ahc, FALSE);
148 ahc_unlock(ahc, &s); 151 ahc_unlock(ahc, &s);
@@ -180,6 +183,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
180 struct ahc_pci_identity *entry; 183 struct ahc_pci_identity *entry;
181 char *name; 184 char *name;
182 int error; 185 int error;
186 struct device *dev = &pdev->dev;
183 187
184 pci = pdev; 188 pci = pdev;
185 entry = ahc_find_pci_device(pci); 189 entry = ahc_find_pci_device(pci);
@@ -209,11 +213,12 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
209 pci_set_master(pdev); 213 pci_set_master(pdev);
210 214
211 if (sizeof(dma_addr_t) > 4 215 if (sizeof(dma_addr_t) > 4
212 && ahc_linux_get_memsize() > 0x80000000 216 && ahc->features & AHC_LARGE_SCBS
213 && pci_set_dma_mask(pdev, mask_39bit) == 0) { 217 && dma_set_mask(dev, mask_39bit) == 0
218 && dma_get_required_mask(dev) > DMA_32BIT_MASK) {
214 ahc->flags |= AHC_39BIT_ADDRESSING; 219 ahc->flags |= AHC_39BIT_ADDRESSING;
215 } else { 220 } else {
216 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { 221 if (dma_set_mask(dev, DMA_32BIT_MASK)) {
217 printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n"); 222 printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
218 return (-ENODEV); 223 return (-ENODEV);
219 } 224 }
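
The DMA-mask hunk above only requests the wider 39-bit mask when the controller supports large SCBs and dma_get_required_mask() reports memory above the 32-bit boundary; otherwise it falls back to a 32-bit mask. A standalone sketch of the mask values and the decision, with illustrative constants (mask_39bit is defined elsewhere in the real file):

#include <stdint.h>
#include <stdio.h>

#define DMA_32BIT_MASK   0x00000000ffffffffULL
#define DMA_39BIT_MASK   0x0000007fffffffffULL

/* Mirror of the decision: take the wider mask only when the platform really
 * has memory the 32-bit mask cannot reach and the hardware can use it. */
static int pick_39bit(uint64_t required_mask, int large_scbs)
{
	return large_scbs && required_mask > DMA_32BIT_MASK;
}

int main(void)
{
	printf("32-bit mask: %#llx\n", (unsigned long long)DMA_32BIT_MASK);
	printf("39-bit mask: %#llx\n", (unsigned long long)DMA_39BIT_MASK);
	/* 8 GB of RAM -> required mask covers bit 33 -> choose the 39-bit mask */
	printf("8GB, large SCBs: %d\n", pick_39bit(0x1ffffffffULL, 1));
	/* 2 GB of RAM -> the 32-bit mask is enough */
	printf("2GB, large SCBs: %d\n", pick_39bit(0x7fffffffULL, 1));
	return 0;
}
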
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 87e0c36f1554..d71cef767cec 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -442,7 +442,6 @@ static void piix_sata_phy_reset(struct ata_port *ap)
442 * piix_set_piomode - Initialize host controller PATA PIO timings 442 * piix_set_piomode - Initialize host controller PATA PIO timings
443 * @ap: Port whose timings we are configuring 443 * @ap: Port whose timings we are configuring
444 * @adev: um 444 * @adev: um
445 * @pio: PIO mode, 0 - 4
446 * 445 *
447 * Set PIO mode for device, in host controller PCI config space. 446 * Set PIO mode for device, in host controller PCI config space.
448 * 447 *
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index e6153fe5842a..a8cfbef304b5 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -996,6 +996,7 @@ oktosend:
996#ifdef ED_DBGP 996#ifdef ED_DBGP
997 printk("send_s870: prdaddr_2 0x%8x tmpcip %x target_id %d\n", dev->id[c][target_id].prdaddr,tmpcip,target_id); 997 printk("send_s870: prdaddr_2 0x%8x tmpcip %x target_id %d\n", dev->id[c][target_id].prdaddr,tmpcip,target_id);
998#endif 998#endif
999 dev->id[c][target_id].prdaddr = dev->id[c][target_id].prd_bus;
999 outl(dev->id[c][target_id].prdaddr, tmpcip); 1000 outl(dev->id[c][target_id].prdaddr, tmpcip);
1000 tmpcip = tmpcip - 2; 1001 tmpcip = tmpcip - 2;
1001 outb(0x06, tmpcip); 1002 outb(0x06, tmpcip);
@@ -2572,7 +2573,7 @@ static void atp870u_free_tables(struct Scsi_Host *host)
2572 for (k = 0; k < 16; k++) { 2573 for (k = 0; k < 16; k++) {
2573 if (!atp_dev->id[j][k].prd_table) 2574 if (!atp_dev->id[j][k].prd_table)
2574 continue; 2575 continue;
2575 pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prdaddr); 2576 pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
2576 atp_dev->id[j][k].prd_table = NULL; 2577 atp_dev->id[j][k].prd_table = NULL;
2577 } 2578 }
2578 } 2579 }
@@ -2584,12 +2585,13 @@ static int atp870u_init_tables(struct Scsi_Host *host)
2584 int c,k; 2585 int c,k;
2585 for(c=0;c < 2;c++) { 2586 for(c=0;c < 2;c++) {
2586 for(k=0;k<16;k++) { 2587 for(k=0;k<16;k++) {
2587 atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prdaddr)); 2588 atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prd_bus));
2588 if (!atp_dev->id[c][k].prd_table) { 2589 if (!atp_dev->id[c][k].prd_table) {
2589 printk("atp870u_init_tables fail\n"); 2590 printk("atp870u_init_tables fail\n");
2590 atp870u_free_tables(host); 2591 atp870u_free_tables(host);
2591 return -ENOMEM; 2592 return -ENOMEM;
2592 } 2593 }
2594 atp_dev->id[c][k].prdaddr = atp_dev->id[c][k].prd_bus;
2593 atp_dev->id[c][k].devsp=0x20; 2595 atp_dev->id[c][k].devsp=0x20;
2594 atp_dev->id[c][k].devtype = 0x7f; 2596 atp_dev->id[c][k].devtype = 0x7f;
2595 atp_dev->id[c][k].curr_req = NULL; 2597 atp_dev->id[c][k].curr_req = NULL;
diff --git a/drivers/scsi/atp870u.h b/drivers/scsi/atp870u.h
index 89f43af39cf2..62bae64a01c1 100644
--- a/drivers/scsi/atp870u.h
+++ b/drivers/scsi/atp870u.h
@@ -54,8 +54,9 @@ struct atp_unit
54 unsigned long tran_len; 54 unsigned long tran_len;
55 unsigned long last_len; 55 unsigned long last_len;
56 unsigned char *prd_pos; 56 unsigned char *prd_pos;
57 unsigned char *prd_table; 57 unsigned char *prd_table; /* Kernel address of PRD table */
58 dma_addr_t prdaddr; 58 dma_addr_t prd_bus; /* Bus address of PRD */
59 dma_addr_t prdaddr; /* Dynamically updated in driver */
59 struct scsi_cmnd *curr_req; 60 struct scsi_cmnd *curr_req;
60 } id[2][16]; 61 } id[2][16];
61 struct Scsi_Host *host; 62 struct Scsi_Host *host;
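
The atp870u changes above split the single prdaddr field into prd_bus (the bus address the PRD table was allocated with, used for freeing) and prdaddr (a working copy that the driver advances and reloads before each transfer). A user-space analog of keeping the allocation address separate from the cursor; the struct, sizes and helpers are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct prd_table {
	unsigned char *base;    /* allocation address, never modified */
	unsigned char *cursor;  /* advanced while the table is walked */
};

static int prd_init(struct prd_table *t, size_t size)
{
	t->base = malloc(size);
	if (!t->base)
		return -1;
	memset(t->base, 0, size);
	t->cursor = t->base;
	return 0;
}

static void prd_start_command(struct prd_table *t)
{
	t->cursor = t->base;    /* analogous to prdaddr = prd_bus */
}

int main(void)
{
	struct prd_table t;

	if (prd_init(&t, 1024))
		return 1;
	prd_start_command(&t);
	t.cursor += 16;                 /* consume a few entries */
	prd_start_command(&t);          /* reset before the next command */
	free(t.base);                   /* free with the original address */
	printf("ok\n");
	return 0;
}
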
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index fa652f8aa643..d59d449a9e4d 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -1360,3 +1360,5 @@ static Scsi_Host_Template driver_template = {
1360 .use_clustering = DISABLE_CLUSTERING, 1360 .use_clustering = DISABLE_CLUSTERING,
1361}; 1361};
1362#include "scsi_module.c" 1362#include "scsi_module.c"
1363
1364MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 85503fad789a..02fe371b0ab8 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -98,6 +98,7 @@ int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
98 switch (oldstate) { 98 switch (oldstate) {
99 case SHOST_CREATED: 99 case SHOST_CREATED:
100 case SHOST_RUNNING: 100 case SHOST_RUNNING:
101 case SHOST_CANCEL_RECOVERY:
101 break; 102 break;
102 default: 103 default:
103 goto illegal; 104 goto illegal;
@@ -107,12 +108,31 @@ int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
107 case SHOST_DEL: 108 case SHOST_DEL:
108 switch (oldstate) { 109 switch (oldstate) {
109 case SHOST_CANCEL: 110 case SHOST_CANCEL:
111 case SHOST_DEL_RECOVERY:
110 break; 112 break;
111 default: 113 default:
112 goto illegal; 114 goto illegal;
113 } 115 }
114 break; 116 break;
115 117
118 case SHOST_CANCEL_RECOVERY:
119 switch (oldstate) {
120 case SHOST_CANCEL:
121 case SHOST_RECOVERY:
122 break;
123 default:
124 goto illegal;
125 }
126 break;
127
128 case SHOST_DEL_RECOVERY:
129 switch (oldstate) {
130 case SHOST_CANCEL_RECOVERY:
131 break;
132 default:
133 goto illegal;
134 }
135 break;
116 } 136 }
117 shost->shost_state = state; 137 shost->shost_state = state;
118 return 0; 138 return 0;
@@ -134,17 +154,29 @@ EXPORT_SYMBOL(scsi_host_set_state);
134 **/ 154 **/
135void scsi_remove_host(struct Scsi_Host *shost) 155void scsi_remove_host(struct Scsi_Host *shost)
136{ 156{
157 unsigned long flags;
137 down(&shost->scan_mutex); 158 down(&shost->scan_mutex);
138 scsi_host_set_state(shost, SHOST_CANCEL); 159 spin_lock_irqsave(shost->host_lock, flags);
160 if (scsi_host_set_state(shost, SHOST_CANCEL))
161 if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) {
162 spin_unlock_irqrestore(shost->host_lock, flags);
163 up(&shost->scan_mutex);
164 return;
165 }
166 spin_unlock_irqrestore(shost->host_lock, flags);
139 up(&shost->scan_mutex); 167 up(&shost->scan_mutex);
140 scsi_forget_host(shost); 168 scsi_forget_host(shost);
141 scsi_proc_host_rm(shost); 169 scsi_proc_host_rm(shost);
142 170
143 scsi_host_set_state(shost, SHOST_DEL); 171 spin_lock_irqsave(shost->host_lock, flags);
172 if (scsi_host_set_state(shost, SHOST_DEL))
173 BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
174 spin_unlock_irqrestore(shost->host_lock, flags);
144 175
145 transport_unregister_device(&shost->shost_gendev); 176 transport_unregister_device(&shost->shost_gendev);
146 class_device_unregister(&shost->shost_classdev); 177 class_device_unregister(&shost->shost_classdev);
147 device_del(&shost->shost_gendev); 178 device_del(&shost->shost_gendev);
179 scsi_proc_hostdir_rm(shost->hostt);
148} 180}
149EXPORT_SYMBOL(scsi_remove_host); 181EXPORT_SYMBOL(scsi_remove_host);
150 182
@@ -231,7 +263,6 @@ static void scsi_host_dev_release(struct device *dev)
231 if (shost->work_q) 263 if (shost->work_q)
232 destroy_workqueue(shost->work_q); 264 destroy_workqueue(shost->work_q);
233 265
234 scsi_proc_hostdir_rm(shost->hostt);
235 scsi_destroy_command_freelist(shost); 266 scsi_destroy_command_freelist(shost);
236 kfree(shost->shost_data); 267 kfree(shost->shost_data);
237 268
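
The hosts.c hunks above add SHOST_CANCEL_RECOVERY and SHOST_DEL_RECOVERY so a host that is mid-recovery can still be cancelled and deleted: scsi_remove_host() first tries the plain transition and falls back to the _RECOVERY variant. A reduced model of the legality checks, covering only the states this hunk touches; the enum values and helper are illustrative:

#include <stdio.h>

enum host_state {
	CREATED, RUNNING, CANCEL, DEL, CANCEL_RECOVERY, DEL_RECOVERY, RECOVERY
};

/* Each target state lists the states it may legally be entered from. */
static int set_state(enum host_state *cur, enum host_state next)
{
	switch (next) {
	case CANCEL:
		if (*cur != CREATED && *cur != RUNNING && *cur != CANCEL_RECOVERY)
			return -1;
		break;
	case CANCEL_RECOVERY:
		if (*cur != CANCEL && *cur != RECOVERY)
			return -1;
		break;
	case DEL:
		if (*cur != CANCEL && *cur != DEL_RECOVERY)
			return -1;
		break;
	case DEL_RECOVERY:
		if (*cur != CANCEL_RECOVERY)
			return -1;
		break;
	default:
		return -1;
	}
	*cur = next;
	return 0;
}

int main(void)
{
	enum host_state s = RECOVERY;

	/* A host stuck in recovery is cancelled via the _RECOVERY variants. */
	printf("CANCEL          -> %d\n", set_state(&s, CANCEL));           /* rejected */
	printf("CANCEL_RECOVERY -> %d\n", set_state(&s, CANCEL_RECOVERY));  /* ok */
	printf("DEL_RECOVERY    -> %d\n", set_state(&s, DEL_RECOVERY));     /* ok */
	return 0;
}
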
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 6e54c7d9b33c..19392f651272 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -460,6 +460,8 @@ MODULE_PARM(adisplay, "1i");
460MODULE_PARM(normal, "1i"); 460MODULE_PARM(normal, "1i");
461MODULE_PARM(ansi, "1i"); 461MODULE_PARM(ansi, "1i");
462#endif 462#endif
463
464MODULE_LICENSE("GPL");
463#endif 465#endif
464/*counter of concurrent disk read/writes, to turn on/off disk led */ 466/*counter of concurrent disk read/writes, to turn on/off disk led */
465static int disk_rw_in_progress = 0; 467static int disk_rw_in_progress = 0;
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 5b14934ba861..ff25210b00ba 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -727,6 +727,16 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
727 if (hostdata->madapter_info.port_max_txu[0]) 727 if (hostdata->madapter_info.port_max_txu[0])
728 hostdata->host->max_sectors = 728 hostdata->host->max_sectors =
729 hostdata->madapter_info.port_max_txu[0] >> 9; 729 hostdata->madapter_info.port_max_txu[0] >> 9;
730
731 if (hostdata->madapter_info.os_type == 3 &&
732 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
733 printk("ibmvscsi: host (Ver. %s) doesn't support large"
734 "transfers\n",
735 hostdata->madapter_info.srp_version);
736 printk("ibmvscsi: limiting scatterlists to %d\n",
737 MAX_INDIRECT_BUFS);
738 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
739 }
730 } 740 }
731} 741}
732 742
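
The ibmvscsi hunk above gates the scatterlist limit on a lexicographic strcmp() of the reported SRP version against "1.6a". A quick standalone check of how that comparison behaves for a few plausible version strings (the values and helper name are illustrative):

#include <stdio.h>
#include <string.h>

static int needs_small_sg(const char *srp_version, int os_type)
{
	/* os_type 3 with a version string at or before "1.6a" gets the limit */
	return os_type == 3 && strcmp(srp_version, "1.6a") <= 0;
}

int main(void)
{
	const char *versions[] = { "1.5", "1.6a", "1.6b", "2.0" };
	size_t i;

	for (i = 0; i < sizeof(versions) / sizeof(versions[0]); i++)
		printf("%-5s -> limit sg: %d\n", versions[i],
		       needs_small_sg(versions[i], 3));
	return 0;
}
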
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 5cc53cd9323e..e5b01997117a 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -2465,9 +2465,12 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
2465 * 2465 *
2466 * LOCKING: 2466 * LOCKING:
2467 * None. (executing in kernel thread context) 2467 * None. (executing in kernel thread context)
2468 *
2469 * RETURNS:
2470 * Non-zero if qc completed, zero otherwise.
2468 */ 2471 */
2469 2472
2470static void ata_pio_complete (struct ata_port *ap) 2473static int ata_pio_complete (struct ata_port *ap)
2471{ 2474{
2472 struct ata_queued_cmd *qc; 2475 struct ata_queued_cmd *qc;
2473 u8 drv_stat; 2476 u8 drv_stat;
@@ -2486,14 +2489,14 @@ static void ata_pio_complete (struct ata_port *ap)
2486 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2489 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2487 ap->pio_task_state = PIO_ST_LAST_POLL; 2490 ap->pio_task_state = PIO_ST_LAST_POLL;
2488 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 2491 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2489 return; 2492 return 0;
2490 } 2493 }
2491 } 2494 }
2492 2495
2493 drv_stat = ata_wait_idle(ap); 2496 drv_stat = ata_wait_idle(ap);
2494 if (!ata_ok(drv_stat)) { 2497 if (!ata_ok(drv_stat)) {
2495 ap->pio_task_state = PIO_ST_ERR; 2498 ap->pio_task_state = PIO_ST_ERR;
2496 return; 2499 return 0;
2497 } 2500 }
2498 2501
2499 qc = ata_qc_from_tag(ap, ap->active_tag); 2502 qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -2502,6 +2505,10 @@ static void ata_pio_complete (struct ata_port *ap)
2502 ap->pio_task_state = PIO_ST_IDLE; 2505 ap->pio_task_state = PIO_ST_IDLE;
2503 2506
2504 ata_poll_qc_complete(qc, drv_stat); 2507 ata_poll_qc_complete(qc, drv_stat);
2508
2509 /* another command may start at this point */
2510
2511 return 1;
2505} 2512}
2506 2513
2507 2514
@@ -2709,7 +2716,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2709 2716
2710next_sg: 2717next_sg:
2711 if (unlikely(qc->cursg >= qc->n_elem)) { 2718 if (unlikely(qc->cursg >= qc->n_elem)) {
2712 /* 2719 /*
2713 * The end of qc->sg is reached and the device expects 2720 * The end of qc->sg is reached and the device expects
2714 * more data to transfer. In order not to overrun qc->sg 2721 * more data to transfer. In order not to overrun qc->sg
2715 * and fulfill length specified in the byte count register, 2722 * and fulfill length specified in the byte count register,
@@ -2721,7 +2728,7 @@ next_sg:
2721 unsigned int i; 2728 unsigned int i;
2722 2729
2723 if (words) /* warning if bytes > 1 */ 2730 if (words) /* warning if bytes > 1 */
2724 printk(KERN_WARNING "ata%u: %u bytes trailing data\n", 2731 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
2725 ap->id, bytes); 2732 ap->id, bytes);
2726 2733
2727 for (i = 0; i < words; i++) 2734 for (i = 0; i < words; i++)
@@ -2849,9 +2856,7 @@ static void ata_pio_block(struct ata_port *ap)
2849 if (is_atapi_taskfile(&qc->tf)) { 2856 if (is_atapi_taskfile(&qc->tf)) {
2850 /* no more data to transfer or unsupported ATAPI command */ 2857 /* no more data to transfer or unsupported ATAPI command */
2851 if ((status & ATA_DRQ) == 0) { 2858 if ((status & ATA_DRQ) == 0) {
2852 ap->pio_task_state = PIO_ST_IDLE; 2859 ap->pio_task_state = PIO_ST_LAST;
2853
2854 ata_poll_qc_complete(qc, status);
2855 return; 2860 return;
2856 } 2861 }
2857 2862
@@ -2887,7 +2892,12 @@ static void ata_pio_error(struct ata_port *ap)
2887static void ata_pio_task(void *_data) 2892static void ata_pio_task(void *_data)
2888{ 2893{
2889 struct ata_port *ap = _data; 2894 struct ata_port *ap = _data;
2890 unsigned long timeout = 0; 2895 unsigned long timeout;
2896 int qc_completed;
2897
2898fsm_start:
2899 timeout = 0;
2900 qc_completed = 0;
2891 2901
2892 switch (ap->pio_task_state) { 2902 switch (ap->pio_task_state) {
2893 case PIO_ST_IDLE: 2903 case PIO_ST_IDLE:
@@ -2898,7 +2908,7 @@ static void ata_pio_task(void *_data)
2898 break; 2908 break;
2899 2909
2900 case PIO_ST_LAST: 2910 case PIO_ST_LAST:
2901 ata_pio_complete(ap); 2911 qc_completed = ata_pio_complete(ap);
2902 break; 2912 break;
2903 2913
2904 case PIO_ST_POLL: 2914 case PIO_ST_POLL:
@@ -2913,10 +2923,9 @@ static void ata_pio_task(void *_data)
2913 } 2923 }
2914 2924
2915 if (timeout) 2925 if (timeout)
2916 queue_delayed_work(ata_wq, &ap->pio_task, 2926 queue_delayed_work(ata_wq, &ap->pio_task, timeout);
2917 timeout); 2927 else if (!qc_completed)
2918 else 2928 goto fsm_start;
2919 queue_work(ata_wq, &ap->pio_task);
2920} 2929}
2921 2930
2922static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev, 2931static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
@@ -4123,6 +4132,53 @@ err_out:
4123} 4132}
4124 4133
4125/** 4134/**
4135 * ata_host_set_remove - PCI layer callback for device removal
4136 * @host_set: ATA host set that was removed
4137 *
4138 * Unregister all objects associated with this host set. Free those
4139 * objects.
4140 *
4141 * LOCKING:
4142 * Inherited from calling layer (may sleep).
4143 */
4144
4145
4146void ata_host_set_remove(struct ata_host_set *host_set)
4147{
4148 struct ata_port *ap;
4149 unsigned int i;
4150
4151 for (i = 0; i < host_set->n_ports; i++) {
4152 ap = host_set->ports[i];
4153 scsi_remove_host(ap->host);
4154 }
4155
4156 free_irq(host_set->irq, host_set);
4157
4158 for (i = 0; i < host_set->n_ports; i++) {
4159 ap = host_set->ports[i];
4160
4161 ata_scsi_release(ap->host);
4162
4163 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4164 struct ata_ioports *ioaddr = &ap->ioaddr;
4165
4166 if (ioaddr->cmd_addr == 0x1f0)
4167 release_region(0x1f0, 8);
4168 else if (ioaddr->cmd_addr == 0x170)
4169 release_region(0x170, 8);
4170 }
4171
4172 scsi_host_put(ap->host);
4173 }
4174
4175 if (host_set->ops->host_stop)
4176 host_set->ops->host_stop(host_set);
4177
4178 kfree(host_set);
4179}
4180
4181/**
4126 * ata_scsi_release - SCSI layer callback hook for host unload 4182 * ata_scsi_release - SCSI layer callback hook for host unload
4127 * @host: libata host to be unloaded 4183 * @host: libata host to be unloaded
4128 * 4184 *
@@ -4462,39 +4518,8 @@ void ata_pci_remove_one (struct pci_dev *pdev)
4462{ 4518{
4463 struct device *dev = pci_dev_to_dev(pdev); 4519 struct device *dev = pci_dev_to_dev(pdev);
4464 struct ata_host_set *host_set = dev_get_drvdata(dev); 4520 struct ata_host_set *host_set = dev_get_drvdata(dev);
4465 struct ata_port *ap;
4466 unsigned int i;
4467
4468 for (i = 0; i < host_set->n_ports; i++) {
4469 ap = host_set->ports[i];
4470
4471 scsi_remove_host(ap->host);
4472 }
4473
4474 free_irq(host_set->irq, host_set);
4475
4476 for (i = 0; i < host_set->n_ports; i++) {
4477 ap = host_set->ports[i];
4478
4479 ata_scsi_release(ap->host);
4480
4481 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4482 struct ata_ioports *ioaddr = &ap->ioaddr;
4483
4484 if (ioaddr->cmd_addr == 0x1f0)
4485 release_region(0x1f0, 8);
4486 else if (ioaddr->cmd_addr == 0x170)
4487 release_region(0x170, 8);
4488 }
4489
4490 scsi_host_put(ap->host);
4491 }
4492
4493 if (host_set->ops->host_stop)
4494 host_set->ops->host_stop(host_set);
4495
4496 kfree(host_set);
4497 4521
4522 ata_host_set_remove(host_set);
4498 pci_release_regions(pdev); 4523 pci_release_regions(pdev);
4499 pci_disable_device(pdev); 4524 pci_disable_device(pdev);
4500 dev_set_drvdata(dev, NULL); 4525 dev_set_drvdata(dev, NULL);
@@ -4564,6 +4589,7 @@ module_exit(ata_exit);
4564EXPORT_SYMBOL_GPL(ata_std_bios_param); 4589EXPORT_SYMBOL_GPL(ata_std_bios_param);
4565EXPORT_SYMBOL_GPL(ata_std_ports); 4590EXPORT_SYMBOL_GPL(ata_std_ports);
4566EXPORT_SYMBOL_GPL(ata_device_add); 4591EXPORT_SYMBOL_GPL(ata_device_add);
4592EXPORT_SYMBOL_GPL(ata_host_set_remove);
4567EXPORT_SYMBOL_GPL(ata_sg_init); 4593EXPORT_SYMBOL_GPL(ata_sg_init);
4568EXPORT_SYMBOL_GPL(ata_sg_init_one); 4594EXPORT_SYMBOL_GPL(ata_sg_init_one);
4569EXPORT_SYMBOL_GPL(ata_qc_complete); 4595EXPORT_SYMBOL_GPL(ata_qc_complete);
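
The ata_pio_task() change above stops re-queuing the work item for every state and instead loops locally (the fsm_start label) until either a delayed re-poll is wanted or ata_pio_complete() reports that the command finished. A standalone sketch of that loop shape; the states, the fake step function and the "hardware" counter are invented for illustration, not libata's:

#include <stdio.h>

enum pio_state { ST_FIRST, ST_XFER, ST_LAST, ST_IDLE };

struct port {
	enum pio_state state;
	int steps;              /* fake hardware: data steps remaining */
};

/* Returns 1 when the command completes, 0 otherwise; *timeout is set when the
 * caller should come back later instead of looping again immediately. */
static int pio_step(struct port *ap, unsigned long *timeout)
{
	*timeout = 0;
	switch (ap->state) {
	case ST_FIRST:
		ap->state = ST_XFER;
		return 0;
	case ST_XFER:
		if (--ap->steps == 0)
			ap->state = ST_LAST;
		return 0;
	case ST_LAST:
		ap->state = ST_IDLE;
		return 1;               /* qc completed */
	default:
		*timeout = 1;           /* nothing to do now; re-poll later */
		return 0;
	}
}

static void pio_task(struct port *ap)
{
	unsigned long timeout;
	int completed;

	do {
		completed = pio_step(ap, &timeout);
	} while (!timeout && !completed);       /* mirrors "goto fsm_start" */

	if (timeout)
		printf("re-queue after %lu ticks\n", timeout);
	else
		printf("command completed\n");
}

int main(void)
{
	struct port ap = { ST_FIRST, 3 };

	pio_task(&ap);
	return 0;
}
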
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 86eaf6d408d5..acae7c48ef7d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -973,10 +973,10 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
973 if ((phba->fc_flag & FC_FABRIC) || 973 if ((phba->fc_flag & FC_FABRIC) ||
974 ((phba->fc_topology == TOPOLOGY_LOOP) && 974 ((phba->fc_topology == TOPOLOGY_LOOP) &&
975 (phba->fc_flag & FC_PUBLIC_LOOP))) 975 (phba->fc_flag & FC_PUBLIC_LOOP)))
976 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.wwn); 976 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
977 else 977 else
978 /* fabric is local port if there is no F/FL_Port */ 978 /* fabric is local port if there is no F/FL_Port */
979 node_name = wwn_to_u64(phba->fc_nodename.wwn); 979 node_name = wwn_to_u64(phba->fc_nodename.u.wwn);
980 980
981 spin_unlock_irq(shost->host_lock); 981 spin_unlock_irq(shost->host_lock);
982 982
@@ -1110,7 +1110,7 @@ lpfc_get_starget_node_name(struct scsi_target *starget)
1110 /* Search the mapped list for this target ID */ 1110 /* Search the mapped list for this target ID */
1111 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { 1111 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
1112 if (starget->id == ndlp->nlp_sid) { 1112 if (starget->id == ndlp->nlp_sid) {
1113 node_name = wwn_to_u64(ndlp->nlp_nodename.wwn); 1113 node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
1114 break; 1114 break;
1115 } 1115 }
1116 } 1116 }
@@ -1131,7 +1131,7 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
1131 /* Search the mapped list for this target ID */ 1131 /* Search the mapped list for this target ID */
1132 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { 1132 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
1133 if (starget->id == ndlp->nlp_sid) { 1133 if (starget->id == ndlp->nlp_sid) {
1134 port_name = wwn_to_u64(ndlp->nlp_portname.wwn); 1134 port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
1135 break; 1135 break;
1136 } 1136 }
1137 } 1137 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 4fb8eb0c84cf..56052f4510c3 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1019,8 +1019,8 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1019 struct fc_rport_identifiers rport_ids; 1019 struct fc_rport_identifiers rport_ids;
1020 1020
1021 /* Remote port has reappeared. Re-register w/ FC transport */ 1021 /* Remote port has reappeared. Re-register w/ FC transport */
1022 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.wwn); 1022 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
1023 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.wwn); 1023 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
1024 rport_ids.port_id = ndlp->nlp_DID; 1024 rport_ids.port_id = ndlp->nlp_DID;
1025 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 1025 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1026 if (ndlp->nlp_type & NLP_FCP_TARGET) 1026 if (ndlp->nlp_type & NLP_FCP_TARGET)
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 047a87c26cc0..86c41981188b 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -280,9 +280,9 @@ struct lpfc_name {
280#define NAME_CCITT_GR_TYPE 0xE 280#define NAME_CCITT_GR_TYPE 0xE
281 uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */ 281 uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */
282 uint8_t IEEE[6]; /* FC IEEE address */ 282 uint8_t IEEE[6]; /* FC IEEE address */
283 }; 283 } s;
284 uint8_t wwn[8]; 284 uint8_t wwn[8];
285 }; 285 } u;
286}; 286};
287 287
288struct csp { 288struct csp {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 454058f655db..0856ff7d3b33 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -285,7 +285,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
 	if (phba->SerialNumber[0] == 0) {
 		uint8_t *outptr;
 
-		outptr = (uint8_t *) & phba->fc_nodename.IEEE[0];
+		outptr = &phba->fc_nodename.u.s.IEEE[0];
 		for (i = 0; i < 12; i++) {
 			status = *outptr++;
 			j = ((status & 0xf0) >> 4);
@@ -1523,8 +1523,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 	 * Must done after lpfc_sli_hba_setup()
 	 */
 
-	fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.wwn);
-	fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.wwn);
+	fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.u.wwn);
+	fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.u.wwn);
 	fc_host_supported_classes(host) = FC_COS_CLASS3;
 
 	memset(fc_host_supported_fc4s(host), 0,
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 6f308ebe3e79..61a6fd810bb4 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -621,8 +621,6 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
 	if(islogical) {
 		switch (cmd->cmnd[0]) {
 		case TEST_UNIT_READY:
-			memset(cmd->request_buffer, 0, cmd->request_bufflen);
-
 #if MEGA_HAVE_CLUSTERING
 			/*
 			 * Do we support clustering and is the support enabled
@@ -652,11 +650,28 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
 			return NULL;
 #endif
 
-		case MODE_SENSE:
+		case MODE_SENSE: {
+			char *buf;
+
+			if (cmd->use_sg) {
+				struct scatterlist *sg;
+
+				sg = (struct scatterlist *)cmd->request_buffer;
+				buf = kmap_atomic(sg->page, KM_IRQ0) +
+					sg->offset;
+			} else
+				buf = cmd->request_buffer;
 			memset(cmd->request_buffer, 0, cmd->cmnd[4]);
+			if (cmd->use_sg) {
+				struct scatterlist *sg;
+
+				sg = (struct scatterlist *)cmd->request_buffer;
+				kunmap_atomic(buf - sg->offset, KM_IRQ0);
+			}
 			cmd->result = (DID_OK << 16);
 			cmd->scsi_done(cmd);
 			return NULL;
+		}
 
 		case READ_CAPACITY:
 		case INQUIRY:
@@ -1685,14 +1700,23 @@ mega_rundoneq (adapter_t *adapter)
 static void
 mega_free_scb(adapter_t *adapter, scb_t *scb)
 {
+	unsigned long length;
+
 	switch( scb->dma_type ) {
 
 	case MEGA_DMA_TYPE_NONE:
 		break;
 
 	case MEGA_BULK_DATA:
+		if (scb->cmd->use_sg == 0)
+			length = scb->cmd->request_bufflen;
+		else {
+			struct scatterlist *sgl =
+				(struct scatterlist *)scb->cmd->request_buffer;
+			length = sgl->length;
+		}
 		pci_unmap_page(adapter->dev, scb->dma_h_bulkdata,
-			scb->cmd->request_bufflen, scb->dma_direction);
+			length, scb->dma_direction);
 		break;
 
 	case MEGA_SGLIST:
@@ -1741,6 +1765,7 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
 	struct scatterlist *sgl;
 	struct page *page;
 	unsigned long offset;
+	unsigned int length;
 	Scsi_Cmnd *cmd;
 	int sgcnt;
 	int idx;
@@ -1748,14 +1773,23 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
 	cmd = scb->cmd;
 
 	/* Scatter-gather not used */
-	if( !cmd->use_sg ) {
-
-		page = virt_to_page(cmd->request_buffer);
-		offset = offset_in_page(cmd->request_buffer);
+	if( cmd->use_sg == 0 || (cmd->use_sg == 1 &&
+				 !adapter->has_64bit_addr)) {
+
+		if (cmd->use_sg == 0) {
+			page = virt_to_page(cmd->request_buffer);
+			offset = offset_in_page(cmd->request_buffer);
+			length = cmd->request_bufflen;
+		} else {
+			sgl = (struct scatterlist *)cmd->request_buffer;
+			page = sgl->page;
+			offset = sgl->offset;
+			length = sgl->length;
+		}
 
 		scb->dma_h_bulkdata = pci_map_page(adapter->dev,
 						  page, offset,
-						  cmd->request_bufflen,
+						  length,
 						  scb->dma_direction);
 		scb->dma_type = MEGA_BULK_DATA;
 
@@ -1765,14 +1799,14 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
 	 */
 	if( adapter->has_64bit_addr ) {
 		scb->sgl64[0].address = scb->dma_h_bulkdata;
-		scb->sgl64[0].length = cmd->request_bufflen;
+		scb->sgl64[0].length = length;
 		*buf = (u32)scb->sgl_dma_addr;
-		*len = (u32)cmd->request_bufflen;
+		*len = (u32)length;
 		return 1;
 	}
 	else {
 		*buf = (u32)scb->dma_h_bulkdata;
-		*len = (u32)cmd->request_bufflen;
+		*len = (u32)length;
 	}
 	return 0;
 }
@@ -1791,27 +1825,23 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
 
 	if( sgcnt > adapter->sglen ) BUG();
 
+	*len = 0;
+
 	for( idx = 0; idx < sgcnt; idx++, sgl++ ) {
 
 		if( adapter->has_64bit_addr ) {
 			scb->sgl64[idx].address = sg_dma_address(sgl);
-			scb->sgl64[idx].length = sg_dma_len(sgl);
+			*len += scb->sgl64[idx].length = sg_dma_len(sgl);
 		}
 		else {
 			scb->sgl[idx].address = sg_dma_address(sgl);
-			scb->sgl[idx].length = sg_dma_len(sgl);
+			*len += scb->sgl[idx].length = sg_dma_len(sgl);
 		}
 	}
 
 	/* Reset pointer and length fields */
 	*buf = scb->sgl_dma_addr;
 
-	/*
-	 * For passthru command, dataxferlen must be set, even for commands
-	 * with a sg list
-	 */
-	*len = (u32)cmd->request_bufflen;
-
 	/* Return count of SG requests */
 	return sgcnt;
 }
diff --git a/drivers/scsi/megaraid/Kconfig.megaraid b/drivers/scsi/megaraid/Kconfig.megaraid
index 917d591d90b2..7363e12663ac 100644
--- a/drivers/scsi/megaraid/Kconfig.megaraid
+++ b/drivers/scsi/megaraid/Kconfig.megaraid
@@ -76,3 +76,12 @@ config MEGARAID_LEGACY
 	  To compile this driver as a module, choose M here: the
 	  module will be called megaraid
 endif
+
+config MEGARAID_SAS
+	tristate "LSI Logic MegaRAID SAS RAID Module"
+	depends on PCI && SCSI
+	help
+	Module for LSI Logic's SAS based RAID controllers.
+	To compile this driver as a module, choose 'm' here.
+	Module will be called megaraid_sas
+
diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile
index 6dd99f275722..f469915b97c3 100644
--- a/drivers/scsi/megaraid/Makefile
+++ b/drivers/scsi/megaraid/Makefile
@@ -1,2 +1,3 @@
 obj-$(CONFIG_MEGARAID_MM)	+= megaraid_mm.o
 obj-$(CONFIG_MEGARAID_MAILBOX)	+= megaraid_mbox.o
+obj-$(CONFIG_MEGARAID_SAS)	+= megaraid_sas.o
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
new file mode 100644
index 000000000000..1b3148e842af
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -0,0 +1,2805 @@
1/*
2 *
3 * Linux MegaRAID driver for SAS based RAID controllers
4 *
5 * Copyright (c) 2003-2005 LSI Logic Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * FILE : megaraid_sas.c
13 * Version : v00.00.02.00-rc4
14 *
15 * Authors:
16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com>
17 * Sumant Patro <Sumant.Patro@lsil.com>
18 *
19 * List of supported controllers
20 *
21 * OEM Product Name VID DID SSVID SSID
22 * --- ------------ --- --- ---- ----
23 */
24
25#include <linux/kernel.h>
26#include <linux/types.h>
27#include <linux/pci.h>
28#include <linux/list.h>
29#include <linux/version.h>
30#include <linux/moduleparam.h>
31#include <linux/module.h>
32#include <linux/spinlock.h>
33#include <linux/interrupt.h>
34#include <linux/delay.h>
35#include <linux/uio.h>
36#include <asm/uaccess.h>
37#include <linux/compat.h>
38
39#include <scsi/scsi.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/scsi_device.h>
42#include <scsi/scsi_host.h>
43#include "megaraid_sas.h"
44
45MODULE_LICENSE("GPL");
46MODULE_VERSION(MEGASAS_VERSION);
47MODULE_AUTHOR("sreenivas.bagalkote@lsil.com");
48MODULE_DESCRIPTION("LSI Logic MegaRAID SAS Driver");
49
50/*
51 * PCI ID table for all supported controllers
52 */
53static struct pci_device_id megasas_pci_table[] = {
54
55 {
56 PCI_VENDOR_ID_LSI_LOGIC,
57 PCI_DEVICE_ID_LSI_SAS1064R,
58 PCI_ANY_ID,
59 PCI_ANY_ID,
60 },
61 {
62 PCI_VENDOR_ID_DELL,
63 PCI_DEVICE_ID_DELL_PERC5,
64 PCI_ANY_ID,
65 PCI_ANY_ID,
66 },
67 {0} /* Terminating entry */
68};
69
70MODULE_DEVICE_TABLE(pci, megasas_pci_table);
71
72static int megasas_mgmt_majorno;
73static struct megasas_mgmt_info megasas_mgmt_info;
74static struct fasync_struct *megasas_async_queue;
75static DECLARE_MUTEX(megasas_async_queue_mutex);
76
77/**
78 * megasas_get_cmd - Get a command from the free pool
79 * @instance: Adapter soft state
80 *
81 * Returns a free command from the pool
82 */
83static inline struct megasas_cmd *megasas_get_cmd(struct megasas_instance
84 *instance)
85{
86 unsigned long flags;
87 struct megasas_cmd *cmd = NULL;
88
89 spin_lock_irqsave(&instance->cmd_pool_lock, flags);
90
91 if (!list_empty(&instance->cmd_pool)) {
92 cmd = list_entry((&instance->cmd_pool)->next,
93 struct megasas_cmd, list);
94 list_del_init(&cmd->list);
95 } else {
96 printk(KERN_ERR "megasas: Command pool empty!\n");
97 }
98
99 spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
100 return cmd;
101}
102
103/**
104 * megasas_return_cmd - Return a cmd to free command pool
105 * @instance: Adapter soft state
106 * @cmd: Command packet to be returned to free command pool
107 */
108static inline void
109megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
110{
111 unsigned long flags;
112
113 spin_lock_irqsave(&instance->cmd_pool_lock, flags);
114
115 cmd->scmd = NULL;
116 list_add_tail(&cmd->list, &instance->cmd_pool);
117
118 spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
119}
120
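/*
 * [Editor's illustrative sketch, not part of the submitted patch.]  The two
 * helpers above implement a simple spinlock-protected free list.  A typical
 * caller pairs them as in the hypothetical helper below; the frame setup in
 * the middle is elided.
 */
static inline int megasas_example_use_cmd_pool(struct megasas_instance *instance)
{
	struct megasas_cmd *cmd = megasas_get_cmd(instance);

	if (!cmd)
		return -ENOMEM;		/* pool exhausted; caller must back off */

	/* ... fill in cmd->frame for the desired operation here ... */

	megasas_return_cmd(instance, cmd);	/* slot goes back on cmd_pool */
	return 0;
}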
121/**
122 * megasas_enable_intr - Enables interrupts
123 * @regs: MFI register set
124 */
125static inline void
126megasas_enable_intr(struct megasas_register_set __iomem * regs)
127{
128 writel(1, &(regs)->outbound_intr_mask);
129
130 /* Dummy readl to force pci flush */
131 readl(&regs->outbound_intr_mask);
132}
133
134/**
135 * megasas_disable_intr - Disables interrupts
136 * @regs: MFI register set
137 */
138static inline void
139megasas_disable_intr(struct megasas_register_set __iomem * regs)
140{
141 u32 mask = readl(&regs->outbound_intr_mask) & (~0x00000001);
142 writel(mask, &regs->outbound_intr_mask);
143
144 /* Dummy readl to force pci flush */
145 readl(&regs->outbound_intr_mask);
146}
147
148/**
149 * megasas_issue_polled - Issues a polling command
150 * @instance: Adapter soft state
151 * @cmd: Command packet to be issued
152 *
153 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
154 */
155static int
156megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
157{
158 int i;
159 u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000;
160
161 struct megasas_header *frame_hdr = &cmd->frame->hdr;
162
163 frame_hdr->cmd_status = 0xFF;
164 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
165
166 /*
167 * Issue the frame using inbound queue port
168 */
169 writel(cmd->frame_phys_addr >> 3,
170 &instance->reg_set->inbound_queue_port);
171
172 /*
173 * Wait for cmd_status to change
174 */
175 for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i++) {
176 rmb();
177 msleep(1);
178 }
179
180 if (frame_hdr->cmd_status == 0xff)
181 return -ETIME;
182
183 return 0;
184}
185
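/*
 * [Editor's illustrative sketch, not part of the submitted patch.]  The
 * polled path relies only on the FW overwriting cmd_status in host memory:
 * the caller presets it to 0xFF, posts the frame, and the loop above sleeps
 * until the FW writes a real status or the timeout expires.  A caller that
 * already built its frame would use it roughly like this hypothetical helper.
 */
static inline int megasas_example_fire_polled(struct megasas_instance *instance,
					      struct megasas_cmd *cmd)
{
	int ret = megasas_issue_polled(instance, cmd);

	if (ret == -ETIME)		/* FW never updated cmd_status */
		return ret;

	/* otherwise the FW wrote its own status byte (0x00 == MFI_STAT_OK) */
	return cmd->frame->hdr.cmd_status;
}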
186/**
187 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
188 * @instance: Adapter soft state
189 * @cmd: Command to be issued
190 *
191 * This function waits on an event for the command to be returned from ISR.
192 * Used to issue ioctl commands.
193 */
194static int
195megasas_issue_blocked_cmd(struct megasas_instance *instance,
196 struct megasas_cmd *cmd)
197{
198 cmd->cmd_status = ENODATA;
199
200 writel(cmd->frame_phys_addr >> 3,
201 &instance->reg_set->inbound_queue_port);
202
203 wait_event(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA));
204
205 return 0;
206}
207
208/**
209 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
210 * @instance: Adapter soft state
211 * @cmd_to_abort: Previously issued cmd to be aborted
212 *
213 * MFI firmware can abort a previously issued AEN command (automatic event
214 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
215 * cmd and blocks till it is completed.
216 */
217static int
218megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
219 struct megasas_cmd *cmd_to_abort)
220{
221 struct megasas_cmd *cmd;
222 struct megasas_abort_frame *abort_fr;
223
224 cmd = megasas_get_cmd(instance);
225
226 if (!cmd)
227 return -1;
228
229 abort_fr = &cmd->frame->abort;
230
231 /*
232 * Prepare and issue the abort frame
233 */
234 abort_fr->cmd = MFI_CMD_ABORT;
235 abort_fr->cmd_status = 0xFF;
236 abort_fr->flags = 0;
237 abort_fr->abort_context = cmd_to_abort->index;
238 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
239 abort_fr->abort_mfi_phys_addr_hi = 0;
240
241 cmd->sync_cmd = 1;
242 cmd->cmd_status = 0xFF;
243
244 writel(cmd->frame_phys_addr >> 3,
245 &instance->reg_set->inbound_queue_port);
246
247 /*
248 * Wait for this cmd to complete
249 */
250 wait_event(instance->abort_cmd_wait_q, (cmd->cmd_status != 0xFF));
251
252 megasas_return_cmd(instance, cmd);
253 return 0;
254}
255
256/**
257 * megasas_make_sgl32 - Prepares 32-bit SGL
258 * @instance: Adapter soft state
259 * @scp: SCSI command from the mid-layer
260 * @mfi_sgl: SGL to be filled in
261 *
262 * If successful, this function returns the number of SG elements. Otherwise,
263 * it returns -1.
264 */
265static inline int
266megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
267 union megasas_sgl *mfi_sgl)
268{
269 int i;
270 int sge_count;
271 struct scatterlist *os_sgl;
272
273 /*
274 * Return 0 if there is no data transfer
275 */
276 if (!scp->request_buffer || !scp->request_bufflen)
277 return 0;
278
279 if (!scp->use_sg) {
280 mfi_sgl->sge32[0].phys_addr = pci_map_single(instance->pdev,
281 scp->
282 request_buffer,
283 scp->
284 request_bufflen,
285 scp->
286 sc_data_direction);
287 mfi_sgl->sge32[0].length = scp->request_bufflen;
288
289 return 1;
290 }
291
292 os_sgl = (struct scatterlist *)scp->request_buffer;
293 sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
294 scp->sc_data_direction);
295
296 for (i = 0; i < sge_count; i++, os_sgl++) {
297 mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
298 mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
299 }
300
301 return sge_count;
302}
303
304/**
305 * megasas_make_sgl64 - Prepares 64-bit SGL
306 * @instance: Adapter soft state
307 * @scp: SCSI command from the mid-layer
308 * @mfi_sgl: SGL to be filled in
309 *
310 * If successful, this function returns the number of SG elements. Otherwise,
311 * it returns -1.
312 */
313static inline int
314megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
315 union megasas_sgl *mfi_sgl)
316{
317 int i;
318 int sge_count;
319 struct scatterlist *os_sgl;
320
321 /*
322 * Return 0 if there is no data transfer
323 */
324 if (!scp->request_buffer || !scp->request_bufflen)
325 return 0;
326
327 if (!scp->use_sg) {
328 mfi_sgl->sge64[0].phys_addr = pci_map_single(instance->pdev,
329 scp->
330 request_buffer,
331 scp->
332 request_bufflen,
333 scp->
334 sc_data_direction);
335
336 mfi_sgl->sge64[0].length = scp->request_bufflen;
337
338 return 1;
339 }
340
341 os_sgl = (struct scatterlist *)scp->request_buffer;
342 sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
343 scp->sc_data_direction);
344
345 for (i = 0; i < sge_count; i++, os_sgl++) {
346 mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
347 mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
348 }
349
350 return sge_count;
351}
352
353/**
354 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
355 * @instance: Adapter soft state
356 * @scp: SCSI command
357 * @cmd: Command to be prepared in
358 *
359 * This function prepares CDB commands. These are typically pass-through
360 * commands to the devices.
361 */
362static inline int
363megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
364 struct megasas_cmd *cmd)
365{
366 u32 sge_sz;
367 int sge_bytes;
368 u32 is_logical;
369 u32 device_id;
370 u16 flags = 0;
371 struct megasas_pthru_frame *pthru;
372
373 is_logical = MEGASAS_IS_LOGICAL(scp);
374 device_id = MEGASAS_DEV_INDEX(instance, scp);
375 pthru = (struct megasas_pthru_frame *)cmd->frame;
376
377 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
378 flags = MFI_FRAME_DIR_WRITE;
379 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
380 flags = MFI_FRAME_DIR_READ;
381 else if (scp->sc_data_direction == PCI_DMA_NONE)
382 flags = MFI_FRAME_DIR_NONE;
383
384 /*
385 * Prepare the DCDB frame
386 */
387 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
388 pthru->cmd_status = 0x0;
389 pthru->scsi_status = 0x0;
390 pthru->target_id = device_id;
391 pthru->lun = scp->device->lun;
392 pthru->cdb_len = scp->cmd_len;
393 pthru->timeout = 0;
394 pthru->flags = flags;
395 pthru->data_xfer_len = scp->request_bufflen;
396
397 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
398
399 /*
400 * Construct SGL
401 */
402 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
403 sizeof(struct megasas_sge32);
404
405 if (IS_DMA64) {
406 pthru->flags |= MFI_FRAME_SGL64;
407 pthru->sge_count = megasas_make_sgl64(instance, scp,
408 &pthru->sgl);
409 } else
410 pthru->sge_count = megasas_make_sgl32(instance, scp,
411 &pthru->sgl);
412
413 /*
414 * Sense info specific
415 */
416 pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
417 pthru->sense_buf_phys_addr_hi = 0;
418 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
419
420 sge_bytes = sge_sz * pthru->sge_count;
421
422 /*
423 * Compute the total number of frames this command consumes. FW uses
424 * this number to pull a sufficient number of frames from host memory.
425 */
426 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
427 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;
428
429 if (cmd->frame_count > 7)
430 cmd->frame_count = 8;
431
432 return cmd->frame_count;
433}
434
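/*
 * [Editor's illustrative sketch, not part of the submitted patch.]  The
 * frame_count arithmetic above is a ceiling division of the SGL bytes by
 * the 64-byte MFI frame size, plus one frame for the header itself.  For
 * example, if sge_sz happened to be 16 bytes and sge_count were 10, then
 * sge_bytes = 160, ceil(160 / 64) = 3 and frame_count = 4 (before the cap
 * at 8).  The same computation, written as a helper:
 */
static inline int megasas_example_frame_count(int sge_bytes)
{
	int count = (sge_bytes + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE + 1;

	return (count > 7) ? 8 : count;		/* FW pulls at most 8 frames */
}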
435/**
436 * megasas_build_ldio - Prepares IOs to logical devices
437 * @instance: Adapter soft state
438 * @scp: SCSI command
439 * @cmd: Command to be prepared
440 *
441 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
442 */
443static inline int
444megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
445 struct megasas_cmd *cmd)
446{
447 u32 sge_sz;
448 int sge_bytes;
449 u32 device_id;
450 u8 sc = scp->cmnd[0];
451 u16 flags = 0;
452 struct megasas_io_frame *ldio;
453
454 device_id = MEGASAS_DEV_INDEX(instance, scp);
455 ldio = (struct megasas_io_frame *)cmd->frame;
456
457 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
458 flags = MFI_FRAME_DIR_WRITE;
459 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
460 flags = MFI_FRAME_DIR_READ;
461
462 /*
463 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
464 */
465 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
466 ldio->cmd_status = 0x0;
467 ldio->scsi_status = 0x0;
468 ldio->target_id = device_id;
469 ldio->timeout = 0;
470 ldio->reserved_0 = 0;
471 ldio->pad_0 = 0;
472 ldio->flags = flags;
473 ldio->start_lba_hi = 0;
474 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
475
476 /*
477 * 6-byte READ(0x08) or WRITE(0x0A) cdb
478 */
479 if (scp->cmd_len == 6) {
480 ldio->lba_count = (u32) scp->cmnd[4];
481 ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) |
482 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
483
484 ldio->start_lba_lo &= 0x1FFFFF;
485 }
486
487 /*
488 * 10-byte READ(0x28) or WRITE(0x2A) cdb
489 */
490 else if (scp->cmd_len == 10) {
491 ldio->lba_count = (u32) scp->cmnd[8] |
492 ((u32) scp->cmnd[7] << 8);
493 ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
494 ((u32) scp->cmnd[3] << 16) |
495 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
496 }
497
498 /*
499 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
500 */
501 else if (scp->cmd_len == 12) {
502 ldio->lba_count = ((u32) scp->cmnd[6] << 24) |
503 ((u32) scp->cmnd[7] << 16) |
504 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
505
506 ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
507 ((u32) scp->cmnd[3] << 16) |
508 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
509 }
510
511 /*
512 * 16-byte READ(0x88) or WRITE(0x8A) cdb
513 */
514 else if (scp->cmd_len == 16) {
515 ldio->lba_count = ((u32) scp->cmnd[10] << 24) |
516 ((u32) scp->cmnd[11] << 16) |
517 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
518
519 ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) |
520 ((u32) scp->cmnd[7] << 16) |
521 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
522
523 ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) |
524 ((u32) scp->cmnd[3] << 16) |
525 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
526
527 }
528
529 /*
530 * Construct SGL
531 */
532 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
533 sizeof(struct megasas_sge32);
534
535 if (IS_DMA64) {
536 ldio->flags |= MFI_FRAME_SGL64;
537 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
538 } else
539 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
540
541 /*
542 * Sense info specific
543 */
544 ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
545 ldio->sense_buf_phys_addr_hi = 0;
546 ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
547
548 sge_bytes = sge_sz * ldio->sge_count;
549
550 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
551 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;
552
553 if (cmd->frame_count > 7)
554 cmd->frame_count = 8;
555
556 return cmd->frame_count;
557}
558
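/*
 * [Editor's illustrative sketch, not part of the submitted patch.]  The CDB
 * decoding above is plain big-endian byte assembly.  For the 6-byte
 * READ/WRITE case only 21 bits of LBA are valid, hence the & 0x1FFFFF: e.g.
 * cmnd[1..3] = 0xE3 0x34 0x56 assembles to 0xE33456, and the mask drops the
 * top three bits of cmnd[1] (the LUN field in old SCSI-1 CDBs), leaving
 * 0x033456.
 */
static inline u32 megasas_example_rw6_lba(const u8 *cdb)
{
	return ((((u32) cdb[1] << 16) |
		 ((u32) cdb[2] << 8) |
		  (u32) cdb[3]) & 0x1FFFFF);
}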
559/**
560 * megasas_build_cmd - Prepares a command packet
561 * @instance: Adapter soft state
562 * @scp: SCSI command
563 * @frame_count: [OUT] Number of frames used to prepare this command
564 */
565static inline struct megasas_cmd *megasas_build_cmd(struct megasas_instance
566 *instance,
567 struct scsi_cmnd *scp,
568 int *frame_count)
569{
570 u32 logical_cmd;
571 struct megasas_cmd *cmd;
572
573 /*
574 * Find out if this is logical or physical drive command.
575 */
576 logical_cmd = MEGASAS_IS_LOGICAL(scp);
577
578 /*
579 * Logical drive command
580 */
581 if (logical_cmd) {
582
583 if (scp->device->id >= MEGASAS_MAX_LD) {
584 scp->result = DID_BAD_TARGET << 16;
585 return NULL;
586 }
587
588 switch (scp->cmnd[0]) {
589
590 case READ_10:
591 case WRITE_10:
592 case READ_12:
593 case WRITE_12:
594 case READ_6:
595 case WRITE_6:
596 case READ_16:
597 case WRITE_16:
598 /*
599 * Fail for LUN > 0
600 */
601 if (scp->device->lun) {
602 scp->result = DID_BAD_TARGET << 16;
603 return NULL;
604 }
605
606 cmd = megasas_get_cmd(instance);
607
608 if (!cmd) {
609 scp->result = DID_IMM_RETRY << 16;
610 return NULL;
611 }
612
613 *frame_count = megasas_build_ldio(instance, scp, cmd);
614
615 if (!(*frame_count)) {
616 megasas_return_cmd(instance, cmd);
617 return NULL;
618 }
619
620 return cmd;
621
622 default:
623 /*
624 * Fail for LUN > 0
625 */
626 if (scp->device->lun) {
627 scp->result = DID_BAD_TARGET << 16;
628 return NULL;
629 }
630
631 cmd = megasas_get_cmd(instance);
632
633 if (!cmd) {
634 scp->result = DID_IMM_RETRY << 16;
635 return NULL;
636 }
637
638 *frame_count = megasas_build_dcdb(instance, scp, cmd);
639
640 if (!(*frame_count)) {
641 megasas_return_cmd(instance, cmd);
642 return NULL;
643 }
644
645 return cmd;
646 }
647 } else {
648 cmd = megasas_get_cmd(instance);
649
650 if (!cmd) {
651 scp->result = DID_IMM_RETRY << 16;
652 return NULL;
653 }
654
655 *frame_count = megasas_build_dcdb(instance, scp, cmd);
656
657 if (!(*frame_count)) {
658 megasas_return_cmd(instance, cmd);
659 return NULL;
660 }
661
662 return cmd;
663 }
664
665 return NULL;
666}
667
668/**
669 * megasas_queue_command - Queue entry point
670 * @scmd: SCSI command to be queued
671 * @done: Callback entry point
672 */
673static int
674megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
675{
676 u32 frame_count;
677 unsigned long flags;
678 struct megasas_cmd *cmd;
679 struct megasas_instance *instance;
680
681 instance = (struct megasas_instance *)
682 scmd->device->host->hostdata;
683 scmd->scsi_done = done;
684 scmd->result = 0;
685
686 cmd = megasas_build_cmd(instance, scmd, &frame_count);
687
688 if (!cmd) {
689 done(scmd);
690 return 0;
691 }
692
693 cmd->scmd = scmd;
694 scmd->SCp.ptr = (char *)cmd;
695 scmd->SCp.sent_command = jiffies;
696
697 /*
698 * Issue the command to the FW
699 */
700 spin_lock_irqsave(&instance->instance_lock, flags);
701 instance->fw_outstanding++;
702 spin_unlock_irqrestore(&instance->instance_lock, flags);
703
704 writel(((cmd->frame_phys_addr >> 3) | (cmd->frame_count - 1)),
705 &instance->reg_set->inbound_queue_port);
706
707 return 0;
708}
709
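/*
 * [Editor's illustrative sketch, not part of the submitted patch.]  The
 * writel() above packs two things into one 32-bit doorbell write: the
 * frame's physical address shifted right by 3 (the 64-byte-aligned frames
 * leave the low bits free) and, OR'ed into those low bits, frame_count - 1,
 * presumably telling the FW how many 64-byte frames to fetch.  A
 * hypothetical helper making the encoding explicit:
 */
static inline u32 megasas_example_doorbell(dma_addr_t frame_phys_addr,
					   u32 frame_count)
{
	return (u32)(frame_phys_addr >> 3) | (frame_count - 1);
}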
710/**
711 * megasas_wait_for_outstanding - Wait for all outstanding cmds
712 * @instance: Adapter soft state
713 *
714 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
715 * complete all its outstanding commands. Returns error if one or more IOs
716 * are pending after this time period. It also marks the controller dead.
717 */
718static int megasas_wait_for_outstanding(struct megasas_instance *instance)
719{
720 int i;
721 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
722
723 for (i = 0; i < wait_time; i++) {
724
725 if (!instance->fw_outstanding)
726 break;
727
728 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
729 printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
730 "commands to complete\n", i,
731 instance->fw_outstanding);
732 }
733
734 msleep(1000);
735 }
736
737 if (instance->fw_outstanding) {
738 instance->hw_crit_error = 1;
739 return FAILED;
740 }
741
742 return SUCCESS;
743}
744
745/**
746 * megasas_generic_reset - Generic reset routine
747 * @scmd: Mid-layer SCSI command
748 *
749 * This routine implements a generic reset handler for device, bus and host
750 * reset requests. Device, bus and host specific reset handlers can use this
751 * function after they do their specific tasks.
752 */
753static int megasas_generic_reset(struct scsi_cmnd *scmd)
754{
755 int ret_val;
756 struct megasas_instance *instance;
757
758 instance = (struct megasas_instance *)scmd->device->host->hostdata;
759
760 printk(KERN_NOTICE "megasas: RESET -%ld cmd=%x <c=%d t=%d l=%d>\n",
761 scmd->serial_number, scmd->cmnd[0], scmd->device->channel,
762 scmd->device->id, scmd->device->lun);
763
764 if (instance->hw_crit_error) {
765 printk(KERN_ERR "megasas: cannot recover from previous reset "
766 "failures\n");
767 return FAILED;
768 }
769
770 spin_unlock(scmd->device->host->host_lock);
771
772 ret_val = megasas_wait_for_outstanding(instance);
773
774 if (ret_val == SUCCESS)
775 printk(KERN_NOTICE "megasas: reset successful \n");
776 else
777 printk(KERN_ERR "megasas: failed to do reset\n");
778
779 spin_lock(scmd->device->host->host_lock);
780
781 return ret_val;
782}
783
784static enum scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
785{
786 unsigned long seconds;
787
788 if (scmd->SCp.ptr) {
789 seconds = (jiffies - scmd->SCp.sent_command) / HZ;
790
791 if (seconds < 90) {
792 return EH_RESET_TIMER;
793 } else {
794 return EH_NOT_HANDLED;
795 }
796 }
797
798 return EH_HANDLED;
799}
800
801/**
802 * megasas_reset_device - Device reset handler entry point
803 */
804static int megasas_reset_device(struct scsi_cmnd *scmd)
805{
806 int ret;
807
808 /*
809 * First wait for all commands to complete
810 */
811 ret = megasas_generic_reset(scmd);
812
813 return ret;
814}
815
816/**
817 * megasas_reset_bus_host - Bus & host reset handler entry point
818 */
819static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
820{
821 int ret;
822
823 /*
824 * First wait for all commands to complete
825 */
826 ret = megasas_generic_reset(scmd);
827
828 return ret;
829}
830
831/**
832 * megasas_service_aen - Processes an event notification
833 * @instance: Adapter soft state
834 * @cmd: AEN command completed by the ISR
835 *
836 * For AEN, driver sends a command down to FW that is held by the FW till an
837 * event occurs. When an event of interest occurs, FW completes the command
838 * that it was previously holding.
839 *
840 * This routine sends a SIGIO signal to processes that have registered with the
841 * driver for AEN.
842 */
843static void
844megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
845{
846 /*
847 * Don't signal app if it is just an aborted previously registered aen
848 */
849 if (!cmd->abort_aen)
850 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
851 else
852 cmd->abort_aen = 0;
853
854 instance->aen_cmd = NULL;
855 megasas_return_cmd(instance, cmd);
856}
857
858/*
859 * Scsi host template for megaraid_sas driver
860 */
861static struct scsi_host_template megasas_template = {
862
863 .module = THIS_MODULE,
864 .name = "LSI Logic SAS based MegaRAID driver",
865 .proc_name = "megaraid_sas",
866 .queuecommand = megasas_queue_command,
867 .eh_device_reset_handler = megasas_reset_device,
868 .eh_bus_reset_handler = megasas_reset_bus_host,
869 .eh_host_reset_handler = megasas_reset_bus_host,
870 .eh_timed_out = megasas_reset_timer,
871 .use_clustering = ENABLE_CLUSTERING,
872};
873
874/**
875 * megasas_complete_int_cmd - Completes an internal command
876 * @instance: Adapter soft state
877 * @cmd: Command to be completed
878 *
879 * The megasas_issue_blocked_cmd() function waits for a command to complete
880 * after it issues a command. This function wakes up that waiting routine by
881 * calling wake_up() on the wait queue.
882 */
883static void
884megasas_complete_int_cmd(struct megasas_instance *instance,
885 struct megasas_cmd *cmd)
886{
887 cmd->cmd_status = cmd->frame->io.cmd_status;
888
889 if (cmd->cmd_status == ENODATA) {
890 cmd->cmd_status = 0;
891 }
892 wake_up(&instance->int_cmd_wait_q);
893}
894
895/**
896 * megasas_complete_abort - Completes aborting a command
897 * @instance: Adapter soft state
898 * @cmd: Cmd that was issued to abort another cmd
899 *
900 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
901 * after it issues an abort on a previously issued command. This function
902 * wakes up all functions waiting on the same wait queue.
903 */
904static void
905megasas_complete_abort(struct megasas_instance *instance,
906 struct megasas_cmd *cmd)
907{
908 if (cmd->sync_cmd) {
909 cmd->sync_cmd = 0;
910 cmd->cmd_status = 0;
911 wake_up(&instance->abort_cmd_wait_q);
912 }
913
914 return;
915}
916
917/**
918 * megasas_unmap_sgbuf - Unmap SG buffers
919 * @instance: Adapter soft state
920 * @cmd: Completed command
921 */
922static inline void
923megasas_unmap_sgbuf(struct megasas_instance *instance, struct megasas_cmd *cmd)
924{
925 dma_addr_t buf_h;
926 u8 opcode;
927
928 if (cmd->scmd->use_sg) {
929 pci_unmap_sg(instance->pdev, cmd->scmd->request_buffer,
930 cmd->scmd->use_sg, cmd->scmd->sc_data_direction);
931 return;
932 }
933
934 if (!cmd->scmd->request_bufflen)
935 return;
936
937 opcode = cmd->frame->hdr.cmd;
938
939 if ((opcode == MFI_CMD_LD_READ) || (opcode == MFI_CMD_LD_WRITE)) {
940 if (IS_DMA64)
941 buf_h = cmd->frame->io.sgl.sge64[0].phys_addr;
942 else
943 buf_h = cmd->frame->io.sgl.sge32[0].phys_addr;
944 } else {
945 if (IS_DMA64)
946 buf_h = cmd->frame->pthru.sgl.sge64[0].phys_addr;
947 else
948 buf_h = cmd->frame->pthru.sgl.sge32[0].phys_addr;
949 }
950
951 pci_unmap_single(instance->pdev, buf_h, cmd->scmd->request_bufflen,
952 cmd->scmd->sc_data_direction);
953 return;
954}
955
956/**
957 * megasas_complete_cmd - Completes a command
958 * @instance: Adapter soft state
959 * @cmd: Command to be completed
960 * @alt_status: If non-zero, use this value as status to
961 * SCSI mid-layer instead of the value returned
962 * by the FW. This should be used if caller wants
963 * an alternate status (as in the case of aborted
964 * commands)
965 */
966static inline void
967megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
968 u8 alt_status)
969{
970 int exception = 0;
971 struct megasas_header *hdr = &cmd->frame->hdr;
972 unsigned long flags;
973
974 if (cmd->scmd) {
975 cmd->scmd->SCp.ptr = (char *)0;
976 }
977
978 switch (hdr->cmd) {
979
980 case MFI_CMD_PD_SCSI_IO:
981 case MFI_CMD_LD_SCSI_IO:
982
983 /*
984 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
985 * issued either through an IO path or an IOCTL path. If it
986 * was via IOCTL, we will send it to internal completion.
987 */
988 if (cmd->sync_cmd) {
989 cmd->sync_cmd = 0;
990 megasas_complete_int_cmd(instance, cmd);
991 break;
992 }
993
994 /*
995 * Don't export physical disk devices to mid-layer.
996 */
997 if (!MEGASAS_IS_LOGICAL(cmd->scmd) &&
998 (hdr->cmd_status == MFI_STAT_OK) &&
999 (cmd->scmd->cmnd[0] == INQUIRY)) {
1000
1001 if (((*(u8 *) cmd->scmd->request_buffer) & 0x1F) ==
1002 TYPE_DISK) {
1003 cmd->scmd->result = DID_BAD_TARGET << 16;
1004 exception = 1;
1005 }
1006 }
1007
1008 case MFI_CMD_LD_READ:
1009 case MFI_CMD_LD_WRITE:
1010
1011 if (alt_status) {
1012 cmd->scmd->result = alt_status << 16;
1013 exception = 1;
1014 }
1015
1016 if (exception) {
1017
1018 spin_lock_irqsave(&instance->instance_lock, flags);
1019 instance->fw_outstanding--;
1020 spin_unlock_irqrestore(&instance->instance_lock, flags);
1021
1022 megasas_unmap_sgbuf(instance, cmd);
1023 cmd->scmd->scsi_done(cmd->scmd);
1024 megasas_return_cmd(instance, cmd);
1025
1026 break;
1027 }
1028
1029 switch (hdr->cmd_status) {
1030
1031 case MFI_STAT_OK:
1032 cmd->scmd->result = DID_OK << 16;
1033 break;
1034
1035 case MFI_STAT_SCSI_IO_FAILED:
1036 case MFI_STAT_LD_INIT_IN_PROGRESS:
1037 cmd->scmd->result =
1038 (DID_ERROR << 16) | hdr->scsi_status;
1039 break;
1040
1041 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1042
1043 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
1044
1045 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
1046 memset(cmd->scmd->sense_buffer, 0,
1047 SCSI_SENSE_BUFFERSIZE);
1048 memcpy(cmd->scmd->sense_buffer, cmd->sense,
1049 hdr->sense_len);
1050
1051 cmd->scmd->result |= DRIVER_SENSE << 24;
1052 }
1053
1054 break;
1055
1056 case MFI_STAT_LD_OFFLINE:
1057 case MFI_STAT_DEVICE_NOT_FOUND:
1058 cmd->scmd->result = DID_BAD_TARGET << 16;
1059 break;
1060
1061 default:
1062 printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
1063 hdr->cmd_status);
1064 cmd->scmd->result = DID_ERROR << 16;
1065 break;
1066 }
1067
1068 spin_lock_irqsave(&instance->instance_lock, flags);
1069 instance->fw_outstanding--;
1070 spin_unlock_irqrestore(&instance->instance_lock, flags);
1071
1072 megasas_unmap_sgbuf(instance, cmd);
1073 cmd->scmd->scsi_done(cmd->scmd);
1074 megasas_return_cmd(instance, cmd);
1075
1076 break;
1077
1078 case MFI_CMD_SMP:
1079 case MFI_CMD_STP:
1080 case MFI_CMD_DCMD:
1081
1082 /*
1083 * See if got an event notification
1084 */
1085 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
1086 megasas_service_aen(instance, cmd);
1087 else
1088 megasas_complete_int_cmd(instance, cmd);
1089
1090 break;
1091
1092 case MFI_CMD_ABORT:
1093 /*
1094 * Cmd issued to abort another cmd returned
1095 */
1096 megasas_complete_abort(instance, cmd);
1097 break;
1098
1099 default:
1100 printk("megasas: Unknown command completed! [0x%X]\n",
1101 hdr->cmd);
1102 break;
1103 }
1104}
1105
1106/**
1107 * megasas_deplete_reply_queue - Processes all completed commands
1108 * @instance: Adapter soft state
1109 * @alt_status: Alternate status to be returned to
1110 * SCSI mid-layer instead of the status
1111 * returned by the FW
1112 */
1113static inline int
1114megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status)
1115{
1116 u32 status;
1117 u32 producer;
1118 u32 consumer;
1119 u32 context;
1120 struct megasas_cmd *cmd;
1121
1122 /*
1123 * Check if it is our interrupt
1124 */
1125 status = readl(&instance->reg_set->outbound_intr_status);
1126
1127 if (!(status & MFI_OB_INTR_STATUS_MASK)) {
1128 return IRQ_NONE;
1129 }
1130
1131 /*
1132 * Clear the interrupt by writing back the same value
1133 */
1134 writel(status, &instance->reg_set->outbound_intr_status);
1135
1136 producer = *instance->producer;
1137 consumer = *instance->consumer;
1138
1139 while (consumer != producer) {
1140 context = instance->reply_queue[consumer];
1141
1142 cmd = instance->cmd_list[context];
1143
1144 megasas_complete_cmd(instance, cmd, alt_status);
1145
1146 consumer++;
1147 if (consumer == (instance->max_fw_cmds + 1)) {
1148 consumer = 0;
1149 }
1150 }
1151
1152 *instance->consumer = producer;
1153
1154 return IRQ_HANDLED;
1155}
1156
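/*
 * [Editor's illustrative sketch, not part of the submitted patch.]  The
 * reply queue above is a ring of (max_fw_cmds + 1) 32-bit contexts: the FW
 * advances *producer as it completes commands, the driver walks consumer up
 * to producer, and each context is simply an index into cmd_list.  The
 * wrap-around step, in isolation:
 */
static inline u32 megasas_example_next_consumer(struct megasas_instance *instance,
						u32 consumer)
{
	consumer++;
	if (consumer == instance->max_fw_cmds + 1)	/* one past the last slot */
		consumer = 0;

	return consumer;
}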
1157/**
1158 * megasas_isr - isr entry point
1159 */
1160static irqreturn_t megasas_isr(int irq, void *devp, struct pt_regs *regs)
1161{
1162 return megasas_deplete_reply_queue((struct megasas_instance *)devp,
1163 DID_OK);
1164}
1165
1166/**
1167 * megasas_transition_to_ready - Move the FW to READY state
1168 * @reg_set: MFI register set
1169 *
1170 * During initialization, the FW can be in any one of several possible
1171 * states. If the FW is in the operational or waiting-for-handshake states,
1172 * the driver must take steps to bring it to the ready state. Otherwise, it
1173 * has to wait for the FW to reach the ready state.
1174 */
1175static int
1176megasas_transition_to_ready(struct megasas_register_set __iomem * reg_set)
1177{
1178 int i;
1179 u8 max_wait;
1180 u32 fw_state;
1181 u32 cur_state;
1182
1183 fw_state = readl(&reg_set->outbound_msg_0) & MFI_STATE_MASK;
1184
1185 while (fw_state != MFI_STATE_READY) {
1186
1187 printk(KERN_INFO "megasas: Waiting for FW to come to ready"
1188 " state\n");
1189 switch (fw_state) {
1190
1191 case MFI_STATE_FAULT:
1192
1193 printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
1194 return -ENODEV;
1195
1196 case MFI_STATE_WAIT_HANDSHAKE:
1197 /*
1198 * Set the CLR bit in inbound doorbell
1199 */
1200 writel(MFI_INIT_CLEAR_HANDSHAKE,
1201 &reg_set->inbound_doorbell);
1202
1203 max_wait = 2;
1204 cur_state = MFI_STATE_WAIT_HANDSHAKE;
1205 break;
1206
1207 case MFI_STATE_OPERATIONAL:
1208 /*
1209 * Bring it to READY state; assuming max wait 2 secs
1210 */
1211 megasas_disable_intr(reg_set);
1212 writel(MFI_INIT_READY, &reg_set->inbound_doorbell);
1213
1214 max_wait = 10;
1215 cur_state = MFI_STATE_OPERATIONAL;
1216 break;
1217
1218 case MFI_STATE_UNDEFINED:
1219 /*
1220 * This state should not last for more than 2 seconds
1221 */
1222 max_wait = 2;
1223 cur_state = MFI_STATE_UNDEFINED;
1224 break;
1225
1226 case MFI_STATE_BB_INIT:
1227 max_wait = 2;
1228 cur_state = MFI_STATE_BB_INIT;
1229 break;
1230
1231 case MFI_STATE_FW_INIT:
1232 max_wait = 20;
1233 cur_state = MFI_STATE_FW_INIT;
1234 break;
1235
1236 case MFI_STATE_FW_INIT_2:
1237 max_wait = 20;
1238 cur_state = MFI_STATE_FW_INIT_2;
1239 break;
1240
1241 case MFI_STATE_DEVICE_SCAN:
1242 max_wait = 20;
1243 cur_state = MFI_STATE_DEVICE_SCAN;
1244 break;
1245
1246 case MFI_STATE_FLUSH_CACHE:
1247 max_wait = 20;
1248 cur_state = MFI_STATE_FLUSH_CACHE;
1249 break;
1250
1251 default:
1252 printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
1253 fw_state);
1254 return -ENODEV;
1255 }
1256
1257 /*
1258 * The cur_state should not last for more than max_wait secs
1259 */
1260 for (i = 0; i < (max_wait * 1000); i++) {
1261 fw_state = MFI_STATE_MASK &
1262 readl(&reg_set->outbound_msg_0);
1263
1264 if (fw_state == cur_state) {
1265 msleep(1);
1266 } else
1267 break;
1268 }
1269
1270 /*
1271 * Return error if fw_state hasn't changed after max_wait
1272 */
1273 if (fw_state == cur_state) {
1274 printk(KERN_DEBUG "FW state [%d] hasn't changed "
1275 "in %d secs\n", fw_state, max_wait);
1276 return -ENODEV;
1277 }
1278 };
1279
1280 return 0;
1281}
1282
1283/**
1284 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
1285 * @instance: Adapter soft state
1286 */
1287static void megasas_teardown_frame_pool(struct megasas_instance *instance)
1288{
1289 int i;
1290 u32 max_cmd = instance->max_fw_cmds;
1291 struct megasas_cmd *cmd;
1292
1293 if (!instance->frame_dma_pool)
1294 return;
1295
1296 /*
1297 * Return all frames to pool
1298 */
1299 for (i = 0; i < max_cmd; i++) {
1300
1301 cmd = instance->cmd_list[i];
1302
1303 if (cmd->frame)
1304 pci_pool_free(instance->frame_dma_pool, cmd->frame,
1305 cmd->frame_phys_addr);
1306
1307 if (cmd->sense)
1308 pci_pool_free(instance->sense_dma_pool, cmd->frame,
1309 cmd->sense_phys_addr);
1310 }
1311
1312 /*
1313 * Now destroy the pool itself
1314 */
1315 pci_pool_destroy(instance->frame_dma_pool);
1316 pci_pool_destroy(instance->sense_dma_pool);
1317
1318 instance->frame_dma_pool = NULL;
1319 instance->sense_dma_pool = NULL;
1320}
1321
1322/**
1323 * megasas_create_frame_pool - Creates DMA pool for cmd frames
1324 * @instance: Adapter soft state
1325 *
1326 * Each command packet has an embedded DMA memory buffer that is used for
1327 * filling MFI frame and the SG list that immediately follows the frame. This
1328 * function creates those DMA memory buffers for each command packet by using
1329 * PCI pool facility.
1330 */
1331static int megasas_create_frame_pool(struct megasas_instance *instance)
1332{
1333 int i;
1334 u32 max_cmd;
1335 u32 sge_sz;
1336 u32 sgl_sz;
1337 u32 total_sz;
1338 u32 frame_count;
1339 struct megasas_cmd *cmd;
1340
1341 max_cmd = instance->max_fw_cmds;
1342
1343 /*
1344 * Size of our frame is 64 bytes for MFI frame, followed by max SG
1345 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
1346 */
1347 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1348 sizeof(struct megasas_sge32);
1349
1350 /*
1351 * Calculated the number of 64byte frames required for SGL
1352 */
1353 sgl_sz = sge_sz * instance->max_num_sge;
1354 frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
1355
1356 /*
1357 * We need one extra frame for the MFI command
1358 */
1359 frame_count++;
1360
1361 total_sz = MEGAMFI_FRAME_SIZE * frame_count;
1362 /*
1363 * Use DMA pool facility provided by PCI layer
1364 */
1365 instance->frame_dma_pool = pci_pool_create("megasas frame pool",
1366 instance->pdev, total_sz, 64,
1367 0);
1368
1369 if (!instance->frame_dma_pool) {
1370 printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
1371 return -ENOMEM;
1372 }
1373
1374 instance->sense_dma_pool = pci_pool_create("megasas sense pool",
1375 instance->pdev, 128, 4, 0);
1376
1377 if (!instance->sense_dma_pool) {
1378 printk(KERN_DEBUG "megasas: failed to setup sense pool\n");
1379
1380 pci_pool_destroy(instance->frame_dma_pool);
1381 instance->frame_dma_pool = NULL;
1382
1383 return -ENOMEM;
1384 }
1385
1386 /*
1387 * Allocate and attach a frame to each of the commands in cmd_list.
1388 * By making cmd->index as the context instead of the &cmd, we can
1389 * always use 32bit context regardless of the architecture
1390 */
1391 for (i = 0; i < max_cmd; i++) {
1392
1393 cmd = instance->cmd_list[i];
1394
1395 cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
1396 GFP_KERNEL, &cmd->frame_phys_addr);
1397
1398 cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
1399 GFP_KERNEL, &cmd->sense_phys_addr);
1400
1401 /*
1402 * megasas_teardown_frame_pool() takes care of freeing
1403 * whatever has been allocated
1404 */
1405 if (!cmd->frame || !cmd->sense) {
1406 printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n");
1407 megasas_teardown_frame_pool(instance);
1408 return -ENOMEM;
1409 }
1410
1411 cmd->frame->io.context = cmd->index;
1412 }
1413
1414 return 0;
1415}
1416
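/*
 * [Editor's illustrative sketch, not part of the submitted patch.]  Working
 * through the sizing above with made-up numbers: with 32 SGEs of 16 bytes,
 * sgl_sz = 512, which needs ceil(512 / 64) = 8 frames; one more frame for
 * the MFI header gives frame_count = 9 and a 576-byte pool object per
 * command.
 */
static inline u32 megasas_example_pool_obj_size(u32 max_num_sge, u32 sge_sz)
{
	u32 sgl_sz = sge_sz * max_num_sge;
	u32 frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE + 1;

	return MEGAMFI_FRAME_SIZE * frame_count;	/* e.g. 32 * 16 B -> 576 B */
}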
1417/**
1418 * megasas_free_cmds - Free all the cmds in the free cmd pool
1419 * @instance: Adapter soft state
1420 */
1421static void megasas_free_cmds(struct megasas_instance *instance)
1422{
1423 int i;
1424 /* First free the MFI frame pool */
1425 megasas_teardown_frame_pool(instance);
1426
1427 /* Free all the commands in the cmd_list */
1428 for (i = 0; i < instance->max_fw_cmds; i++)
1429 kfree(instance->cmd_list[i]);
1430
1431 /* Free the cmd_list buffer itself */
1432 kfree(instance->cmd_list);
1433 instance->cmd_list = NULL;
1434
1435 INIT_LIST_HEAD(&instance->cmd_pool);
1436}
1437
1438/**
1439 * megasas_alloc_cmds - Allocates the command packets
1440 * @instance: Adapter soft state
1441 *
1442 * Each command that is issued to the FW, whether IO commands from the OS or
1443 * internal commands like IOCTLs, is wrapped in a local data structure called
1444 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
1445 * the FW.
1446 *
1447 * Each frame has a 32-bit field called context (tag). This context is used
1448 * to get back the megasas_cmd from the frame when a frame gets completed in
1449 * the ISR. Typically the address of the megasas_cmd itself would be used as
1450 * the context. But we wanted to keep the differences between 32 and 64 bit
1451 * systems to the minimum. We always use 32 bit integers for the context. In
1452 * this driver, the 32 bit values are the indices into an array cmd_list.
1453 * This array is used only to look up the megasas_cmd given the context. The
1454 * free commands themselves are maintained in a linked list called cmd_pool.
1455 */
1456static int megasas_alloc_cmds(struct megasas_instance *instance)
1457{
1458 int i;
1459 int j;
1460 u32 max_cmd;
1461 struct megasas_cmd *cmd;
1462
1463 max_cmd = instance->max_fw_cmds;
1464
1465 /*
1466 * instance->cmd_list is an array of struct megasas_cmd pointers.
1467 * Allocate the dynamic array first and then allocate individual
1468 * commands.
1469 */
1470 instance->cmd_list = kmalloc(sizeof(struct megasas_cmd *) * max_cmd,
1471 GFP_KERNEL);
1472
1473 if (!instance->cmd_list) {
1474 printk(KERN_DEBUG "megasas: out of memory\n");
1475 return -ENOMEM;
1476 }
1477
1478 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) * max_cmd);
1479
1480 for (i = 0; i < max_cmd; i++) {
1481 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
1482 GFP_KERNEL);
1483
1484 if (!instance->cmd_list[i]) {
1485
1486 for (j = 0; j < i; j++)
1487 kfree(instance->cmd_list[j]);
1488
1489 kfree(instance->cmd_list);
1490 instance->cmd_list = NULL;
1491
1492 return -ENOMEM;
1493 }
1494 }
1495
1496 /*
1497 * Add all the commands to command pool (instance->cmd_pool)
1498 */
1499 for (i = 0; i < max_cmd; i++) {
1500 cmd = instance->cmd_list[i];
1501 memset(cmd, 0, sizeof(struct megasas_cmd));
1502 cmd->index = i;
1503 cmd->instance = instance;
1504
1505 list_add_tail(&cmd->list, &instance->cmd_pool);
1506 }
1507
1508 /*
1509 * Create a frame pool and assign one frame to each cmd
1510 */
1511 if (megasas_create_frame_pool(instance)) {
1512 printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
1513 megasas_free_cmds(instance);
1514 }
1515
1516 return 0;
1517}
1518
1519/**
1520 * megasas_get_controller_info - Returns FW's controller structure
1521 * @instance: Adapter soft state
1522 * @ctrl_info: Controller information structure
1523 *
1524 * Issues an internal command (DCMD) to get the FW's controller structure.
1525 * This information is mainly used to find out the maximum IO transfer per
1526 * command supported by the FW.
1527 */
1528static int
1529megasas_get_ctrl_info(struct megasas_instance *instance,
1530 struct megasas_ctrl_info *ctrl_info)
1531{
1532 int ret = 0;
1533 struct megasas_cmd *cmd;
1534 struct megasas_dcmd_frame *dcmd;
1535 struct megasas_ctrl_info *ci;
1536 dma_addr_t ci_h = 0;
1537
1538 cmd = megasas_get_cmd(instance);
1539
1540 if (!cmd) {
1541 printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
1542 return -ENOMEM;
1543 }
1544
1545 dcmd = &cmd->frame->dcmd;
1546
1547 ci = pci_alloc_consistent(instance->pdev,
1548 sizeof(struct megasas_ctrl_info), &ci_h);
1549
1550 if (!ci) {
1551 printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n");
1552 megasas_return_cmd(instance, cmd);
1553 return -ENOMEM;
1554 }
1555
1556 memset(ci, 0, sizeof(*ci));
1557 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1558
1559 dcmd->cmd = MFI_CMD_DCMD;
1560 dcmd->cmd_status = 0xFF;
1561 dcmd->sge_count = 1;
1562 dcmd->flags = MFI_FRAME_DIR_READ;
1563 dcmd->timeout = 0;
1564 dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
1565 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
1566 dcmd->sgl.sge32[0].phys_addr = ci_h;
1567 dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info);
1568
1569 if (!megasas_issue_polled(instance, cmd)) {
1570 ret = 0;
1571 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
1572 } else {
1573 ret = -1;
1574 }
1575
1576 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
1577 ci, ci_h);
1578
1579 megasas_return_cmd(instance, cmd);
1580 return ret;
1581}
1582
1583/**
1584 * megasas_init_mfi - Initializes the FW
1585 * @instance: Adapter soft state
1586 *
1587 * This is the main function for initializing MFI firmware.
1588 */
1589static int megasas_init_mfi(struct megasas_instance *instance)
1590{
1591 u32 context_sz;
1592 u32 reply_q_sz;
1593 u32 max_sectors_1;
1594 u32 max_sectors_2;
1595 struct megasas_register_set __iomem *reg_set;
1596
1597 struct megasas_cmd *cmd;
1598 struct megasas_ctrl_info *ctrl_info;
1599
1600 struct megasas_init_frame *init_frame;
1601 struct megasas_init_queue_info *initq_info;
1602 dma_addr_t init_frame_h;
1603 dma_addr_t initq_info_h;
1604
1605 /*
1606 * Map the message registers
1607 */
1608 instance->base_addr = pci_resource_start(instance->pdev, 0);
1609
1610 if (pci_request_regions(instance->pdev, "megasas: LSI Logic")) {
1611 printk(KERN_DEBUG "megasas: IO memory region busy!\n");
1612 return -EBUSY;
1613 }
1614
1615 instance->reg_set = ioremap_nocache(instance->base_addr, 8192);
1616
1617 if (!instance->reg_set) {
1618 printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
1619 goto fail_ioremap;
1620 }
1621
1622 reg_set = instance->reg_set;
1623
1624 /*
1625 * We expect the FW state to be READY
1626 */
1627 if (megasas_transition_to_ready(instance->reg_set))
1628 goto fail_ready_state;
1629
1630 /*
1631 * Get various operational parameters from status register
1632 */
1633 instance->max_fw_cmds = readl(&reg_set->outbound_msg_0) & 0x00FFFF;
1634 instance->max_num_sge = (readl(&reg_set->outbound_msg_0) & 0xFF0000) >>
1635 0x10;
1636 /*
1637 * Create a pool of commands
1638 */
1639 if (megasas_alloc_cmds(instance))
1640 goto fail_alloc_cmds;
1641
1642 /*
1643 * Allocate memory for reply queue. Length of reply queue should
1644 * be _one_ more than the maximum commands handled by the firmware.
1645 *
1646 * Note: When FW completes commands, it places corresponding context
1647 * values in this circular reply queue. This circular queue is a fairly
1648 * typical producer-consumer queue. FW is the producer (of completed
1649 * commands) and the driver is the consumer.
1650 */
1651 context_sz = sizeof(u32);
1652 reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
1653
1654 instance->reply_queue = pci_alloc_consistent(instance->pdev,
1655 reply_q_sz,
1656 &instance->reply_queue_h);
1657
1658 if (!instance->reply_queue) {
1659 printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
1660 goto fail_reply_queue;
1661 }
1662
1663 /*
1664 * Prepare a init frame. Note the init frame points to queue info
1665 * structure. Each frame has SGL allocated after first 64 bytes. For
1666 * this frame - since we don't need any SGL - we use SGL's space as
1667 * queue info structure
1668 *
1669 * We will not get a NULL command below. We just created the pool.
1670 */
1671 cmd = megasas_get_cmd(instance);
1672
1673 init_frame = (struct megasas_init_frame *)cmd->frame;
1674 initq_info = (struct megasas_init_queue_info *)
1675 ((unsigned long)init_frame + 64);
1676
1677 init_frame_h = cmd->frame_phys_addr;
1678 initq_info_h = init_frame_h + 64;
1679
1680 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
1681 memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
1682
1683 initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
1684 initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
1685
1686 initq_info->producer_index_phys_addr_lo = instance->producer_h;
1687 initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
1688
1689 init_frame->cmd = MFI_CMD_INIT;
1690 init_frame->cmd_status = 0xFF;
1691 init_frame->queue_info_new_phys_addr_lo = initq_info_h;
1692
1693 init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
1694
1695 /*
1696 * Issue the init frame in polled mode
1697 */
1698 if (megasas_issue_polled(instance, cmd)) {
1699 printk(KERN_DEBUG "megasas: Failed to init firmware\n");
1700 goto fail_fw_init;
1701 }
1702
1703 megasas_return_cmd(instance, cmd);
1704
1705 ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
1706
1707 /*
1708 * Compute the max allowed sectors per IO: The controller info has two
1709 * limits on max sectors. Driver should use the minimum of these two.
1710 *
1711 * 1 << stripe_sz_ops.min = max sectors per strip
1712 *
1713 * Note that older firmware (< FW ver 30) didn't report the information
1714 * needed to calculate max_sectors_1, so the number always ended up as zero.
1715 */
1716 if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
1717
1718 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
1719 ctrl_info->max_strips_per_io;
1720 max_sectors_2 = ctrl_info->max_request_size;
1721
1722 instance->max_sectors_per_req = (max_sectors_1 < max_sectors_2)
1723 ? max_sectors_1 : max_sectors_2;
1724 } else
1725 instance->max_sectors_per_req = instance->max_num_sge *
1726 PAGE_SIZE / 512;
1727
1728 kfree(ctrl_info);
1729
1730 return 0;
1731
1732 fail_fw_init:
1733 megasas_return_cmd(instance, cmd);
1734
1735 pci_free_consistent(instance->pdev, reply_q_sz,
1736 instance->reply_queue, instance->reply_queue_h);
1737 fail_reply_queue:
1738 megasas_free_cmds(instance);
1739
1740 fail_alloc_cmds:
1741 fail_ready_state:
1742 iounmap(instance->reg_set);
1743
1744 fail_ioremap:
1745 pci_release_regions(instance->pdev);
1746
1747 return -EINVAL;
1748}
1749
1750/**
1751 * megasas_release_mfi - Reverses the FW initialization
1752 * @intance: Adapter soft state
1753 */
1754static void megasas_release_mfi(struct megasas_instance *instance)
1755{
1756 u32 reply_q_sz = sizeof(u32) * (instance->max_fw_cmds + 1);
1757
1758 pci_free_consistent(instance->pdev, reply_q_sz,
1759 instance->reply_queue, instance->reply_queue_h);
1760
1761 megasas_free_cmds(instance);
1762
1763 iounmap(instance->reg_set);
1764
1765 pci_release_regions(instance->pdev);
1766}
1767
1768/**
1769 * megasas_get_seq_num - Gets latest event sequence numbers
1770 * @instance: Adapter soft state
1771 * @eli: FW event log sequence numbers information
1772 *
1773 * FW maintains a log of all events in a non-volatile area. Upper layers would
1774 * usually find out the latest sequence number of the events, the seq number at
1775 * boot, etc. They would "read" all the events below the latest seq number
1776 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
1777 * number), they would subscribe to AEN (asynchronous event notification) and
1778 * wait for the events to happen.
1779 */
1780static int
1781megasas_get_seq_num(struct megasas_instance *instance,
1782 struct megasas_evt_log_info *eli)
1783{
1784 struct megasas_cmd *cmd;
1785 struct megasas_dcmd_frame *dcmd;
1786 struct megasas_evt_log_info *el_info;
1787 dma_addr_t el_info_h = 0;
1788
1789 cmd = megasas_get_cmd(instance);
1790
1791 if (!cmd) {
1792 return -ENOMEM;
1793 }
1794
1795 dcmd = &cmd->frame->dcmd;
1796 el_info = pci_alloc_consistent(instance->pdev,
1797 sizeof(struct megasas_evt_log_info),
1798 &el_info_h);
1799
1800 if (!el_info) {
1801 megasas_return_cmd(instance, cmd);
1802 return -ENOMEM;
1803 }
1804
1805 memset(el_info, 0, sizeof(*el_info));
1806 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1807
1808 dcmd->cmd = MFI_CMD_DCMD;
1809 dcmd->cmd_status = 0x0;
1810 dcmd->sge_count = 1;
1811 dcmd->flags = MFI_FRAME_DIR_READ;
1812 dcmd->timeout = 0;
1813 dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
1814 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
1815 dcmd->sgl.sge32[0].phys_addr = el_info_h;
1816 dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info);
1817
1818 megasas_issue_blocked_cmd(instance, cmd);
1819
1820 /*
1821 * Copy the data back into the caller's buffer
1822 */
1823 memcpy(eli, el_info, sizeof(struct megasas_evt_log_info));
1824
1825 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
1826 el_info, el_info_h);
1827
1828 megasas_return_cmd(instance, cmd);
1829
1830 return 0;
1831}
1832
1833/**
1834 * megasas_register_aen - Registers for asynchronous event notification
1835 * @instance: Adapter soft state
1836 * @seq_num: The starting sequence number
1837 * @class_locale_word: Class and locale filter for the events
1838 *
1839 * This function subscribes to AEN for events beyond @seq_num. It requests
1840 * to be notified if and only if the event matches @class_locale_word
1841 */
1842static int
1843megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
1844 u32 class_locale_word)
1845{
1846 int ret_val;
1847 struct megasas_cmd *cmd;
1848 struct megasas_dcmd_frame *dcmd;
1849 union megasas_evt_class_locale curr_aen;
1850 union megasas_evt_class_locale prev_aen;
1851
1852 /*
1853 * If there is an AEN pending already (aen_cmd), check if the
1854 * class_locale of that pending AEN is inclusive of the new
1855 * AEN request we currently have. If it is, then we don't have
1856 * to do anything. In other words, whichever events the current
1857 * AEN request is subscribing to have already been subscribed
1858 * to.
1859 *
1860 * If the old_cmd is _not_ inclusive, then we have to abort
1861 * that command, form a class_locale that is superset of both
1862 * old and current and re-issue to the FW
1863 */
1864
1865 curr_aen.word = class_locale_word;
1866
1867 if (instance->aen_cmd) {
1868
1869 prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
1870
1871 /*
1872 * A class whose enum value is smaller is inclusive of all
1873 * higher values. If a PROGRESS (= -1) was previously
1874 * registered, then new registration requests for higher
1875 * classes need not be sent to the FW. They are automatically
1876 * included.
1877 *
1878 * Locale numbers don't have such a hierarchy. They are bitmap
1879 * values.
1880 */
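/*
 * Worked example (hypothetical values): if the pending AEN was registered
 * with class PROGRESS (-1) and locale LD|PD (0x0003), a new request for
 * class CRITICAL (2) and locale PD (0x0002) is already covered, since
 * -1 <= 2 and (0x0003 & 0x0002) == 0x0002; nothing is re-issued.
 */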
1881 if ((prev_aen.members.class <= curr_aen.members.class) &&
1882 !((prev_aen.members.locale & curr_aen.members.locale) ^
1883 curr_aen.members.locale)) {
1884 /*
1885 * Previously issued event registration includes
1886 * current request. Nothing to do.
1887 */
1888 return 0;
1889 } else {
1890 curr_aen.members.locale |= prev_aen.members.locale;
1891
1892 if (prev_aen.members.class < curr_aen.members.class)
1893 curr_aen.members.class = prev_aen.members.class;
1894
1895 instance->aen_cmd->abort_aen = 1;
1896 ret_val = megasas_issue_blocked_abort_cmd(instance,
1897 instance->
1898 aen_cmd);
1899
1900 if (ret_val) {
1901 printk(KERN_DEBUG "megasas: Failed to abort "
1902 "previous AEN command\n");
1903 return ret_val;
1904 }
1905 }
1906 }
1907
1908 cmd = megasas_get_cmd(instance);
1909
1910 if (!cmd)
1911 return -ENOMEM;
1912
1913 dcmd = &cmd->frame->dcmd;
1914
1915 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
1916
1917 /*
1918 * Prepare DCMD for aen registration
1919 */
1920 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1921
1922 dcmd->cmd = MFI_CMD_DCMD;
1923 dcmd->cmd_status = 0x0;
1924 dcmd->sge_count = 1;
1925 dcmd->flags = MFI_FRAME_DIR_READ;
1926 dcmd->timeout = 0;
1927 dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
1928 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
1929 dcmd->mbox.w[0] = seq_num;
1930 dcmd->mbox.w[1] = curr_aen.word;
1931 dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
1932 dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
1933
1934 /*
1935 * Store reference to the cmd used to register for AEN. When an
1936 * application wants us to register for AEN, we have to abort this
1937 * cmd and re-register with a new EVENT LOCALE supplied by that app
1938 */
1939 instance->aen_cmd = cmd;
1940
1941 /*
1942 * Issue the aen registration frame
1943 */
1944 writel(cmd->frame_phys_addr >> 3,
1945 &instance->reg_set->inbound_queue_port);
1946
1947 return 0;
1948}
1949
1950/**
1951 * megasas_start_aen - Subscribes to AEN during driver load time
1952 * @instance: Adapter soft state
1953 */
1954static int megasas_start_aen(struct megasas_instance *instance)
1955{
1956 struct megasas_evt_log_info eli;
1957 union megasas_evt_class_locale class_locale;
1958
1959 /*
1960 * Get the latest sequence number from FW
1961 */
1962 memset(&eli, 0, sizeof(eli));
1963
1964 if (megasas_get_seq_num(instance, &eli))
1965 return -1;
1966
1967 /*
1968 * Register AEN with FW for latest sequence number plus 1
1969 */
1970 class_locale.members.reserved = 0;
1971 class_locale.members.locale = MR_EVT_LOCALE_ALL;
1972 class_locale.members.class = MR_EVT_CLASS_DEBUG;
1973
1974 return megasas_register_aen(instance, eli.newest_seq_num + 1,
1975 class_locale.word);
1976}
1977
1978/**
1979 * megasas_io_attach - Attaches this driver to SCSI mid-layer
1980 * @instance: Adapter soft state
1981 */
1982static int megasas_io_attach(struct megasas_instance *instance)
1983{
1984 struct Scsi_Host *host = instance->host;
1985
1986 /*
1987 * Export parameters required by SCSI mid-layer
1988 */
1989 host->irq = instance->pdev->irq;
1990 host->unique_id = instance->unique_id;
1991 host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS;
1992 host->this_id = instance->init_id;
1993 host->sg_tablesize = instance->max_num_sge;
1994 host->max_sectors = instance->max_sectors_per_req;
1995 host->cmd_per_lun = 128;
1996 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
1997 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
1998 host->max_lun = MEGASAS_MAX_LUN;
1999
2000 /*
2001 * Notify the mid-layer about the new controller
2002 */
2003 if (scsi_add_host(host, &instance->pdev->dev)) {
2004 printk(KERN_DEBUG "megasas: scsi_add_host failed\n");
2005 return -ENODEV;
2006 }
2007
2008 /*
2009 * Trigger SCSI to scan our drives
2010 */
2011 scsi_scan_host(host);
2012 return 0;
2013}
2014
2015/**
2016 * megasas_probe_one - PCI hotplug entry point
2017 * @pdev: PCI device structure
2018 * @id: PCI ids of supported hotplugged adapter
2019 */
2020static int __devinit
2021megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2022{
2023 int rval;
2024 struct Scsi_Host *host;
2025 struct megasas_instance *instance;
2026
2027 /*
2028 * Announce PCI information
2029 */
2030 printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
2031 pdev->vendor, pdev->device, pdev->subsystem_vendor,
2032 pdev->subsystem_device);
2033
2034 printk("bus %d:slot %d:func %d\n",
2035 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
2036
2037 /*
2038 * PCI prepping: enable device, set bus mastering and dma mask
2039 */
2040 rval = pci_enable_device(pdev);
2041
2042 if (rval) {
2043 return rval;
2044 }
2045
2046 pci_set_master(pdev);
2047
2048 /*
2049 * All our controllers are capable of performing 64-bit DMA
2050 */
2051 if (IS_DMA64) {
2052 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
2053
2054 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
2055 goto fail_set_dma_mask;
2056 }
2057 } else {
2058 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
2059 goto fail_set_dma_mask;
2060 }
2061
2062 host = scsi_host_alloc(&megasas_template,
2063 sizeof(struct megasas_instance));
2064
2065 if (!host) {
2066 printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n");
2067 goto fail_alloc_instance;
2068 }
2069
2070 instance = (struct megasas_instance *)host->hostdata;
2071 memset(instance, 0, sizeof(*instance));
2072
2073 instance->producer = pci_alloc_consistent(pdev, sizeof(u32),
2074 &instance->producer_h);
2075 instance->consumer = pci_alloc_consistent(pdev, sizeof(u32),
2076 &instance->consumer_h);
2077
2078 if (!instance->producer || !instance->consumer) {
2079 printk(KERN_DEBUG "megasas: Failed to allocate memory for "
2080 "producer, consumer\n");
2081 goto fail_alloc_dma_buf;
2082 }
2083
2084 *instance->producer = 0;
2085 *instance->consumer = 0;
2086
2087 instance->evt_detail = pci_alloc_consistent(pdev,
2088 sizeof(struct
2089 megasas_evt_detail),
2090 &instance->evt_detail_h);
2091
2092 if (!instance->evt_detail) {
2093 printk(KERN_DEBUG "megasas: Failed to allocate memory for "
2094 "event detail structure\n");
2095 goto fail_alloc_dma_buf;
2096 }
2097
2098 /*
2099 * Initialize locks and queues
2100 */
2101 INIT_LIST_HEAD(&instance->cmd_pool);
2102
2103 init_waitqueue_head(&instance->int_cmd_wait_q);
2104 init_waitqueue_head(&instance->abort_cmd_wait_q);
2105
2106 spin_lock_init(&instance->cmd_pool_lock);
2107 spin_lock_init(&instance->instance_lock);
2108
2109 sema_init(&instance->aen_mutex, 1);
2110 sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
2111
2112 /*
2113 * Initialize PCI related and misc parameters
2114 */
2115 instance->pdev = pdev;
2116 instance->host = host;
2117 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
2118 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
2119
2120 /*
2121 * Initialize MFI Firmware
2122 */
2123 if (megasas_init_mfi(instance))
2124 goto fail_init_mfi;
2125
2126 /*
2127 * Register IRQ
2128 */
2129 if (request_irq(pdev->irq, megasas_isr, SA_SHIRQ, "megasas", instance)) {
2130 printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
2131 goto fail_irq;
2132 }
2133
2134 megasas_enable_intr(instance->reg_set);
2135
2136 /*
2137 * Store instance in PCI softstate
2138 */
2139 pci_set_drvdata(pdev, instance);
2140
2141 /*
2142 * Add this controller to megasas_mgmt_info structure so that it
2143 * can be exported to management applications
2144 */
2145 megasas_mgmt_info.count++;
2146 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
2147 megasas_mgmt_info.max_index++;
2148
2149 /*
2150 * Initiate AEN (Asynchronous Event Notification)
2151 */
2152 if (megasas_start_aen(instance)) {
2153 printk(KERN_DEBUG "megasas: start aen failed\n");
2154 goto fail_start_aen;
2155 }
2156
2157 /*
2158 * Register with SCSI mid-layer
2159 */
2160 if (megasas_io_attach(instance))
2161 goto fail_io_attach;
2162
2163 return 0;
2164
2165 fail_start_aen:
2166 fail_io_attach:
2167 megasas_mgmt_info.count--;
2168 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
2169 megasas_mgmt_info.max_index--;
2170
2171 pci_set_drvdata(pdev, NULL);
2172 megasas_disable_intr(instance->reg_set);
2173 free_irq(instance->pdev->irq, instance);
2174
2175 megasas_release_mfi(instance);
2176
2177 fail_irq:
2178 fail_init_mfi:
2179 fail_alloc_dma_buf:
2180 if (instance->evt_detail)
2181 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
2182 instance->evt_detail,
2183 instance->evt_detail_h);
2184
2185 if (instance->producer)
2186 pci_free_consistent(pdev, sizeof(u32), instance->producer,
2187 instance->producer_h);
2188 if (instance->consumer)
2189 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
2190 instance->consumer_h);
2191 scsi_host_put(host);
2192
2193 fail_alloc_instance:
2194 fail_set_dma_mask:
2195 pci_disable_device(pdev);
2196
2197 return -ENODEV;
2198}
2199
2200/**
2201 * megasas_flush_cache - Requests FW to flush all its caches
2202 * @instance: Adapter soft state
2203 */
2204static void megasas_flush_cache(struct megasas_instance *instance)
2205{
2206 struct megasas_cmd *cmd;
2207 struct megasas_dcmd_frame *dcmd;
2208
2209 cmd = megasas_get_cmd(instance);
2210
2211 if (!cmd)
2212 return;
2213
2214 dcmd = &cmd->frame->dcmd;
2215
2216 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2217
2218 dcmd->cmd = MFI_CMD_DCMD;
2219 dcmd->cmd_status = 0x0;
2220 dcmd->sge_count = 0;
2221 dcmd->flags = MFI_FRAME_DIR_NONE;
2222 dcmd->timeout = 0;
2223 dcmd->data_xfer_len = 0;
2224 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
2225 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
2226
2227 megasas_issue_blocked_cmd(instance, cmd);
2228
2229 megasas_return_cmd(instance, cmd);
2230
2231 return;
2232}
2233
2234/**
2235 * megasas_shutdown_controller - Instructs FW to shutdown the controller
2236 * @instance: Adapter soft state
2237 */
2238static void megasas_shutdown_controller(struct megasas_instance *instance)
2239{
2240 struct megasas_cmd *cmd;
2241 struct megasas_dcmd_frame *dcmd;
2242
2243 cmd = megasas_get_cmd(instance);
2244
2245 if (!cmd)
2246 return;
2247
2248 if (instance->aen_cmd)
2249 megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
2250
2251 dcmd = &cmd->frame->dcmd;
2252
2253 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2254
2255 dcmd->cmd = MFI_CMD_DCMD;
2256 dcmd->cmd_status = 0x0;
2257 dcmd->sge_count = 0;
2258 dcmd->flags = MFI_FRAME_DIR_NONE;
2259 dcmd->timeout = 0;
2260 dcmd->data_xfer_len = 0;
2261 dcmd->opcode = MR_DCMD_CTRL_SHUTDOWN;
2262
2263 megasas_issue_blocked_cmd(instance, cmd);
2264
2265 megasas_return_cmd(instance, cmd);
2266
2267 return;
2268}
2269
2270/**
2271 * megasas_detach_one - PCI hot"un"plug entry point
2272 * @pdev: PCI device structure
2273 */
2274static void megasas_detach_one(struct pci_dev *pdev)
2275{
2276 int i;
2277 struct Scsi_Host *host;
2278 struct megasas_instance *instance;
2279
2280 instance = pci_get_drvdata(pdev);
2281 host = instance->host;
2282
2283 scsi_remove_host(instance->host);
2284 megasas_flush_cache(instance);
2285 megasas_shutdown_controller(instance);
2286
2287 /*
2288 * Take the instance off the instance array. Note that we will not
2289 * decrement the max_index. We let this array be a sparse array
2290 */
2291 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
2292 if (megasas_mgmt_info.instance[i] == instance) {
2293 megasas_mgmt_info.count--;
2294 megasas_mgmt_info.instance[i] = NULL;
2295
2296 break;
2297 }
2298 }
2299
2300 pci_set_drvdata(instance->pdev, NULL);
2301
2302 megasas_disable_intr(instance->reg_set);
2303
2304 free_irq(instance->pdev->irq, instance);
2305
2306 megasas_release_mfi(instance);
2307
2308 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
2309 instance->evt_detail, instance->evt_detail_h);
2310
2311 pci_free_consistent(pdev, sizeof(u32), instance->producer,
2312 instance->producer_h);
2313
2314 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
2315 instance->consumer_h);
2316
2317 scsi_host_put(host);
2318
2319 pci_set_drvdata(pdev, NULL);
2320
2321 pci_disable_device(pdev);
2322
2323 return;
2324}
2325
2326/**
2327 * megasas_shutdown - Shutdown entry point
2328 * @pdev: PCI device structure
2329 */
2330static void megasas_shutdown(struct pci_dev *pdev)
2331{
2332 struct megasas_instance *instance = pci_get_drvdata(pdev);
2333 megasas_flush_cache(instance);
2334}
2335
2336/**
2337 * megasas_mgmt_open - char node "open" entry point
2338 */
2339static int megasas_mgmt_open(struct inode *inode, struct file *filep)
2340{
2341 /*
2342 * Allow only those users with admin rights
2343 */
2344 if (!capable(CAP_SYS_ADMIN))
2345 return -EACCES;
2346
2347 return 0;
2348}
2349
2350/**
2351 * megasas_mgmt_release - char node "release" entry point
2352 */
2353static int megasas_mgmt_release(struct inode *inode, struct file *filep)
2354{
2355 filep->private_data = NULL;
2356 fasync_helper(-1, filep, 0, &megasas_async_queue);
2357
2358 return 0;
2359}
2360
2361/**
2362 * megasas_mgmt_fasync - Async notifier registration from applications
2363 *
2364 * This function adds the calling process to a driver global queue. When an
2365 * event occurs, SIGIO will be sent to all processes in this queue.
2366 */
2367static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
2368{
2369 int rc;
2370
2371 down(&megasas_async_queue_mutex);
2372
2373 rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
2374
2375 up(&megasas_async_queue_mutex);
2376
2377 if (rc >= 0) {
2378 /* For sanity check when we get ioctl */
2379 filep->private_data = filep;
2380 return 0;
2381 }
2382
2383 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
2384
2385 return rc;
2386}
2387
2388/**
2389 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
2390 * @instance: Adapter soft state
2391 * @user_ioc: User's ioctl packet
2392 */
2393static int
2394megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
2395 struct megasas_iocpacket __user * user_ioc,
2396 struct megasas_iocpacket *ioc)
2397{
2398 struct megasas_sge32 *kern_sge32;
2399 struct megasas_cmd *cmd;
2400 void *kbuff_arr[MAX_IOCTL_SGE];
2401 dma_addr_t buf_handle = 0;
2402 int error = 0, i;
2403 void *sense = NULL;
2404 dma_addr_t sense_handle;
2405 u32 *sense_ptr;
2406
2407 memset(kbuff_arr, 0, sizeof(kbuff_arr));
2408
2409 if (ioc->sge_count > MAX_IOCTL_SGE) {
2410 printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n",
2411 ioc->sge_count, MAX_IOCTL_SGE);
2412 return -EINVAL;
2413 }
2414
2415 cmd = megasas_get_cmd(instance);
2416 if (!cmd) {
2417 printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n");
2418 return -ENOMEM;
2419 }
2420
2421 /*
2422 * User's IOCTL packet has 2 frames (maximum). Copy those two
2423 * frames into our cmd's frames. cmd->frame's context will get
2424 * overwritten when we copy from user's frames. So set that value
2425 * separately after the copy
2426 */
2427 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
2428 cmd->frame->hdr.context = cmd->index;
2429
2430 /*
2431 * The management interface between applications and the fw uses
2432 * MFI frames. E.g., RAID configuration changes, LD property changes
2433 * etc. are accomplished through different kinds of MFI frames. The
2434 * driver needs to care only about substituting user buffers with
2435 * kernel buffers in SGLs. The location of SGL is embedded in the
2436 * struct iocpacket itself.
2437 */
2438 kern_sge32 = (struct megasas_sge32 *)
2439 ((unsigned long)cmd->frame + ioc->sgl_off);
2440
2441 /*
2442 * For each user buffer, create a mirror buffer and copy in
2443 */
2444 for (i = 0; i < ioc->sge_count; i++) {
2445 kbuff_arr[i] = pci_alloc_consistent(instance->pdev,
2446 ioc->sgl[i].iov_len,
2447 &buf_handle);
2448 if (!kbuff_arr[i]) {
2449 printk(KERN_DEBUG "megasas: Failed to alloc "
2450 "kernel SGL buffer for IOCTL \n");
2451 error = -ENOMEM;
2452 goto out;
2453 }
2454
2455 /*
2456 * We don't change the dma_coherent_mask, so
2457 * pci_alloc_consistent only returns 32bit addresses
2458 */
2459 kern_sge32[i].phys_addr = (u32) buf_handle;
2460 kern_sge32[i].length = ioc->sgl[i].iov_len;
2461
2462 /*
2463 * We created a kernel buffer corresponding to the
2464 * user buffer. Now copy in from the user buffer
2465 */
2466 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
2467 (u32) (ioc->sgl[i].iov_len))) {
2468 error = -EFAULT;
2469 goto out;
2470 }
2471 }
2472
2473 if (ioc->sense_len) {
2474 sense = pci_alloc_consistent(instance->pdev, ioc->sense_len,
2475 &sense_handle);
2476 if (!sense) {
2477 error = -ENOMEM;
2478 goto out;
2479 }
2480
2481 sense_ptr =
2482 (u32 *) ((unsigned long)cmd->frame + ioc->sense_off);
2483 *sense_ptr = sense_handle;
2484 }
2485
2486 /*
2487 * Set the sync_cmd flag so that the ISR knows not to complete this
2488 * cmd to the SCSI mid-layer
2489 */
2490 cmd->sync_cmd = 1;
2491 megasas_issue_blocked_cmd(instance, cmd);
2492 cmd->sync_cmd = 0;
2493
2494 /*
2495 * copy out the kernel buffers to user buffers
2496 */
2497 for (i = 0; i < ioc->sge_count; i++) {
2498 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
2499 ioc->sgl[i].iov_len)) {
2500 error = -EFAULT;
2501 goto out;
2502 }
2503 }
2504
2505 /*
2506 * copy out the sense
2507 */
2508 if (ioc->sense_len) {
2509 /*
2510 * sense_ptr points to the location that has the user
2511 * sense buffer address
2512 */
2513 sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw +
2514 ioc->sense_off);
2515
2516 if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
2517 sense, ioc->sense_len)) {
2518 error = -EFAULT;
2519 goto out;
2520 }
2521 }
2522
2523 /*
2524 * copy the status codes returned by the fw
2525 */
2526 if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
2527 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
2528 printk(KERN_DEBUG "megasas: Error copying out cmd_status\n");
2529 error = -EFAULT;
2530 }
2531
2532 out:
2533 if (sense) {
2534 pci_free_consistent(instance->pdev, ioc->sense_len,
2535 sense, sense_handle);
2536 }
2537
2538 for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) {
2539 pci_free_consistent(instance->pdev,
2540 kern_sge32[i].length,
2541 kbuff_arr[i], kern_sge32[i].phys_addr);
2542 }
2543
2544 megasas_return_cmd(instance, cmd);
2545 return error;
2546}
2547
2548static struct megasas_instance *megasas_lookup_instance(u16 host_no)
2549{
2550 int i;
2551
2552 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
2553
2554 if ((megasas_mgmt_info.instance[i]) &&
2555 (megasas_mgmt_info.instance[i]->host->host_no == host_no))
2556 return megasas_mgmt_info.instance[i];
2557 }
2558
2559 return NULL;
2560}
2561
2562static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
2563{
2564 struct megasas_iocpacket __user *user_ioc =
2565 (struct megasas_iocpacket __user *)arg;
2566 struct megasas_iocpacket *ioc;
2567 struct megasas_instance *instance;
2568 int error;
2569
2570 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
2571 if (!ioc)
2572 return -ENOMEM;
2573
2574 if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
2575 error = -EFAULT;
2576 goto out_kfree_ioc;
2577 }
2578
2579 instance = megasas_lookup_instance(ioc->host_no);
2580 if (!instance) {
2581 error = -ENODEV;
2582 goto out_kfree_ioc;
2583 }
2584
2585 /*
2586 * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds
2587 */
2588 if (down_interruptible(&instance->ioctl_sem)) {
2589 error = -ERESTARTSYS;
2590 goto out_kfree_ioc;
2591 }
2592 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
2593 up(&instance->ioctl_sem);
2594
2595 out_kfree_ioc:
2596 kfree(ioc);
2597 return error;
2598}
2599
2600static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
2601{
2602 struct megasas_instance *instance;
2603 struct megasas_aen aen;
2604 int error;
2605
2606 if (file->private_data != file) {
2607 printk(KERN_DEBUG "megasas: fasync_helper was not "
2608 "called first\n");
2609 return -EINVAL;
2610 }
2611
2612 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
2613 return -EFAULT;
2614
2615 instance = megasas_lookup_instance(aen.host_no);
2616
2617 if (!instance)
2618 return -ENODEV;
2619
2620 down(&instance->aen_mutex);
2621 error = megasas_register_aen(instance, aen.seq_num,
2622 aen.class_locale_word);
2623 up(&instance->aen_mutex);
2624 return error;
2625}
2626
2627/**
2628 * megasas_mgmt_ioctl - char node ioctl entry point
2629 */
2630static long
2631megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2632{
2633 switch (cmd) {
2634 case MEGASAS_IOC_FIRMWARE:
2635 return megasas_mgmt_ioctl_fw(file, arg);
2636
2637 case MEGASAS_IOC_GET_AEN:
2638 return megasas_mgmt_ioctl_aen(file, arg);
2639 }
2640
2641 return -ENOTTY;
2642}
2643
2644#ifdef CONFIG_COMPAT
2645static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
2646{
2647 struct compat_megasas_iocpacket __user *cioc =
2648 (struct compat_megasas_iocpacket __user *)arg;
2649 struct megasas_iocpacket __user *ioc =
2650 compat_alloc_user_space(sizeof(struct megasas_iocpacket));
2651 int i;
2652 int error = 0;
2653
2654 clear_user(ioc, sizeof(*ioc));
2655
2656 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
2657 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
2658 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
2659 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
2660 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
2661 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
2662 return -EFAULT;
2663
2664 for (i = 0; i < MAX_IOCTL_SGE; i++) {
2665 compat_uptr_t ptr;
2666
2667 if (get_user(ptr, &cioc->sgl[i].iov_base) ||
2668 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
2669 copy_in_user(&ioc->sgl[i].iov_len,
2670 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
2671 return -EFAULT;
2672 }
2673
2674 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
2675
2676 if (copy_in_user(&cioc->frame.hdr.cmd_status,
2677 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
2678 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
2679 return -EFAULT;
2680 }
2681 return error;
2682}
2683
2684static long
2685megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
2686 unsigned long arg)
2687{
2688 switch (cmd) {
2689 case MEGASAS_IOC_FIRMWARE:{
2690 return megasas_mgmt_compat_ioctl_fw(file, arg);
2691 }
2692 case MEGASAS_IOC_GET_AEN:
2693 return megasas_mgmt_ioctl_aen(file, arg);
2694 }
2695
2696 return -ENOTTY;
2697}
2698#endif
2699
2700/*
2701 * File operations structure for management interface
2702 */
2703static struct file_operations megasas_mgmt_fops = {
2704 .owner = THIS_MODULE,
2705 .open = megasas_mgmt_open,
2706 .release = megasas_mgmt_release,
2707 .fasync = megasas_mgmt_fasync,
2708 .unlocked_ioctl = megasas_mgmt_ioctl,
2709#ifdef CONFIG_COMPAT
2710 .compat_ioctl = megasas_mgmt_compat_ioctl,
2711#endif
2712};
2713
2714/*
2715 * PCI hotplug support registration structure
2716 */
2717static struct pci_driver megasas_pci_driver = {
2718
2719 .name = "megaraid_sas",
2720 .id_table = megasas_pci_table,
2721 .probe = megasas_probe_one,
2722 .remove = __devexit_p(megasas_detach_one),
2723 .shutdown = megasas_shutdown,
2724};
2725
2726/*
2727 * Sysfs driver attributes
2728 */
2729static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
2730{
2731 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
2732 MEGASAS_VERSION);
2733}
2734
2735static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
2736
2737static ssize_t
2738megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
2739{
2740 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
2741 MEGASAS_RELDATE);
2742}
2743
2744static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
2745 NULL);
2746
2747/**
2748 * megasas_init - Driver load entry point
2749 */
2750static int __init megasas_init(void)
2751{
2752 int rval;
2753
2754 /*
2755 * Announce driver version and other information
2756 */
2757 printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
2758 MEGASAS_EXT_VERSION);
2759
2760 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
2761
2762 /*
2763 * Register character device node
2764 */
2765 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
2766
2767 if (rval < 0) {
2768 printk(KERN_DEBUG "megasas: failed to open device node\n");
2769 return rval;
2770 }
2771
2772 megasas_mgmt_majorno = rval;
2773
2774 /*
2775 * Register ourselves as PCI hotplug module
2776 */
2777 rval = pci_module_init(&megasas_pci_driver);
2778
2779 if (rval) {
2780 printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
2781 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
2782 }
2783
2784 driver_create_file(&megasas_pci_driver.driver, &driver_attr_version);
2785 driver_create_file(&megasas_pci_driver.driver,
2786 &driver_attr_release_date);
2787
2788 return rval;
2789}
2790
2791/**
2792 * megasas_exit - Driver unload entry point
2793 */
2794static void __exit megasas_exit(void)
2795{
2796 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
2797 driver_remove_file(&megasas_pci_driver.driver,
2798 &driver_attr_release_date);
2799
2800 pci_unregister_driver(&megasas_pci_driver);
2801 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
2802}
2803
2804module_init(megasas_init);
2805module_exit(megasas_exit);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
new file mode 100644
index 000000000000..eaec9d531424
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -0,0 +1,1142 @@
1/*
2 *
3 * Linux MegaRAID driver for SAS based RAID controllers
4 *
5 * Copyright (c) 2003-2005 LSI Logic Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * FILE : megaraid_sas.h
13 */
14
15#ifndef LSI_MEGARAID_SAS_H
16#define LSI_MEGARAID_SAS_H
17
18/**
19 * MegaRAID SAS Driver meta data
20 */
21#define MEGASAS_VERSION "00.00.02.00-rc4"
22#define MEGASAS_RELDATE "Sep 16, 2005"
23#define MEGASAS_EXT_VERSION "Fri Sep 16 12:37:08 EDT 2005"
24
25/*
26 * =====================================
27 * MegaRAID SAS MFI firmware definitions
28 * =====================================
29 */
30
31/*
32 * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for
33 * the protocol between the software and firmware. Commands are issued using
34 * "message frames"
35 */
36
37/**
38 * FW posts its state in upper 4 bits of outbound_msg_0 register
39 */
40#define MFI_STATE_MASK 0xF0000000
41#define MFI_STATE_UNDEFINED 0x00000000
42#define MFI_STATE_BB_INIT 0x10000000
43#define MFI_STATE_FW_INIT 0x40000000
44#define MFI_STATE_WAIT_HANDSHAKE 0x60000000
45#define MFI_STATE_FW_INIT_2 0x70000000
46#define MFI_STATE_DEVICE_SCAN 0x80000000
47#define MFI_STATE_FLUSH_CACHE 0xA0000000
48#define MFI_STATE_READY 0xB0000000
49#define MFI_STATE_OPERATIONAL 0xC0000000
50#define MFI_STATE_FAULT 0xF0000000
51
52#define MEGAMFI_FRAME_SIZE 64
53
54/**
55 * During FW init, clear pending cmds & reset state using inbound_msg_0
56 *
57 * ABORT : Abort all pending cmds
58 * READY : Move from OPERATIONAL to READY state; discard queue info
59 * MFIMODE : Discard (possible) low MFA posted in 64-bit mode (??)
60 * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
61 */
62#define MFI_INIT_ABORT 0x00000000
63#define MFI_INIT_READY 0x00000002
64#define MFI_INIT_MFIMODE 0x00000004
65#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008
66#define MFI_RESET_FLAGS MFI_INIT_READY|MFI_INIT_MFIMODE
67
68/**
69 * MFI frame flags
70 */
71#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000
72#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001
73#define MFI_FRAME_SGL32 0x0000
74#define MFI_FRAME_SGL64 0x0002
75#define MFI_FRAME_SENSE32 0x0000
76#define MFI_FRAME_SENSE64 0x0004
77#define MFI_FRAME_DIR_NONE 0x0000
78#define MFI_FRAME_DIR_WRITE 0x0008
79#define MFI_FRAME_DIR_READ 0x0010
80#define MFI_FRAME_DIR_BOTH 0x0018
81
82/**
83 * Definition for cmd_status
84 */
85#define MFI_CMD_STATUS_POLL_MODE 0xFF
86
87/**
88 * MFI command opcodes
89 */
90#define MFI_CMD_INIT 0x00
91#define MFI_CMD_LD_READ 0x01
92#define MFI_CMD_LD_WRITE 0x02
93#define MFI_CMD_LD_SCSI_IO 0x03
94#define MFI_CMD_PD_SCSI_IO 0x04
95#define MFI_CMD_DCMD 0x05
96#define MFI_CMD_ABORT 0x06
97#define MFI_CMD_SMP 0x07
98#define MFI_CMD_STP 0x08
99
100#define MR_DCMD_CTRL_GET_INFO 0x01010000
101
102#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
103#define MR_FLUSH_CTRL_CACHE 0x01
104#define MR_FLUSH_DISK_CACHE 0x02
105
106#define MR_DCMD_CTRL_SHUTDOWN 0x01050000
107#define MR_ENABLE_DRIVE_SPINDOWN 0x01
108
109#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100
110#define MR_DCMD_CTRL_EVENT_GET 0x01040300
111#define MR_DCMD_CTRL_EVENT_WAIT 0x01040500
112#define MR_DCMD_LD_GET_PROPERTIES 0x03030000
113
114#define MR_DCMD_CLUSTER 0x08000000
115#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100
116#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
117
118/**
119 * MFI command completion codes
120 */
121enum MFI_STAT {
122 MFI_STAT_OK = 0x00,
123 MFI_STAT_INVALID_CMD = 0x01,
124 MFI_STAT_INVALID_DCMD = 0x02,
125 MFI_STAT_INVALID_PARAMETER = 0x03,
126 MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04,
127 MFI_STAT_ABORT_NOT_POSSIBLE = 0x05,
128 MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06,
129 MFI_STAT_APP_IN_USE = 0x07,
130 MFI_STAT_APP_NOT_INITIALIZED = 0x08,
131 MFI_STAT_ARRAY_INDEX_INVALID = 0x09,
132 MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a,
133 MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b,
134 MFI_STAT_DEVICE_NOT_FOUND = 0x0c,
135 MFI_STAT_DRIVE_TOO_SMALL = 0x0d,
136 MFI_STAT_FLASH_ALLOC_FAIL = 0x0e,
137 MFI_STAT_FLASH_BUSY = 0x0f,
138 MFI_STAT_FLASH_ERROR = 0x10,
139 MFI_STAT_FLASH_IMAGE_BAD = 0x11,
140 MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12,
141 MFI_STAT_FLASH_NOT_OPEN = 0x13,
142 MFI_STAT_FLASH_NOT_STARTED = 0x14,
143 MFI_STAT_FLUSH_FAILED = 0x15,
144 MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16,
145 MFI_STAT_LD_CC_IN_PROGRESS = 0x17,
146 MFI_STAT_LD_INIT_IN_PROGRESS = 0x18,
147 MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19,
148 MFI_STAT_LD_MAX_CONFIGURED = 0x1a,
149 MFI_STAT_LD_NOT_OPTIMAL = 0x1b,
150 MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c,
151 MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d,
152 MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e,
153 MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f,
154 MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20,
155 MFI_STAT_MFC_HW_ERROR = 0x21,
156 MFI_STAT_NO_HW_PRESENT = 0x22,
157 MFI_STAT_NOT_FOUND = 0x23,
158 MFI_STAT_NOT_IN_ENCL = 0x24,
159 MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25,
160 MFI_STAT_PD_TYPE_WRONG = 0x26,
161 MFI_STAT_PR_DISABLED = 0x27,
162 MFI_STAT_ROW_INDEX_INVALID = 0x28,
163 MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29,
164 MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a,
165 MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b,
166 MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c,
167 MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d,
168 MFI_STAT_SCSI_IO_FAILED = 0x2e,
169 MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f,
170 MFI_STAT_SHUTDOWN_FAILED = 0x30,
171 MFI_STAT_TIME_NOT_SET = 0x31,
172 MFI_STAT_WRONG_STATE = 0x32,
173 MFI_STAT_LD_OFFLINE = 0x33,
174 MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34,
175 MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35,
176 MFI_STAT_RESERVATION_IN_PROGRESS = 0x36,
177 MFI_STAT_I2C_ERRORS_DETECTED = 0x37,
178 MFI_STAT_PCI_ERRORS_DETECTED = 0x38,
179
180 MFI_STAT_INVALID_STATUS = 0xFF
181};
182
183/*
184 * Number of mailbox bytes in DCMD message frame
185 */
186#define MFI_MBOX_SIZE 12
187
188enum MR_EVT_CLASS {
189
190 MR_EVT_CLASS_DEBUG = -2,
191 MR_EVT_CLASS_PROGRESS = -1,
192 MR_EVT_CLASS_INFO = 0,
193 MR_EVT_CLASS_WARNING = 1,
194 MR_EVT_CLASS_CRITICAL = 2,
195 MR_EVT_CLASS_FATAL = 3,
196 MR_EVT_CLASS_DEAD = 4,
197
198};
199
200enum MR_EVT_LOCALE {
201
202 MR_EVT_LOCALE_LD = 0x0001,
203 MR_EVT_LOCALE_PD = 0x0002,
204 MR_EVT_LOCALE_ENCL = 0x0004,
205 MR_EVT_LOCALE_BBU = 0x0008,
206 MR_EVT_LOCALE_SAS = 0x0010,
207 MR_EVT_LOCALE_CTRL = 0x0020,
208 MR_EVT_LOCALE_CONFIG = 0x0040,
209 MR_EVT_LOCALE_CLUSTER = 0x0080,
210 MR_EVT_LOCALE_ALL = 0xffff,
211
212};
213
214enum MR_EVT_ARGS {
215
216 MR_EVT_ARGS_NONE,
217 MR_EVT_ARGS_CDB_SENSE,
218 MR_EVT_ARGS_LD,
219 MR_EVT_ARGS_LD_COUNT,
220 MR_EVT_ARGS_LD_LBA,
221 MR_EVT_ARGS_LD_OWNER,
222 MR_EVT_ARGS_LD_LBA_PD_LBA,
223 MR_EVT_ARGS_LD_PROG,
224 MR_EVT_ARGS_LD_STATE,
225 MR_EVT_ARGS_LD_STRIP,
226 MR_EVT_ARGS_PD,
227 MR_EVT_ARGS_PD_ERR,
228 MR_EVT_ARGS_PD_LBA,
229 MR_EVT_ARGS_PD_LBA_LD,
230 MR_EVT_ARGS_PD_PROG,
231 MR_EVT_ARGS_PD_STATE,
232 MR_EVT_ARGS_PCI,
233 MR_EVT_ARGS_RATE,
234 MR_EVT_ARGS_STR,
235 MR_EVT_ARGS_TIME,
236 MR_EVT_ARGS_ECC,
237
238};
239
240/*
241 * SAS controller properties
242 */
243struct megasas_ctrl_prop {
244
245 u16 seq_num;
246 u16 pred_fail_poll_interval;
247 u16 intr_throttle_count;
248 u16 intr_throttle_timeouts;
249 u8 rebuild_rate;
250 u8 patrol_read_rate;
251 u8 bgi_rate;
252 u8 cc_rate;
253 u8 recon_rate;
254 u8 cache_flush_interval;
255 u8 spinup_drv_count;
256 u8 spinup_delay;
257 u8 cluster_enable;
258 u8 coercion_mode;
259 u8 alarm_enable;
260 u8 disable_auto_rebuild;
261 u8 disable_battery_warn;
262 u8 ecc_bucket_size;
263 u16 ecc_bucket_leak_rate;
264 u8 restore_hotspare_on_insertion;
265 u8 expose_encl_devices;
266 u8 reserved[38];
267
268} __attribute__ ((packed));
269
270/*
271 * SAS controller information
272 */
273struct megasas_ctrl_info {
274
275 /*
276 * PCI device information
277 */
278 struct {
279
280 u16 vendor_id;
281 u16 device_id;
282 u16 sub_vendor_id;
283 u16 sub_device_id;
284 u8 reserved[24];
285
286 } __attribute__ ((packed)) pci;
287
288 /*
289 * Host interface information
290 */
291 struct {
292
293 u8 PCIX:1;
294 u8 PCIE:1;
295 u8 iSCSI:1;
296 u8 SAS_3G:1;
297 u8 reserved_0:4;
298 u8 reserved_1[6];
299 u8 port_count;
300 u64 port_addr[8];
301
302 } __attribute__ ((packed)) host_interface;
303
304 /*
305 * Device (backend) interface information
306 */
307 struct {
308
309 u8 SPI:1;
310 u8 SAS_3G:1;
311 u8 SATA_1_5G:1;
312 u8 SATA_3G:1;
313 u8 reserved_0:4;
314 u8 reserved_1[6];
315 u8 port_count;
316 u64 port_addr[8];
317
318 } __attribute__ ((packed)) device_interface;
319
320 /*
321 * List of components residing in flash. All strings are null terminated
322 */
323 u32 image_check_word;
324 u32 image_component_count;
325
326 struct {
327
328 char name[8];
329 char version[32];
330 char build_date[16];
331 char built_time[16];
332
333 } __attribute__ ((packed)) image_component[8];
334
335 /*
336 * List of flash components that have been flashed on the card, but
337 * are not in use, pending reset of the adapter. This list will be
338 * empty if a flash operation has not occurred. All strings are null
339 * terminated
340 */
341 u32 pending_image_component_count;
342
343 struct {
344
345 char name[8];
346 char version[32];
347 char build_date[16];
348 char build_time[16];
349
350 } __attribute__ ((packed)) pending_image_component[8];
351
352 u8 max_arms;
353 u8 max_spans;
354 u8 max_arrays;
355 u8 max_lds;
356
357 char product_name[80];
358 char serial_no[32];
359
360 /*
361 * Other physical/controller/operation information. Indicates the
362 * presence of the hardware
363 */
364 struct {
365
366 u32 bbu:1;
367 u32 alarm:1;
368 u32 nvram:1;
369 u32 uart:1;
370 u32 reserved:28;
371
372 } __attribute__ ((packed)) hw_present;
373
374 u32 current_fw_time;
375
376 /*
377 * Maximum data transfer sizes
378 */
379 u16 max_concurrent_cmds;
380 u16 max_sge_count;
381 u32 max_request_size;
382
383 /*
384 * Logical and physical device counts
385 */
386 u16 ld_present_count;
387 u16 ld_degraded_count;
388 u16 ld_offline_count;
389
390 u16 pd_present_count;
391 u16 pd_disk_present_count;
392 u16 pd_disk_pred_failure_count;
393 u16 pd_disk_failed_count;
394
395 /*
396 * Memory size information
397 */
398 u16 nvram_size;
399 u16 memory_size;
400 u16 flash_size;
401
402 /*
403 * Error counters
404 */
405 u16 mem_correctable_error_count;
406 u16 mem_uncorrectable_error_count;
407
408 /*
409 * Cluster information
410 */
411 u8 cluster_permitted;
412 u8 cluster_active;
413
414 /*
415 * Additional max data transfer sizes
416 */
417 u16 max_strips_per_io;
418
419 /*
420 * Controller capabilities structures
421 */
422 struct {
423
424 u32 raid_level_0:1;
425 u32 raid_level_1:1;
426 u32 raid_level_5:1;
427 u32 raid_level_1E:1;
428 u32 raid_level_6:1;
429 u32 reserved:27;
430
431 } __attribute__ ((packed)) raid_levels;
432
433 struct {
434
435 u32 rbld_rate:1;
436 u32 cc_rate:1;
437 u32 bgi_rate:1;
438 u32 recon_rate:1;
439 u32 patrol_rate:1;
440 u32 alarm_control:1;
441 u32 cluster_supported:1;
442 u32 bbu:1;
443 u32 spanning_allowed:1;
444 u32 dedicated_hotspares:1;
445 u32 revertible_hotspares:1;
446 u32 foreign_config_import:1;
447 u32 self_diagnostic:1;
448 u32 mixed_redundancy_arr:1;
449 u32 global_hot_spares:1;
450 u32 reserved:17;
451
452 } __attribute__ ((packed)) adapter_operations;
453
454 struct {
455
456 u32 read_policy:1;
457 u32 write_policy:1;
458 u32 io_policy:1;
459 u32 access_policy:1;
460 u32 disk_cache_policy:1;
461 u32 reserved:27;
462
463 } __attribute__ ((packed)) ld_operations;
464
465 struct {
466
467 u8 min;
468 u8 max;
469 u8 reserved[2];
470
471 } __attribute__ ((packed)) stripe_sz_ops;
472
473 struct {
474
475 u32 force_online:1;
476 u32 force_offline:1;
477 u32 force_rebuild:1;
478 u32 reserved:29;
479
480 } __attribute__ ((packed)) pd_operations;
481
482 struct {
483
484 u32 ctrl_supports_sas:1;
485 u32 ctrl_supports_sata:1;
486 u32 allow_mix_in_encl:1;
487 u32 allow_mix_in_ld:1;
488 u32 allow_sata_in_cluster:1;
489 u32 reserved:27;
490
491 } __attribute__ ((packed)) pd_mix_support;
492
493 /*
494 * Define ECC single-bit-error bucket information
495 */
496 u8 ecc_bucket_count;
497 u8 reserved_2[11];
498
499 /*
500 * Include the controller properties (changeable items)
501 */
502 struct megasas_ctrl_prop properties;
503
504 /*
505 * Define FW pkg version (set in envt v'bles on OEM basis)
506 */
507 char package_version[0x60];
508
509 u8 pad[0x800 - 0x6a0];
510
511} __attribute__ ((packed));
512
513/*
514 * ===============================
515 * MegaRAID SAS driver definitions
516 * ===============================
517 */
518#define MEGASAS_MAX_PD_CHANNELS 2
519#define MEGASAS_MAX_LD_CHANNELS 2
520#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \
521 MEGASAS_MAX_LD_CHANNELS)
522#define MEGASAS_MAX_DEV_PER_CHANNEL 128
523#define MEGASAS_DEFAULT_INIT_ID -1
524#define MEGASAS_MAX_LUN 8
525#define MEGASAS_MAX_LD 64
526
527/*
528 * When SCSI mid-layer calls driver's reset routine, driver waits for
529 * MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note
530 * that the driver cannot _actually_ abort or reset pending commands. While
531 * it is waiting for the commands to complete, it prints a diagnostic message
532 * every MEGASAS_RESET_NOTICE_INTERVAL seconds
533 */
534#define MEGASAS_RESET_WAIT_TIME 180
535#define MEGASAS_RESET_NOTICE_INTERVAL 5
536
537#define MEGASAS_IOCTL_CMD 0
538
539/*
540 * FW reports the maximum number of commands that it can accept (maximum
541 * commands that can be outstanding) at any time. The driver must report a
542 * lower number to the mid layer because it can issue a few internal commands
543 * itself (e.g., AEN, abort cmd, IOCTLs etc.). The number of commands it needs
544 * is shown below
545 */
546#define MEGASAS_INT_CMDS 32
547
548/*
549 * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
550 * SGLs based on the size of dma_addr_t
551 */
552#define IS_DMA64 (sizeof(dma_addr_t) == 8)
553
554#define MFI_OB_INTR_STATUS_MASK 0x00000002
555#define MFI_POLL_TIMEOUT_SECS 10
556
557struct megasas_register_set {
558
559 u32 reserved_0[4]; /*0000h */
560
561 u32 inbound_msg_0; /*0010h */
562 u32 inbound_msg_1; /*0014h */
563 u32 outbound_msg_0; /*0018h */
564 u32 outbound_msg_1; /*001Ch */
565
566 u32 inbound_doorbell; /*0020h */
567 u32 inbound_intr_status; /*0024h */
568 u32 inbound_intr_mask; /*0028h */
569
570 u32 outbound_doorbell; /*002Ch */
571 u32 outbound_intr_status; /*0030h */
572 u32 outbound_intr_mask; /*0034h */
573
574 u32 reserved_1[2]; /*0038h */
575
576 u32 inbound_queue_port; /*0040h */
577 u32 outbound_queue_port; /*0044h */
578
579 u32 reserved_2; /*004Ch */
580
581 u32 index_registers[1004]; /*0050h */
582
583} __attribute__ ((packed));
584
585struct megasas_sge32 {
586
587 u32 phys_addr;
588 u32 length;
589
590} __attribute__ ((packed));
591
592struct megasas_sge64 {
593
594 u64 phys_addr;
595 u32 length;
596
597} __attribute__ ((packed));
598
599union megasas_sgl {
600
601 struct megasas_sge32 sge32[1];
602 struct megasas_sge64 sge64[1];
603
604} __attribute__ ((packed));
605
606struct megasas_header {
607
608 u8 cmd; /*00h */
609 u8 sense_len; /*01h */
610 u8 cmd_status; /*02h */
611 u8 scsi_status; /*03h */
612
613 u8 target_id; /*04h */
614 u8 lun; /*05h */
615 u8 cdb_len; /*06h */
616 u8 sge_count; /*07h */
617
618 u32 context; /*08h */
619 u32 pad_0; /*0Ch */
620
621 u16 flags; /*10h */
622 u16 timeout; /*12h */
623 u32 data_xferlen; /*14h */
624
625} __attribute__ ((packed));
626
627union megasas_sgl_frame {
628
629 struct megasas_sge32 sge32[8];
630 struct megasas_sge64 sge64[5];
631
632} __attribute__ ((packed));
633
634struct megasas_init_frame {
635
636 u8 cmd; /*00h */
637 u8 reserved_0; /*01h */
638 u8 cmd_status; /*02h */
639
640 u8 reserved_1; /*03h */
641 u32 reserved_2; /*04h */
642
643 u32 context; /*08h */
644 u32 pad_0; /*0Ch */
645
646 u16 flags; /*10h */
647 u16 reserved_3; /*12h */
648 u32 data_xfer_len; /*14h */
649
650 u32 queue_info_new_phys_addr_lo; /*18h */
651 u32 queue_info_new_phys_addr_hi; /*1Ch */
652 u32 queue_info_old_phys_addr_lo; /*20h */
653 u32 queue_info_old_phys_addr_hi; /*24h */
654
655 u32 reserved_4[6]; /*28h */
656
657} __attribute__ ((packed));
658
659struct megasas_init_queue_info {
660
661 u32 init_flags; /*00h */
662 u32 reply_queue_entries; /*04h */
663
664 u32 reply_queue_start_phys_addr_lo; /*08h */
665 u32 reply_queue_start_phys_addr_hi; /*0Ch */
666 u32 producer_index_phys_addr_lo; /*10h */
667 u32 producer_index_phys_addr_hi; /*14h */
668 u32 consumer_index_phys_addr_lo; /*18h */
669 u32 consumer_index_phys_addr_hi; /*1Ch */
670
671} __attribute__ ((packed));
672
673struct megasas_io_frame {
674
675 u8 cmd; /*00h */
676 u8 sense_len; /*01h */
677 u8 cmd_status; /*02h */
678 u8 scsi_status; /*03h */
679
680 u8 target_id; /*04h */
681 u8 access_byte; /*05h */
682 u8 reserved_0; /*06h */
683 u8 sge_count; /*07h */
684
685 u32 context; /*08h */
686 u32 pad_0; /*0Ch */
687
688 u16 flags; /*10h */
689 u16 timeout; /*12h */
690 u32 lba_count; /*14h */
691
692 u32 sense_buf_phys_addr_lo; /*18h */
693 u32 sense_buf_phys_addr_hi; /*1Ch */
694
695 u32 start_lba_lo; /*20h */
696 u32 start_lba_hi; /*24h */
697
698 union megasas_sgl sgl; /*28h */
699
700} __attribute__ ((packed));
701
702struct megasas_pthru_frame {
703
704 u8 cmd; /*00h */
705 u8 sense_len; /*01h */
706 u8 cmd_status; /*02h */
707 u8 scsi_status; /*03h */
708
709 u8 target_id; /*04h */
710 u8 lun; /*05h */
711 u8 cdb_len; /*06h */
712 u8 sge_count; /*07h */
713
714 u32 context; /*08h */
715 u32 pad_0; /*0Ch */
716
717 u16 flags; /*10h */
718 u16 timeout; /*12h */
719 u32 data_xfer_len; /*14h */
720
721 u32 sense_buf_phys_addr_lo; /*18h */
722 u32 sense_buf_phys_addr_hi; /*1Ch */
723
724 u8 cdb[16]; /*20h */
725 union megasas_sgl sgl; /*30h */
726
727} __attribute__ ((packed));
728
729struct megasas_dcmd_frame {
730
731 u8 cmd; /*00h */
732 u8 reserved_0; /*01h */
733 u8 cmd_status; /*02h */
734 u8 reserved_1[4]; /*03h */
735 u8 sge_count; /*07h */
736
737 u32 context; /*08h */
738 u32 pad_0; /*0Ch */
739
740 u16 flags; /*10h */
741 u16 timeout; /*12h */
742
743 u32 data_xfer_len; /*14h */
744 u32 opcode; /*18h */
745
746 union { /*1Ch */
747 u8 b[12];
748 u16 s[6];
749 u32 w[3];
750 } mbox;
751
752 union megasas_sgl sgl; /*28h */
753
754} __attribute__ ((packed));
755
756struct megasas_abort_frame {
757
758 u8 cmd; /*00h */
759 u8 reserved_0; /*01h */
760 u8 cmd_status; /*02h */
761
762 u8 reserved_1; /*03h */
763 u32 reserved_2; /*04h */
764
765 u32 context; /*08h */
766 u32 pad_0; /*0Ch */
767
768 u16 flags; /*10h */
769 u16 reserved_3; /*12h */
770 u32 reserved_4; /*14h */
771
772 u32 abort_context; /*18h */
773 u32 pad_1; /*1Ch */
774
775 u32 abort_mfi_phys_addr_lo; /*20h */
776 u32 abort_mfi_phys_addr_hi; /*24h */
777
778 u32 reserved_5[6]; /*28h */
779
780} __attribute__ ((packed));
781
782struct megasas_smp_frame {
783
784 u8 cmd; /*00h */
785 u8 reserved_1; /*01h */
786 u8 cmd_status; /*02h */
787 u8 connection_status; /*03h */
788
789 u8 reserved_2[3]; /*04h */
790 u8 sge_count; /*07h */
791
792 u32 context; /*08h */
793 u32 pad_0; /*0Ch */
794
795 u16 flags; /*10h */
796 u16 timeout; /*12h */
797
798 u32 data_xfer_len; /*14h */
799 u64 sas_addr; /*18h */
800
801 union {
802 struct megasas_sge32 sge32[2]; /* [0]: resp [1]: req */
803 struct megasas_sge64 sge64[2]; /* [0]: resp [1]: req */
804 } sgl;
805
806} __attribute__ ((packed));
807
808struct megasas_stp_frame {
809
810 u8 cmd; /*00h */
811 u8 reserved_1; /*01h */
812 u8 cmd_status; /*02h */
813 u8 reserved_2; /*03h */
814
815 u8 target_id; /*04h */
816 u8 reserved_3[2]; /*05h */
817 u8 sge_count; /*07h */
818
819 u32 context; /*08h */
820 u32 pad_0; /*0Ch */
821
822 u16 flags; /*10h */
823 u16 timeout; /*12h */
824
825 u32 data_xfer_len; /*14h */
826
827 u16 fis[10]; /*18h */
828 u32 stp_flags;
829
830 union {
831 struct megasas_sge32 sge32[2]; /* [0]: resp [1]: data */
832 struct megasas_sge64 sge64[2]; /* [0]: resp [1]: data */
833 } sgl;
834
835} __attribute__ ((packed));
836
837union megasas_frame {
838
839 struct megasas_header hdr;
840 struct megasas_init_frame init;
841 struct megasas_io_frame io;
842 struct megasas_pthru_frame pthru;
843 struct megasas_dcmd_frame dcmd;
844 struct megasas_abort_frame abort;
845 struct megasas_smp_frame smp;
846 struct megasas_stp_frame stp;
847
848 u8 raw_bytes[64];
849};
850
851struct megasas_cmd;
852
853union megasas_evt_class_locale {
854
855 struct {
856 u16 locale;
857 u8 reserved;
858 s8 class;
859 } __attribute__ ((packed)) members;
860
861 u32 word;
862
863} __attribute__ ((packed));
864
865struct megasas_evt_log_info {
866 u32 newest_seq_num;
867 u32 oldest_seq_num;
868 u32 clear_seq_num;
869 u32 shutdown_seq_num;
870 u32 boot_seq_num;
871
872} __attribute__ ((packed));
873
874struct megasas_progress {
875
876 u16 progress;
877 u16 elapsed_seconds;
878
879} __attribute__ ((packed));
880
881struct megasas_evtarg_ld {
882
883 u16 target_id;
884 u8 ld_index;
885 u8 reserved;
886
887} __attribute__ ((packed));
888
889struct megasas_evtarg_pd {
890 u16 device_id;
891 u8 encl_index;
892 u8 slot_number;
893
894} __attribute__ ((packed));
895
896struct megasas_evt_detail {
897
898 u32 seq_num;
899 u32 time_stamp;
900 u32 code;
901 union megasas_evt_class_locale cl;
902 u8 arg_type;
903 u8 reserved1[15];
904
905 union {
906 struct {
907 struct megasas_evtarg_pd pd;
908 u8 cdb_length;
909 u8 sense_length;
910 u8 reserved[2];
911 u8 cdb[16];
912 u8 sense[64];
913 } __attribute__ ((packed)) cdbSense;
914
915 struct megasas_evtarg_ld ld;
916
917 struct {
918 struct megasas_evtarg_ld ld;
919 u64 count;
920 } __attribute__ ((packed)) ld_count;
921
922 struct {
923 u64 lba;
924 struct megasas_evtarg_ld ld;
925 } __attribute__ ((packed)) ld_lba;
926
927 struct {
928 struct megasas_evtarg_ld ld;
929 u32 prevOwner;
930 u32 newOwner;
931 } __attribute__ ((packed)) ld_owner;
932
933 struct {
934 u64 ld_lba;
935 u64 pd_lba;
936 struct megasas_evtarg_ld ld;
937 struct megasas_evtarg_pd pd;
938 } __attribute__ ((packed)) ld_lba_pd_lba;
939
940 struct {
941 struct megasas_evtarg_ld ld;
942 struct megasas_progress prog;
943 } __attribute__ ((packed)) ld_prog;
944
945 struct {
946 struct megasas_evtarg_ld ld;
947 u32 prev_state;
948 u32 new_state;
949 } __attribute__ ((packed)) ld_state;
950
951 struct {
952 u64 strip;
953 struct megasas_evtarg_ld ld;
954 } __attribute__ ((packed)) ld_strip;
955
956 struct megasas_evtarg_pd pd;
957
958 struct {
959 struct megasas_evtarg_pd pd;
960 u32 err;
961 } __attribute__ ((packed)) pd_err;
962
963 struct {
964 u64 lba;
965 struct megasas_evtarg_pd pd;
966 } __attribute__ ((packed)) pd_lba;
967
968 struct {
969 u64 lba;
970 struct megasas_evtarg_pd pd;
971 struct megasas_evtarg_ld ld;
972 } __attribute__ ((packed)) pd_lba_ld;
973
974 struct {
975 struct megasas_evtarg_pd pd;
976 struct megasas_progress prog;
977 } __attribute__ ((packed)) pd_prog;
978
979 struct {
980 struct megasas_evtarg_pd pd;
981 u32 prevState;
982 u32 newState;
983 } __attribute__ ((packed)) pd_state;
984
985 struct {
986 u16 vendorId;
987 u16 deviceId;
988 u16 subVendorId;
989 u16 subDeviceId;
990 } __attribute__ ((packed)) pci;
991
992 u32 rate;
993 char str[96];
994
995 struct {
996 u32 rtc;
997 u32 elapsedSeconds;
998 } __attribute__ ((packed)) time;
999
1000 struct {
1001 u32 ecar;
1002 u32 elog;
1003 char str[64];
1004 } __attribute__ ((packed)) ecc;
1005
1006 u8 b[96];
1007 u16 s[48];
1008 u32 w[24];
1009 u64 d[12];
1010 } args;
1011
1012 char description[128];
1013
1014} __attribute__ ((packed));
1015
1016struct megasas_instance {
1017
1018 u32 *producer;
1019 dma_addr_t producer_h;
1020 u32 *consumer;
1021 dma_addr_t consumer_h;
1022
1023 u32 *reply_queue;
1024 dma_addr_t reply_queue_h;
1025
1026 unsigned long base_addr;
1027 struct megasas_register_set __iomem *reg_set;
1028
1029 s8 init_id;
1030 u8 reserved[3];
1031
1032 u16 max_num_sge;
1033 u16 max_fw_cmds;
1034 u32 max_sectors_per_req;
1035
1036 struct megasas_cmd **cmd_list;
1037 struct list_head cmd_pool;
1038 spinlock_t cmd_pool_lock;
1039 struct dma_pool *frame_dma_pool;
1040 struct dma_pool *sense_dma_pool;
1041
1042 struct megasas_evt_detail *evt_detail;
1043 dma_addr_t evt_detail_h;
1044 struct megasas_cmd *aen_cmd;
1045 struct semaphore aen_mutex;
1046 struct semaphore ioctl_sem;
1047
1048 struct Scsi_Host *host;
1049
1050 wait_queue_head_t int_cmd_wait_q;
1051 wait_queue_head_t abort_cmd_wait_q;
1052
1053 struct pci_dev *pdev;
1054 u32 unique_id;
1055
1056 u32 fw_outstanding;
1057 u32 hw_crit_error;
1058 spinlock_t instance_lock;
1059};
1060
1061#define MEGASAS_IS_LOGICAL(scp) \
1062 (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
1063
1064#define MEGASAS_DEV_INDEX(inst, scp) \
1065 ((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \
1066 scp->device->id
1067
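/*
 * Illustration (hypothetical command): a SCSI command addressed to channel 3,
 * id 4 targets a logical drive (channels 0-1 are physical, 2-3 logical), and
 * MEGASAS_DEV_INDEX maps it to (3 % 2) * 128 + 4 = 132.
 */
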
1068struct megasas_cmd {
1069
1070 union megasas_frame *frame;
1071 dma_addr_t frame_phys_addr;
1072 u8 *sense;
1073 dma_addr_t sense_phys_addr;
1074
1075 u32 index;
1076 u8 sync_cmd;
1077 u8 cmd_status;
1078 u16 abort_aen;
1079
1080 struct list_head list;
1081 struct scsi_cmnd *scmd;
1082 struct megasas_instance *instance;
1083 u32 frame_count;
1084};
1085
1086#define MAX_MGMT_ADAPTERS 1024
1087#define MAX_IOCTL_SGE 16
1088
1089struct megasas_iocpacket {
1090
1091 u16 host_no;
1092 u16 __pad1;
1093 u32 sgl_off;
1094 u32 sge_count;
1095 u32 sense_off;
1096 u32 sense_len;
1097 union {
1098 u8 raw[128];
1099 struct megasas_header hdr;
1100 } frame;
1101
1102 struct iovec sgl[MAX_IOCTL_SGE];
1103
1104} __attribute__ ((packed));
1105
1106struct megasas_aen {
1107 u16 host_no;
1108 u16 __pad1;
1109 u32 seq_num;
1110 u32 class_locale_word;
1111} __attribute__ ((packed));
1112
1113#ifdef CONFIG_COMPAT
1114struct compat_megasas_iocpacket {
1115 u16 host_no;
1116 u16 __pad1;
1117 u32 sgl_off;
1118 u32 sge_count;
1119 u32 sense_off;
1120 u32 sense_len;
1121 union {
1122 u8 raw[128];
1123 struct megasas_header hdr;
1124 } frame;
1125 struct compat_iovec sgl[MAX_IOCTL_SGE];
1126} __attribute__ ((packed));
1127
1128#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct compat_megasas_iocpacket)
1129#else
1130#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket)
1131#endif
1132
1133#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen)
1134
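/*
 * Editorial sketch (not part of this patch): how a management application
 * might register for AEN delivery from userspace. The device node path and
 * host number are assumptions; the structure layout and ioctl number come
 * from this header, and the requirement that fasync registration precede
 * the ioctl comes from megasas_mgmt_ioctl_aen() in the driver.
 */
#if 0	/* userspace example, never built as part of the kernel */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <unistd.h>

/* Userspace mirror of struct megasas_aen (defined earlier in this header) */
struct megasas_aen {
	uint16_t host_no;
	uint16_t __pad1;
	uint32_t seq_num;
	uint32_t class_locale_word;
} __attribute__ ((packed));

#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen)

int main(void)
{
	/* Low 16 bits: locale (ALL = 0xffff); top byte: class (INFO = 0) */
	struct megasas_aen aen = {
		.host_no = 0,			/* assumed SCSI host number */
		.seq_num = 0,
		.class_locale_word = 0x0000ffff,
	};
	int fd = open("/dev/megaraid_sas_ioctl", O_RDWR); /* node name assumed */

	if (fd < 0)
		return 1;

	/* The driver rejects AEN ioctls unless fasync registration came first */
	fcntl(fd, F_SETOWN, getpid());
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);

	if (ioctl(fd, MEGASAS_IOC_GET_AEN, &aen) < 0) {
		perror("MEGASAS_IOC_GET_AEN");
		return 1;
	}

	return 0;	/* SIGIO arrives when a matching event occurs */
}
#endif
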
1135struct megasas_mgmt_info {
1136
1137 u16 count;
1138 struct megasas_instance *instance[MAX_MGMT_ADAPTERS];
1139 int max_index;
1140};
1141
1142#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index a4857db4f9b8..b235556b7b65 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1959,22 +1959,35 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
1959 /* Set it up */ 1959 /* Set it up */
1960 mesh_init(ms); 1960 mesh_init(ms);
1961 1961
1962 /* XXX FIXME: error should be fatal */ 1962 /* Request interrupt */
1963 if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) 1963 if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) {
1964 printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr); 1964 printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr);
1965 goto out_shutdown;
1966 }
1965 1967
1966 /* XXX FIXME: handle failure */ 1968 /* Add scsi host & scan */
1967 scsi_add_host(mesh_host, &mdev->ofdev.dev); 1969 if (scsi_add_host(mesh_host, &mdev->ofdev.dev))
1970 goto out_release_irq;
1968 scsi_scan_host(mesh_host); 1971 scsi_scan_host(mesh_host);
1969 1972
1970 return 0; 1973 return 0;
1971 1974
1972out_unmap: 1975 out_release_irq:
1976 free_irq(ms->meshintr, ms);
1977 out_shutdown:
1978 /* shutdown & reset bus in case of error or macos can be confused
1979 * at reboot if the bus was set to synchronous mode already
1980 */
1981 mesh_shutdown(mdev);
1982 set_mesh_power(ms, 0);
1983 pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
1984 ms->dma_cmd_space, ms->dma_cmd_bus);
1985 out_unmap:
1973 iounmap(ms->dma); 1986 iounmap(ms->dma);
1974 iounmap(ms->mesh); 1987 iounmap(ms->mesh);
1975out_free: 1988 out_free:
1976 scsi_host_put(mesh_host); 1989 scsi_host_put(mesh_host);
1977out_release: 1990 out_release:
1978 macio_release_resources(mdev); 1991 macio_release_resources(mdev);
1979 1992
1980 return -ENODEV; 1993 return -ENODEV;
@@ -2001,7 +2014,7 @@ static int mesh_remove(struct macio_dev *mdev)
2001 2014
2002 /* Free DMA commands memory */ 2015 /* Free DMA commands memory */
2003 pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size, 2016 pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
2004 ms->dma_cmd_space, ms->dma_cmd_bus); 2017 ms->dma_cmd_space, ms->dma_cmd_bus);
2005 2018
2006 /* Release memory resources */ 2019 /* Release memory resources */
2007 macio_release_resources(mdev); 2020 macio_release_resources(mdev);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3e9b64137873..23d095d3817b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -201,6 +201,7 @@ int
 qla2100_pci_config(scsi_qla_host_t *ha)
 {
 	uint16_t w, mwi;
+	uint32_t d;
 	unsigned long flags;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 
@@ -215,9 +216,9 @@ qla2100_pci_config(scsi_qla_host_t *ha)
 	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
 
 	/* Reset expansion ROM address decode enable */
-	pci_read_config_word(ha->pdev, PCI_ROM_ADDRESS, &w);
-	w &= ~PCI_ROM_ADDRESS_ENABLE;
-	pci_write_config_word(ha->pdev, PCI_ROM_ADDRESS, w);
+	pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
+	d &= ~PCI_ROM_ADDRESS_ENABLE;
+	pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
 
 	/* Get PCI bus information. */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -237,6 +238,7 @@ int
 qla2300_pci_config(scsi_qla_host_t *ha)
 {
 	uint16_t w, mwi;
+	uint32_t d;
 	unsigned long flags = 0;
 	uint32_t cnt;
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -302,9 +304,9 @@ qla2300_pci_config(scsi_qla_host_t *ha)
 	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
 
 	/* Reset expansion ROM address decode enable */
-	pci_read_config_word(ha->pdev, PCI_ROM_ADDRESS, &w);
-	w &= ~PCI_ROM_ADDRESS_ENABLE;
-	pci_write_config_word(ha->pdev, PCI_ROM_ADDRESS, w);
+	pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
+	d &= ~PCI_ROM_ADDRESS_ENABLE;
+	pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
 
 	/* Get PCI bus information. */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -324,6 +326,7 @@ int
 qla24xx_pci_config(scsi_qla_host_t *ha)
 {
 	uint16_t w, mwi;
+	uint32_t d;
 	unsigned long flags = 0;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	int pcix_cmd_reg, pcie_dctl_reg;
@@ -366,9 +369,9 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
 	}
 
 	/* Reset expansion ROM address decode enable */
-	pci_read_config_word(ha->pdev, PCI_ROM_ADDRESS, &w);
-	w &= ~PCI_ROM_ADDRESS_ENABLE;
-	pci_write_config_word(ha->pdev, PCI_ROM_ADDRESS, w);
+	pci_read_config_dword(ha->pdev, PCI_ROM_ADDRESS, &d);
+	d &= ~PCI_ROM_ADDRESS_ENABLE;
+	pci_write_config_dword(ha->pdev, PCI_ROM_ADDRESS, d);
 
 	/* Get PCI bus information. */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_rscn.c b/drivers/scsi/qla2xxx/qla_rscn.c
index bdc3bc74bbe1..1eba98828636 100644
--- a/drivers/scsi/qla2xxx/qla_rscn.c
+++ b/drivers/scsi/qla2xxx/qla_rscn.c
@@ -330,6 +330,8 @@ qla2x00_update_login_fcport(scsi_qla_host_t *ha, struct mbx_entry *mbxstat,
330 fcport->flags &= ~FCF_FAILOVER_NEEDED; 330 fcport->flags &= ~FCF_FAILOVER_NEEDED;
331 fcport->iodesc_idx_sent = IODESC_INVALID_INDEX; 331 fcport->iodesc_idx_sent = IODESC_INVALID_INDEX;
332 atomic_set(&fcport->state, FCS_ONLINE); 332 atomic_set(&fcport->state, FCS_ONLINE);
333 if (fcport->rport)
334 fc_remote_port_unblock(fcport->rport);
333} 335}
334 336
335 337
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index a1d62dee3be6..c05653c7779d 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -158,6 +158,8 @@ static struct pci_device_id nv_pci_tbl[] = {
158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 }, 158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 },
159 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA, 159 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 }, 160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
161 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
161 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, 163 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
162 PCI_ANY_ID, PCI_ANY_ID, 164 PCI_ANY_ID, PCI_ANY_ID,
163 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC }, 165 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index a63f93186e41..b227e51d12f4 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -161,7 +161,7 @@ static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
161{ 161{
162 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 162 struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
163 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device); 163 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device);
164 u32 val, val2; 164 u32 val, val2 = 0;
165 u8 pmr; 165 u8 pmr;
166 166
167 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */ 167 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
@@ -289,7 +289,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
289 if (ent->device != 0x182) { 289 if (ent->device != 0x182) {
290 if ((pmr & SIS_PMR_COMBINED) == 0) { 290 if ((pmr & SIS_PMR_COMBINED) == 0) {
291 printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in SATA mode\n"); 291 printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in SATA mode\n");
292 port2_start=0x64; 292 port2_start = 64;
293 } 293 }
294 else { 294 else {
295 printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in combined mode\n"); 295 printk(KERN_INFO "sata_sis: Detected SiS 180/181 chipset in combined mode\n");
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index a780546eda9c..1f0ebabf6d47 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1265,9 +1265,8 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery)
 	list_for_each_safe(lh, lh_sf, &active_list) {
 		scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
 		list_del_init(lh);
-		if (recovery) {
-			scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD);
-		} else {
+		if (recovery &&
+		    !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
 			scmd->result = (DID_ABORT << 16);
 			scsi_finish_command(scmd);
 		}
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 07b554affcf2..64fc9e21f35b 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -110,6 +110,7 @@ static struct {
110 {"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */ 110 {"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */
111 {"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */ 111 {"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
112 {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN}, 112 {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN},
113 {"transtec", "T5008", "0001", BLIST_NOREPORTLUN },
113 {"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */ 114 {"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */
114 {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */ 115 {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
115 {"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */ 116 {"YAMAHA", "CRW8424S", "1.0", BLIST_NOLUN}, /* locks up */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 895c9452be4c..ad5342165079 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -50,7 +50,7 @@
 void scsi_eh_wakeup(struct Scsi_Host *shost)
 {
 	if (shost->host_busy == shost->host_failed) {
-		up(shost->eh_wait);
+		wake_up_process(shost->ehandler);
 		SCSI_LOG_ERROR_RECOVERY(5,
 				printk("Waking error handler thread\n"));
 	}
@@ -68,19 +68,24 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
 {
 	struct Scsi_Host *shost = scmd->device->host;
 	unsigned long flags;
+	int ret = 0;
 
-	if (shost->eh_wait == NULL)
+	if (!shost->ehandler)
 		return 0;
 
 	spin_lock_irqsave(shost->host_lock, flags);
+	if (scsi_host_set_state(shost, SHOST_RECOVERY))
+		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
+			goto out_unlock;
 
+	ret = 1;
 	scmd->eh_eflags |= eh_flag;
 	list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
-	scsi_host_set_state(shost, SHOST_RECOVERY);
 	shost->host_failed++;
 	scsi_eh_wakeup(shost);
+ out_unlock:
 	spin_unlock_irqrestore(shost->host_lock, flags);
-	return 1;
+	return ret;
 }
 
 /**
@@ -176,8 +181,8 @@ void scsi_times_out(struct scsi_cmnd *scmd)
 	}
 
 	if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
-		panic("Error handler thread not present at %p %p %s %d",
-		      scmd, scmd->device->host, __FILE__, __LINE__);
+		scmd->result |= DID_TIME_OUT << 16;
+		__scsi_done(scmd);
 	}
 }
 
@@ -196,8 +201,7 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
 {
 	int online;
 
-	wait_event(sdev->host->host_wait, (sdev->host->shost_state !=
-					   SHOST_RECOVERY));
+	wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host));
 
 	online = scsi_device_online(sdev);
 
@@ -1441,6 +1445,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
 static void scsi_restart_operations(struct Scsi_Host *shost)
 {
 	struct scsi_device *sdev;
+	unsigned long flags;
 
 	/*
 	 * If the door was locked, we need to insert a door lock request
@@ -1460,7 +1465,11 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
 	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
 					  __FUNCTION__));
 
-	scsi_host_set_state(shost, SHOST_RUNNING);
+	spin_lock_irqsave(shost->host_lock, flags);
+	if (scsi_host_set_state(shost, SHOST_RUNNING))
+		if (scsi_host_set_state(shost, SHOST_CANCEL))
+			BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
+	spin_unlock_irqrestore(shost->host_lock, flags);
 
 	wake_up(&shost->host_wait);
 
@@ -1582,40 +1591,31 @@ int scsi_error_handler(void *data)
 {
 	struct Scsi_Host *shost = (struct Scsi_Host *) data;
 	int rtn;
-	DECLARE_MUTEX_LOCKED(sem);
 
 	current->flags |= PF_NOFREEZE;
-	shost->eh_wait = &sem;
 
+
 	/*
-	 * Wake up the thread that created us.
+	 * Note - we always use TASK_INTERRUPTIBLE even if the module
+	 * was loaded as part of the kernel. The reason is that
+	 * UNINTERRUPTIBLE would cause this thread to be counted in
+	 * the load average as a running process, and an interruptible
+	 * wait doesn't.
 	 */
-	SCSI_LOG_ERROR_RECOVERY(3, printk("Wake up parent of"
-					  " scsi_eh_%d\n",shost->host_no));
-
-	while (1) {
-		/*
-		 * If we get a signal, it means we are supposed to go
-		 * away and die. This typically happens if the user is
-		 * trying to unload a module.
-		 */
-		SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
-						  " scsi_eh_%d"
-						  " sleeping\n",shost->host_no));
-
-		/*
-		 * Note - we always use down_interruptible with the semaphore
-		 * even if the module was loaded as part of the kernel. The
-		 * reason is that down() will cause this thread to be counted
-		 * in the load average as a running process, and down
-		 * interruptible doesn't. Given that we need to allow this
-		 * thread to die if the driver was loaded as a module, using
-		 * semaphores isn't unreasonable.
-		 */
-		down_interruptible(&sem);
-		if (kthread_should_stop())
-			break;
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		if (shost->host_failed == 0 ||
+		    shost->host_failed != shost->host_busy) {
+			SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
+							  " scsi_eh_%d"
+							  " sleeping\n",
+							  shost->host_no));
+			schedule();
+			set_current_state(TASK_INTERRUPTIBLE);
+			continue;
+		}
 
+		__set_current_state(TASK_RUNNING);
 		SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
 						  " scsi_eh_%d waking"
 						  " up\n",shost->host_no));
@@ -1642,7 +1642,7 @@ int scsi_error_handler(void *data)
 		 * which are still online.
 		 */
 		scsi_restart_operations(shost);
-
+		set_current_state(TASK_INTERRUPTIBLE);
 	}
 
 	SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler scsi_eh_%d"
@@ -1651,7 +1651,7 @@ int scsi_error_handler(void *data)
 	/*
 	 * Make sure that nobody tries to wake us up again.
 	 */
-	shost->eh_wait = NULL;
+	shost->ehandler = NULL;
 	return 0;
 }
 
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index b7fddac81347..de7f98cc38fe 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -458,7 +458,7 @@ int scsi_nonblockable_ioctl(struct scsi_device *sdev, int cmd,
458 * error processing, as long as the device was opened 458 * error processing, as long as the device was opened
459 * non-blocking */ 459 * non-blocking */
460 if (filp && filp->f_flags & O_NONBLOCK) { 460 if (filp && filp->f_flags & O_NONBLOCK) {
461 if (sdev->host->shost_state == SHOST_RECOVERY) 461 if (scsi_host_in_recovery(sdev->host))
462 return -ENODEV; 462 return -ENODEV;
463 } else if (!scsi_block_when_processing_errors(sdev)) 463 } else if (!scsi_block_when_processing_errors(sdev))
464 return -ENODEV; 464 return -ENODEV;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 863bb6495daa..dc9c772bc874 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -118,7 +118,6 @@ static void scsi_unprep_request(struct request *req)
118 req->flags &= ~REQ_DONTPREP; 118 req->flags &= ~REQ_DONTPREP;
119 req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL; 119 req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
120 120
121 scsi_release_buffers(cmd);
122 scsi_put_command(cmd); 121 scsi_put_command(cmd);
123} 122}
124 123
@@ -140,14 +139,12 @@ static void scsi_unprep_request(struct request *req)
140 * commands. 139 * commands.
141 * Notes: This could be called either from an interrupt context or a 140 * Notes: This could be called either from an interrupt context or a
142 * normal process context. 141 * normal process context.
143 * Notes: Upon return, cmd is a stale pointer.
144 */ 142 */
145int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) 143int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
146{ 144{
147 struct Scsi_Host *host = cmd->device->host; 145 struct Scsi_Host *host = cmd->device->host;
148 struct scsi_device *device = cmd->device; 146 struct scsi_device *device = cmd->device;
149 struct request_queue *q = device->request_queue; 147 struct request_queue *q = device->request_queue;
150 struct request *req = cmd->request;
151 unsigned long flags; 148 unsigned long flags;
152 149
153 SCSI_LOG_MLQUEUE(1, 150 SCSI_LOG_MLQUEUE(1,
@@ -188,9 +185,8 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
188 * function. The SCSI request function detects the blocked condition 185 * function. The SCSI request function detects the blocked condition
189 * and plugs the queue appropriately. 186 * and plugs the queue appropriately.
190 */ 187 */
191 scsi_unprep_request(req);
192 spin_lock_irqsave(q->queue_lock, flags); 188 spin_lock_irqsave(q->queue_lock, flags);
193 blk_requeue_request(q, req); 189 blk_requeue_request(q, cmd->request);
194 spin_unlock_irqrestore(q->queue_lock, flags); 190 spin_unlock_irqrestore(q->queue_lock, flags);
195 191
196 scsi_run_queue(q); 192 scsi_run_queue(q);
@@ -451,7 +447,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
451 447
452 spin_lock_irqsave(shost->host_lock, flags); 448 spin_lock_irqsave(shost->host_lock, flags);
453 shost->host_busy--; 449 shost->host_busy--;
454 if (unlikely((shost->shost_state == SHOST_RECOVERY) && 450 if (unlikely(scsi_host_in_recovery(shost) &&
455 shost->host_failed)) 451 shost->host_failed))
456 scsi_eh_wakeup(shost); 452 scsi_eh_wakeup(shost);
457 spin_unlock(shost->host_lock); 453 spin_unlock(shost->host_lock);
@@ -1268,6 +1264,7 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
1268 } 1264 }
1269 } else { 1265 } else {
1270 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd)); 1266 memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
1267 cmd->cmd_len = req->cmd_len;
1271 if (rq_data_dir(req) == WRITE) 1268 if (rq_data_dir(req) == WRITE)
1272 cmd->sc_data_direction = DMA_TO_DEVICE; 1269 cmd->sc_data_direction = DMA_TO_DEVICE;
1273 else if (req->data_len) 1270 else if (req->data_len)
@@ -1342,7 +1339,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
1342 struct Scsi_Host *shost, 1339 struct Scsi_Host *shost,
1343 struct scsi_device *sdev) 1340 struct scsi_device *sdev)
1344{ 1341{
1345 if (shost->shost_state == SHOST_RECOVERY) 1342 if (scsi_host_in_recovery(shost))
1346 return 0; 1343 return 0;
1347 if (shost->host_busy == 0 && shost->host_blocked) { 1344 if (shost->host_busy == 0 && shost->host_blocked) {
1348 /* 1345 /*
@@ -1514,7 +1511,6 @@ static void scsi_request_fn(struct request_queue *q)
1514 * cases (host limits or settings) should run the queue at some 1511 * cases (host limits or settings) should run the queue at some
1515 * later time. 1512 * later time.
1516 */ 1513 */
1517 scsi_unprep_request(req);
1518 spin_lock_irq(q->queue_lock); 1514 spin_lock_irq(q->queue_lock);
1519 blk_requeue_request(q, req); 1515 blk_requeue_request(q, req);
1520 sdev->device_busy--; 1516 sdev->device_busy--;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index b86f170fa8ed..327c5d7e5bd2 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -587,6 +587,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result,
587 if (sdev->scsi_level >= 2 || 587 if (sdev->scsi_level >= 2 ||
588 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1)) 588 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
589 sdev->scsi_level++; 589 sdev->scsi_level++;
590 sdev->sdev_target->scsi_level = sdev->scsi_level;
590 591
591 return 0; 592 return 0;
592} 593}
@@ -771,6 +772,15 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
771 return SCSI_SCAN_LUN_PRESENT; 772 return SCSI_SCAN_LUN_PRESENT;
772} 773}
773 774
775static inline void scsi_destroy_sdev(struct scsi_device *sdev)
776{
777 if (sdev->host->hostt->slave_destroy)
778 sdev->host->hostt->slave_destroy(sdev);
779 transport_destroy_device(&sdev->sdev_gendev);
780 put_device(&sdev->sdev_gendev);
781}
782
783
774/** 784/**
775 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it 785 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
776 * @starget: pointer to target device structure 786 * @starget: pointer to target device structure
@@ -803,9 +813,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
803 * The rescan flag is used as an optimization, the first scan of a 813 * The rescan flag is used as an optimization, the first scan of a
804 * host adapter calls into here with rescan == 0. 814 * host adapter calls into here with rescan == 0.
805 */ 815 */
806 if (rescan) { 816 sdev = scsi_device_lookup_by_target(starget, lun);
807 sdev = scsi_device_lookup_by_target(starget, lun); 817 if (sdev) {
808 if (sdev) { 818 if (rescan || sdev->sdev_state != SDEV_CREATED) {
809 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO 819 SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
810 "scsi scan: device exists on %s\n", 820 "scsi scan: device exists on %s\n",
811 sdev->sdev_gendev.bus_id)); 821 sdev->sdev_gendev.bus_id));
@@ -820,9 +830,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
820 sdev->model); 830 sdev->model);
821 return SCSI_SCAN_LUN_PRESENT; 831 return SCSI_SCAN_LUN_PRESENT;
822 } 832 }
823 } 833 scsi_device_put(sdev);
824 834 } else
825 sdev = scsi_alloc_sdev(starget, lun, hostdata); 835 sdev = scsi_alloc_sdev(starget, lun, hostdata);
826 if (!sdev) 836 if (!sdev)
827 goto out; 837 goto out;
828 838
@@ -877,12 +887,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
877 res = SCSI_SCAN_NO_RESPONSE; 887 res = SCSI_SCAN_NO_RESPONSE;
878 } 888 }
879 } 889 }
880 } else { 890 } else
881 if (sdev->host->hostt->slave_destroy) 891 scsi_destroy_sdev(sdev);
882 sdev->host->hostt->slave_destroy(sdev);
883 transport_destroy_device(&sdev->sdev_gendev);
884 put_device(&sdev->sdev_gendev);
885 }
886 out: 892 out:
887 return res; 893 return res;
888} 894}
@@ -1054,7 +1060,7 @@ EXPORT_SYMBOL(int_to_scsilun);
1054 * 0: scan completed (or no memory, so further scanning is futile) 1060 * 0: scan completed (or no memory, so further scanning is futile)
1055 * 1: no report lun scan, or not configured 1061 * 1: no report lun scan, or not configured
1056 **/ 1062 **/
1057static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags, 1063static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1058 int rescan) 1064 int rescan)
1059{ 1065{
1060 char devname[64]; 1066 char devname[64];
@@ -1067,7 +1073,8 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1067 struct scsi_lun *lunp, *lun_data; 1073 struct scsi_lun *lunp, *lun_data;
1068 u8 *data; 1074 u8 *data;
1069 struct scsi_sense_hdr sshdr; 1075 struct scsi_sense_hdr sshdr;
1070 struct scsi_target *starget = scsi_target(sdev); 1076 struct scsi_device *sdev;
1077 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1071 1078
1072 /* 1079 /*
1073 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set. 1080 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
@@ -1075,15 +1082,23 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1075 * support more than 8 LUNs. 1082 * support more than 8 LUNs.
1076 */ 1083 */
1077 if ((bflags & BLIST_NOREPORTLUN) || 1084 if ((bflags & BLIST_NOREPORTLUN) ||
1078 sdev->scsi_level < SCSI_2 || 1085 starget->scsi_level < SCSI_2 ||
1079 (sdev->scsi_level < SCSI_3 && 1086 (starget->scsi_level < SCSI_3 &&
1080 (!(bflags & BLIST_REPORTLUN2) || sdev->host->max_lun <= 8)) ) 1087 (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) )
1081 return 1; 1088 return 1;
1082 if (bflags & BLIST_NOLUN) 1089 if (bflags & BLIST_NOLUN)
1083 return 0; 1090 return 0;
1084 1091
1092 if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
1093 sdev = scsi_alloc_sdev(starget, 0, NULL);
1094 if (!sdev)
1095 return 0;
1096 if (scsi_device_get(sdev))
1097 return 0;
1098 }
1099
1085 sprintf(devname, "host %d channel %d id %d", 1100 sprintf(devname, "host %d channel %d id %d",
1086 sdev->host->host_no, sdev->channel, sdev->id); 1101 shost->host_no, sdev->channel, sdev->id);
1087 1102
1088 /* 1103 /*
1089 * Allocate enough to hold the header (the same size as one scsi_lun) 1104 * Allocate enough to hold the header (the same size as one scsi_lun)
@@ -1098,8 +1113,10 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1098 length = (max_scsi_report_luns + 1) * sizeof(struct scsi_lun); 1113 length = (max_scsi_report_luns + 1) * sizeof(struct scsi_lun);
1099 lun_data = kmalloc(length, GFP_ATOMIC | 1114 lun_data = kmalloc(length, GFP_ATOMIC |
1100 (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); 1115 (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
1101 if (!lun_data) 1116 if (!lun_data) {
1117 printk(ALLOC_FAILURE_MSG, __FUNCTION__);
1102 goto out; 1118 goto out;
1119 }
1103 1120
1104 scsi_cmd[0] = REPORT_LUNS; 1121 scsi_cmd[0] = REPORT_LUNS;
1105 1122
@@ -1201,10 +1218,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1201 for (i = 0; i < sizeof(struct scsi_lun); i++) 1218 for (i = 0; i < sizeof(struct scsi_lun); i++)
1202 printk("%02x", data[i]); 1219 printk("%02x", data[i]);
1203 printk(" has a LUN larger than currently supported.\n"); 1220 printk(" has a LUN larger than currently supported.\n");
1204 } else if (lun == 0) {
1205 /*
1206 * LUN 0 has already been scanned.
1207 */
1208 } else if (lun > sdev->host->max_lun) { 1221 } else if (lun > sdev->host->max_lun) {
1209 printk(KERN_WARNING "scsi: %s lun%d has a LUN larger" 1222 printk(KERN_WARNING "scsi: %s lun%d has a LUN larger"
1210 " than allowed by the host adapter\n", 1223 " than allowed by the host adapter\n",
@@ -1227,13 +1240,13 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1227 } 1240 }
1228 1241
1229 kfree(lun_data); 1242 kfree(lun_data);
1230 return 0;
1231
1232 out: 1243 out:
1233 /* 1244 scsi_device_put(sdev);
1234 * We are out of memory, don't try scanning any further. 1245 if (sdev->sdev_state == SDEV_CREATED)
1235 */ 1246 /*
1236 printk(ALLOC_FAILURE_MSG, __FUNCTION__); 1247 * the sdev we used didn't appear in the report luns scan
1248 */
1249 scsi_destroy_sdev(sdev);
1237 return 0; 1250 return 0;
1238} 1251}
1239 1252
@@ -1299,7 +1312,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1299 struct Scsi_Host *shost = dev_to_shost(parent); 1312 struct Scsi_Host *shost = dev_to_shost(parent);
1300 int bflags = 0; 1313 int bflags = 0;
1301 int res; 1314 int res;
1302 struct scsi_device *sdev = NULL;
1303 struct scsi_target *starget; 1315 struct scsi_target *starget;
1304 1316
1305 if (shost->this_id == id) 1317 if (shost->this_id == id)
@@ -1325,27 +1337,16 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1325 * Scan LUN 0, if there is some response, scan further. Ideally, we 1337 * Scan LUN 0, if there is some response, scan further. Ideally, we
1326 * would not configure LUN 0 until all LUNs are scanned. 1338 * would not configure LUN 0 until all LUNs are scanned.
1327 */ 1339 */
1328 res = scsi_probe_and_add_lun(starget, 0, &bflags, &sdev, rescan, NULL); 1340 res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
1329 if (res == SCSI_SCAN_LUN_PRESENT) { 1341 if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
1330 if (scsi_report_lun_scan(sdev, bflags, rescan) != 0) 1342 if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
1331 /* 1343 /*
1332 * The REPORT LUN did not scan the target, 1344 * The REPORT LUN did not scan the target,
1333 * do a sequential scan. 1345 * do a sequential scan.
1334 */ 1346 */
1335 scsi_sequential_lun_scan(starget, bflags, 1347 scsi_sequential_lun_scan(starget, bflags,
1336 res, sdev->scsi_level, rescan); 1348 res, starget->scsi_level, rescan);
1337 } else if (res == SCSI_SCAN_TARGET_PRESENT) {
1338 /*
1339 * There's a target here, but lun 0 is offline so we
1340 * can't use the report_lun scan. Fall back to a
1341 * sequential lun scan with a bflags of SPARSELUN and
1342 * a default scsi level of SCSI_2
1343 */
1344 scsi_sequential_lun_scan(starget, BLIST_SPARSELUN,
1345 SCSI_SCAN_TARGET_PRESENT, SCSI_2, rescan);
1346 } 1349 }
1347 if (sdev)
1348 scsi_device_put(sdev);
1349 1350
1350 out_reap: 1351 out_reap:
1351 /* now determine if the target has any children at all 1352 /* now determine if the target has any children at all
@@ -1466,23 +1467,17 @@ EXPORT_SYMBOL(scsi_scan_single_target);
1466 1467
1467void scsi_forget_host(struct Scsi_Host *shost) 1468void scsi_forget_host(struct Scsi_Host *shost)
1468{ 1469{
1469 struct scsi_target *starget, *tmp; 1470 struct scsi_device *sdev;
1470 unsigned long flags; 1471 unsigned long flags;
1471 1472
1472 /* 1473 restart:
1473 * Ok, this look a bit strange. We always look for the first device
1474 * on the list as scsi_remove_device removes them from it - thus we
1475 * also have to release the lock.
1476 * We don't need to get another reference to the device before
1477 * releasing the lock as we already own the reference from
1478 * scsi_register_device that's release in scsi_remove_device. And
1479 * after that we don't look at sdev anymore.
1480 */
1481 spin_lock_irqsave(shost->host_lock, flags); 1474 spin_lock_irqsave(shost->host_lock, flags);
1482 list_for_each_entry_safe(starget, tmp, &shost->__targets, siblings) { 1475 list_for_each_entry(sdev, &shost->__devices, siblings) {
1476 if (sdev->sdev_state == SDEV_DEL)
1477 continue;
1483 spin_unlock_irqrestore(shost->host_lock, flags); 1478 spin_unlock_irqrestore(shost->host_lock, flags);
1484 scsi_remove_target(&starget->dev); 1479 __scsi_remove_device(sdev);
1485 spin_lock_irqsave(shost->host_lock, flags); 1480 goto restart;
1486 } 1481 }
1487 spin_unlock_irqrestore(shost->host_lock, flags); 1482 spin_unlock_irqrestore(shost->host_lock, flags);
1488} 1483}
@@ -1548,10 +1543,7 @@ void scsi_free_host_dev(struct scsi_device *sdev)
1548{ 1543{
1549 BUG_ON(sdev->id != sdev->host->this_id); 1544 BUG_ON(sdev->id != sdev->host->this_id);
1550 1545
1551 if (sdev->host->hostt->slave_destroy) 1546 scsi_destroy_sdev(sdev);
1552 sdev->host->hostt->slave_destroy(sdev);
1553 transport_destroy_device(&sdev->sdev_gendev);
1554 put_device(&sdev->sdev_gendev);
1555} 1547}
1556EXPORT_SYMBOL(scsi_free_host_dev); 1548EXPORT_SYMBOL(scsi_free_host_dev);
1557 1549
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index b8052d5206cc..72a6550a056c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -57,6 +57,8 @@ static struct {
 	{ SHOST_CANCEL, "cancel" },
 	{ SHOST_DEL, "deleted" },
 	{ SHOST_RECOVERY, "recovery" },
+	{ SHOST_CANCEL_RECOVERY, "cancel/recovery" },
+	{ SHOST_DEL_RECOVERY, "deleted/recovery", },
 };
 const char *scsi_host_state_name(enum scsi_host_state state)
 {
@@ -707,9 +709,11 @@ void __scsi_remove_device(struct scsi_device *sdev)
  **/
 void scsi_remove_device(struct scsi_device *sdev)
 {
-	down(&sdev->host->scan_mutex);
+	struct Scsi_Host *shost = sdev->host;
+
+	down(&shost->scan_mutex);
 	__scsi_remove_device(sdev);
-	up(&sdev->host->scan_mutex);
+	up(&shost->scan_mutex);
 }
 EXPORT_SYMBOL(scsi_remove_device);
 
@@ -717,17 +721,20 @@ void __scsi_remove_target(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 	unsigned long flags;
-	struct scsi_device *sdev, *tmp;
+	struct scsi_device *sdev;
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	starget->reap_ref++;
-	list_for_each_entry_safe(sdev, tmp, &shost->__devices, siblings) {
+ restart:
+	list_for_each_entry(sdev, &shost->__devices, siblings) {
 		if (sdev->channel != starget->channel ||
-		    sdev->id != starget->id)
+		    sdev->id != starget->id ||
+		    sdev->sdev_state == SDEV_DEL)
 			continue;
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		scsi_remove_device(sdev);
 		spin_lock_irqsave(shost->host_lock, flags);
+		goto restart;
 	}
 	spin_unlock_irqrestore(shost->host_lock, flags);
 	scsi_target_reap(starget);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index ff724bbe6611..1d145d2f9a38 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -628,17 +628,16 @@ sas_rphy_delete(struct sas_rphy *rphy)
 	struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
 	struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
 
-	transport_destroy_device(&rphy->dev);
+	scsi_remove_target(dev);
 
-	scsi_remove_target(&rphy->dev);
+	transport_remove_device(dev);
+	device_del(dev);
+	transport_destroy_device(dev);
 
 	spin_lock(&sas_host->lock);
 	list_del(&rphy->list);
 	spin_unlock(&sas_host->lock);
 
-	transport_remove_device(dev);
-	device_del(dev);
-	transport_destroy_device(dev);
 	put_device(&parent->dev);
 }
 EXPORT_SYMBOL(sas_rphy_delete);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index de564b386052..9a1dc0cea03c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -235,6 +235,7 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
235 return 0; 235 return 0;
236 236
237 memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd)); 237 memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
238 SCpnt->cmd_len = rq->cmd_len;
238 if (rq_data_dir(rq) == WRITE) 239 if (rq_data_dir(rq) == WRITE)
239 SCpnt->sc_data_direction = DMA_TO_DEVICE; 240 SCpnt->sc_data_direction = DMA_TO_DEVICE;
240 else if (rq->data_len) 241 else if (rq->data_len)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 9ea4765d1d12..ad94367df430 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1027,7 +1027,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
1027 if (sdp->detached) 1027 if (sdp->detached)
1028 return -ENODEV; 1028 return -ENODEV;
1029 if (filp->f_flags & O_NONBLOCK) { 1029 if (filp->f_flags & O_NONBLOCK) {
1030 if (sdp->device->host->shost_state == SHOST_RECOVERY) 1030 if (scsi_host_in_recovery(sdp->device->host))
1031 return -EBUSY; 1031 return -EBUSY;
1032 } else if (!scsi_block_when_processing_errors(sdp->device)) 1032 } else if (!scsi_block_when_processing_errors(sdp->device))
1033 return -EBUSY; 1033 return -EBUSY;
@@ -2849,8 +2849,7 @@ sg_proc_init(void)
 	struct proc_dir_entry *pdep;
 	struct sg_proc_leaf * leaf;
 
-	sg_proc_sgp = create_proc_entry(sg_proc_sg_dirname,
-					S_IFDIR | S_IRUGO | S_IXUGO, NULL);
+	sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
 	if (!sg_proc_sgp)
 		return 1;
 	for (k = 0; k < num_leaves; ++k) {
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index ce63fc8312dc..561901b1cf11 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -326,6 +326,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
326 return 0; 326 return 0;
327 327
328 memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd)); 328 memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
329 SCpnt->cmd_len = rq->cmd_len;
329 if (!rq->data_len) 330 if (!rq->data_len)
330 SCpnt->sc_data_direction = DMA_NONE; 331 SCpnt->sc_data_direction = DMA_NONE;
331 else if (rq_data_dir(rq) == WRITE) 332 else if (rq_data_dir(rq) == WRITE)
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index a93308ae9736..d001c046551b 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4206,6 +4206,7 @@ static int st_init_command(struct scsi_cmnd *SCpnt)
4206 return 0; 4206 return 0;
4207 4207
4208 memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd)); 4208 memcpy(SCpnt->cmnd, rq->cmd, sizeof(SCpnt->cmnd));
4209 SCpnt->cmd_len = rq->cmd_len;
4209 4210
4210 if (rq_data_dir(rq) == WRITE) 4211 if (rq_data_dir(rq) == WRITE)
4211 SCpnt->sc_data_direction = DMA_TO_DEVICE; 4212 SCpnt->sc_data_direction = DMA_TO_DEVICE;
diff --git a/drivers/serial/21285.c b/drivers/serial/21285.c
index aec39fb261ca..b5cf39468d18 100644
--- a/drivers/serial/21285.c
+++ b/drivers/serial/21285.c
@@ -463,7 +463,7 @@ static int __init serial21285_console_setup(struct console *co, char *options)
463 return uart_set_options(port, co, baud, parity, bits, flow); 463 return uart_set_options(port, co, baud, parity, bits, flow);
464} 464}
465 465
466extern struct uart_driver serial21285_reg; 466static struct uart_driver serial21285_reg;
467 467
468static struct console serial21285_console = 468static struct console serial21285_console =
469{ 469{
diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c
index 978e12437e61..679e678c7e6a 100644
--- a/drivers/serial/amba-pl010.c
+++ b/drivers/serial/amba-pl010.c
@@ -689,7 +689,7 @@ static int __init pl010_console_setup(struct console *co, char *options)
689 return uart_set_options(port, co, baud, parity, bits, flow); 689 return uart_set_options(port, co, baud, parity, bits, flow);
690} 690}
691 691
692extern struct uart_driver amba_reg; 692static struct uart_driver amba_reg;
693static struct console amba_console = { 693static struct console amba_console = {
694 .name = "ttyAM", 694 .name = "ttyAM",
695 .write = pl010_console_write, 695 .write = pl010_console_write,
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index 56071309744c..1ff629c74750 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -701,7 +701,7 @@ static int __init pl011_console_setup(struct console *co, char *options)
701 return uart_set_options(&uap->port, co, baud, parity, bits, flow); 701 return uart_set_options(&uap->port, co, baud, parity, bits, flow);
702} 702}
703 703
704extern struct uart_driver amba_reg; 704static struct uart_driver amba_reg;
705static struct console amba_console = { 705static struct console amba_console = {
706 .name = "ttyAMA", 706 .name = "ttyAMA",
707 .write = pl011_console_write, 707 .write = pl011_console_write,
diff --git a/drivers/serial/clps711x.c b/drivers/serial/clps711x.c
index d822896b488c..87ef368384fb 100644
--- a/drivers/serial/clps711x.c
+++ b/drivers/serial/clps711x.c
@@ -98,7 +98,7 @@ static irqreturn_t clps711xuart_int_rx(int irq, void *dev_id, struct pt_regs *re
98{ 98{
99 struct uart_port *port = dev_id; 99 struct uart_port *port = dev_id;
100 struct tty_struct *tty = port->info->tty; 100 struct tty_struct *tty = port->info->tty;
101 unsigned int status, ch, flg, ignored = 0; 101 unsigned int status, ch, flg;
102 102
103 status = clps_readl(SYSFLG(port)); 103 status = clps_readl(SYSFLG(port));
104 while (!(status & SYSFLG_URXFE)) { 104 while (!(status & SYSFLG_URXFE)) {
@@ -525,7 +525,7 @@ static int __init clps711xuart_console_setup(struct console *co, char *options)
525 return uart_set_options(port, co, baud, parity, bits, flow); 525 return uart_set_options(port, co, baud, parity, bits, flow);
526} 526}
527 527
528extern struct uart_driver clps711x_reg; 528static struct uart_driver clps711x_reg;
529static struct console clps711x_console = { 529static struct console clps711x_console = {
530 .name = "ttyCL", 530 .name = "ttyCL",
531 .write = clps711xuart_console_write, 531 .write = clps711xuart_console_write,
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 4c985e6b3784..4e1e80adaf11 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -860,7 +860,7 @@ imx_console_setup(struct console *co, char *options)
860 return uart_set_options(&sport->port, co, baud, parity, bits, flow); 860 return uart_set_options(&sport->port, co, baud, parity, bits, flow);
861} 861}
862 862
863extern struct uart_driver imx_reg; 863static struct uart_driver imx_reg;
864static struct console imx_console = { 864static struct console imx_console = {
865 .name = "ttySMX", 865 .name = "ttySMX",
866 .write = imx_console_write, 866 .write = imx_console_write,
diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c
index 0c5c96a582b3..f88fdd480685 100644
--- a/drivers/serial/ioc4_serial.c
+++ b/drivers/serial/ioc4_serial.c
@@ -973,18 +973,6 @@ static irqreturn_t ioc4_intr(int irq, void *arg, struct pt_regs *regs)
973 this_ir &= ~this_mir; 973 this_ir &= ~this_mir;
974 } 974 }
975 } 975 }
976 if (this_ir) {
977 printk(KERN_ERR
978 "unknown IOC4 %s interrupt 0x%x, sio_ir = 0x%x,"
979 " sio_ies = 0x%x, other_ir = 0x%x :"
980 "other_ies = 0x%x\n",
981 (intr_type == IOC4_SIO_INTR_TYPE) ? "sio" :
982 "other", this_ir,
983 readl(&soft->is_ioc4_misc_addr->sio_ir.raw),
984 readl(&soft->is_ioc4_misc_addr->sio_ies.raw),
985 readl(&soft->is_ioc4_misc_addr->other_ir.raw),
986 readl(&soft->is_ioc4_misc_addr->other_ies.raw));
987 }
988 } 976 }
989#ifdef DEBUG_INTERRUPTS 977#ifdef DEBUG_INTERRUPTS
990 { 978 {
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index a3cd0ee8486d..0585ab27ffde 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -781,7 +781,7 @@ mpc52xx_uart_remove(struct device *dev)
781 781
782#ifdef CONFIG_PM 782#ifdef CONFIG_PM
783static int 783static int
784mpc52xx_uart_suspend(struct device *dev, u32 state, u32 level) 784mpc52xx_uart_suspend(struct device *dev, pm_message_t state, u32 level)
785{ 785{
786 struct uart_port *port = (struct uart_port *) dev_get_drvdata(dev); 786 struct uart_port *port = (struct uart_port *) dev_get_drvdata(dev);
787 787
diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c
index eaa0af835290..672b359b07ce 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/serial/pxa.c
@@ -589,8 +589,8 @@ serial_pxa_type(struct uart_port *port)
589 589
590#ifdef CONFIG_SERIAL_PXA_CONSOLE 590#ifdef CONFIG_SERIAL_PXA_CONSOLE
591 591
592extern struct uart_pxa_port serial_pxa_ports[]; 592static struct uart_pxa_port serial_pxa_ports[];
593extern struct uart_driver serial_pxa_reg; 593static struct uart_driver serial_pxa_reg;
594 594
595#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) 595#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
596 596
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index c361c6fb0809..50d7870d92bb 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -82,8 +82,6 @@
82#include <asm/arch/regs-serial.h> 82#include <asm/arch/regs-serial.h>
83#include <asm/arch/regs-gpio.h> 83#include <asm/arch/regs-gpio.h>
84 84
85#include <asm/mach-types.h>
86
87/* structures */ 85/* structures */
88 86
89struct s3c24xx_uart_info { 87struct s3c24xx_uart_info {
@@ -753,8 +751,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
753{ 751{
754 struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port); 752 struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
755 struct s3c24xx_uart_port *ourport = to_ourport(port); 753 struct s3c24xx_uart_port *ourport = to_ourport(port);
756 struct s3c24xx_uart_clksrc *clksrc; 754 struct s3c24xx_uart_clksrc *clksrc = NULL;
757 struct clk *clk; 755 struct clk *clk = NULL;
758 unsigned long flags; 756 unsigned long flags;
759 unsigned int baud, quot; 757 unsigned int baud, quot;
760 unsigned int ulcon; 758 unsigned int ulcon;
diff --git a/drivers/serial/sa1100.c b/drivers/serial/sa1100.c
index 1225b14f6e9d..dd8aed242357 100644
--- a/drivers/serial/sa1100.c
+++ b/drivers/serial/sa1100.c
@@ -799,7 +799,7 @@ sa1100_console_setup(struct console *co, char *options)
799 return uart_set_options(&sport->port, co, baud, parity, bits, flow); 799 return uart_set_options(&sport->port, co, baud, parity, bits, flow);
800} 800}
801 801
802extern struct uart_driver sa1100_reg; 802static struct uart_driver sa1100_reg;
803static struct console sa1100_console = { 803static struct console sa1100_console = {
804 .name = "ttySA", 804 .name = "ttySA",
805 .write = sa1100_console_write, 805 .write = sa1100_console_write,
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 1ae0b381c162..2c7d3ef76e8e 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -859,6 +859,7 @@ static struct pcmcia_device_id serial_ids[] = {
859 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "DP83903.cis"), 859 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "DP83903.cis"),
860 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "3CXEM556.cis"), 860 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "3CXEM556.cis"),
861 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "3CXEM556.cis"), 861 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "3CXEM556.cis"),
862 PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0x0710, "SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */
862 PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "MT5634ZLX.cis"), 863 PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "MT5634ZLX.cis"),
863 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"), 864 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"),
864 PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"), 865 PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"),
diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/serial/serial_lh7a40x.c
index 8302376800c0..d01dbe5da3b9 100644
--- a/drivers/serial/serial_lh7a40x.c
+++ b/drivers/serial/serial_lh7a40x.c
@@ -632,7 +632,7 @@ static int __init lh7a40xuart_console_setup (struct console* co, char* options)
632 return uart_set_options (port, co, baud, parity, bits, flow); 632 return uart_set_options (port, co, baud, parity, bits, flow);
633} 633}
634 634
635extern struct uart_driver lh7a40x_reg; 635static struct uart_driver lh7a40x_reg;
636static struct console lh7a40x_console = { 636static struct console lh7a40x_console = {
637 .name = "ttyAM", 637 .name = "ttyAM",
638 .write = lh7a40xuart_console_write, 638 .write = lh7a40xuart_console_write,
diff --git a/drivers/tc/zs.c b/drivers/tc/zs.c
index 4382ee60b6a8..6bed8713897e 100644
--- a/drivers/tc/zs.c
+++ b/drivers/tc/zs.c
@@ -1683,7 +1683,7 @@ static void __init probe_sccs(void)
1683#ifndef CONFIG_SERIAL_DEC_CONSOLE 1683#ifndef CONFIG_SERIAL_DEC_CONSOLE
1684 /* 1684 /*
1685 * We're called early and memory managment isn't up, yet. 1685 * We're called early and memory managment isn't up, yet.
1686 * Thus check_region would fail. 1686 * Thus request_region would fail.
1687 */ 1687 */
1688 if (!request_region((unsigned long) 1688 if (!request_region((unsigned long)
1689 zs_channels[n_channels].control, 1689 zs_channels[n_channels].control,
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index cbb451d227d2..6385d1a99b60 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -242,7 +242,6 @@ int usb_hcd_pci_suspend (struct pci_dev *dev, pm_message_t message)
242 case HC_STATE_SUSPENDED: 242 case HC_STATE_SUSPENDED:
243 /* no DMA or IRQs except when HC is active */ 243 /* no DMA or IRQs except when HC is active */
244 if (dev->current_state == PCI_D0) { 244 if (dev->current_state == PCI_D0) {
245 free_irq (hcd->irq, hcd);
246 pci_save_state (dev); 245 pci_save_state (dev);
247 pci_disable_device (dev); 246 pci_disable_device (dev);
248 } 247 }
@@ -374,14 +373,6 @@ int usb_hcd_pci_resume (struct pci_dev *dev)
374 373
375 hcd->state = HC_STATE_RESUMING; 374 hcd->state = HC_STATE_RESUMING;
376 hcd->saw_irq = 0; 375 hcd->saw_irq = 0;
377 retval = request_irq (dev->irq, usb_hcd_irq, SA_SHIRQ,
378 hcd->irq_descr, hcd);
379 if (retval < 0) {
380 dev_err (hcd->self.controller,
381 "can't restore IRQ after resume!\n");
382 usb_hc_died (hcd);
383 return retval;
384 }
385 376
386 retval = hcd->driver->resume (hcd); 377 retval = hcd->driver->resume (hcd);
387 if (!HC_IS_RUNNING (hcd->state)) { 378 if (!HC_IS_RUNNING (hcd->state)) {
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index c47c8052b486..f1fb67fe22a8 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -987,7 +987,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
987 987
988 /* remove this interface if it has been registered */ 988 /* remove this interface if it has been registered */
989 interface = dev->actconfig->interface[i]; 989 interface = dev->actconfig->interface[i];
990 if (!klist_node_attached(&interface->dev.knode_bus)) 990 if (!device_is_registered(&interface->dev))
991 continue; 991 continue;
992 dev_dbg (&dev->dev, "unregistering interface %s\n", 992 dev_dbg (&dev->dev, "unregistering interface %s\n",
993 interface->dev.bus_id); 993 interface->dev.bus_id);
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 087af73a59dd..7d131509e419 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -303,7 +303,7 @@ int usb_driver_claim_interface(struct usb_driver *driver,
303 /* if interface was already added, bind now; else let 303 /* if interface was already added, bind now; else let
304 * the future device_add() bind it, bypassing probe() 304 * the future device_add() bind it, bypassing probe()
305 */ 305 */
306 if (klist_node_attached(&dev->knode_bus)) 306 if (device_is_registered(dev))
307 device_bind_driver(dev); 307 device_bind_driver(dev);
308 308
309 return 0; 309 return 0;
@@ -336,8 +336,8 @@ void usb_driver_release_interface(struct usb_driver *driver,
336 if (iface->condition != USB_INTERFACE_BOUND) 336 if (iface->condition != USB_INTERFACE_BOUND)
337 return; 337 return;
338 338
339 /* release only after device_add() */ 339 /* don't release if the interface hasn't been added yet */
340 if (klist_node_attached(&dev->knode_bus)) { 340 if (device_is_registered(dev)) {
341 iface->condition = USB_INTERFACE_UNBINDING; 341 iface->condition = USB_INTERFACE_UNBINDING;
342 device_release_driver(dev); 342 device_release_driver(dev);
343 } 343 }
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c
index 1507738337c4..73f8c9404156 100644
--- a/drivers/usb/gadget/pxa2xx_udc.c
+++ b/drivers/usb/gadget/pxa2xx_udc.c
@@ -422,7 +422,7 @@ static inline void ep0_idle (struct pxa2xx_udc *dev)
422} 422}
423 423
424static int 424static int
425write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max) 425write_packet(volatile unsigned long *uddr, struct pxa2xx_request *req, unsigned max)
426{ 426{
427 u8 *buf; 427 u8 *buf;
428 unsigned length, count; 428 unsigned length, count;
@@ -2602,7 +2602,7 @@ static int __exit pxa2xx_udc_remove(struct device *_dev)
2602 * VBUS IRQs should probably be ignored so that the PXA device just acts 2602 * VBUS IRQs should probably be ignored so that the PXA device just acts
2603 * "dead" to USB hosts until system resume. 2603 * "dead" to USB hosts until system resume.
2604 */ 2604 */
2605static int pxa2xx_udc_suspend(struct device *dev, u32 state, u32 level) 2605static int pxa2xx_udc_suspend(struct device *dev, pm_message_t state, u32 level)
2606{ 2606{
2607 struct pxa2xx_udc *udc = dev_get_drvdata(dev); 2607 struct pxa2xx_udc *udc = dev_get_drvdata(dev);
2608 2608
diff --git a/drivers/usb/gadget/pxa2xx_udc.h b/drivers/usb/gadget/pxa2xx_udc.h
index d0bc396a85d5..a58f3e6e71f1 100644
--- a/drivers/usb/gadget/pxa2xx_udc.h
+++ b/drivers/usb/gadget/pxa2xx_udc.h
@@ -69,11 +69,11 @@ struct pxa2xx_ep {
69 * UDDR = UDC Endpoint Data Register (the fifo) 69 * UDDR = UDC Endpoint Data Register (the fifo)
70 * DRCM = DMA Request Channel Map 70 * DRCM = DMA Request Channel Map
71 */ 71 */
72 volatile u32 *reg_udccs; 72 volatile unsigned long *reg_udccs;
73 volatile u32 *reg_ubcr; 73 volatile unsigned long *reg_ubcr;
74 volatile u32 *reg_uddr; 74 volatile unsigned long *reg_uddr;
75#ifdef USE_DMA 75#ifdef USE_DMA
76 volatile u32 *reg_drcmr; 76 volatile unsigned long *reg_drcmr;
77#define drcmr(n) .reg_drcmr = & DRCMR ## n , 77#define drcmr(n) .reg_drcmr = & DRCMR ## n ,
78#else 78#else
79#define drcmr(n) 79#define drcmr(n)
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index 817620d73841..859aca7be753 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -17,8 +17,6 @@
17 */ 17 */
18 18
19#include <asm/hardware.h> 19#include <asm/hardware.h>
20#include <asm/mach-types.h>
21#include <asm/arch/hardware.h>
22 20
23 21
24extern int usb_disabled(void); 22extern int usb_disabled(void);
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 5cde76faab93..d8f3ba7ad52e 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -18,7 +18,6 @@
18#include <asm/io.h> 18#include <asm/io.h>
19#include <asm/mach-types.h> 19#include <asm/mach-types.h>
20 20
21#include <asm/arch/hardware.h>
22#include <asm/arch/mux.h> 21#include <asm/arch/mux.h>
23#include <asm/arch/irqs.h> 22#include <asm/arch/irqs.h>
24#include <asm/arch/gpio.h> 23#include <asm/arch/gpio.h>
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 3d9bcf78a9a4..da7d5478f74d 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -20,7 +20,6 @@
20*/ 20*/
21 21
22#include <asm/hardware.h> 22#include <asm/hardware.h>
23#include <asm/mach-types.h>
24#include <asm/hardware/clock.h> 23#include <asm/hardware/clock.h>
25#include <asm/arch/usb-control.h> 24#include <asm/arch/usb-control.h>
26 25
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index d2a1fd40dfcb..d42a15d10a46 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -782,6 +782,9 @@ retry:
782/* usb 1.1 says max 90% of a frame is available for periodic transfers. 782/* usb 1.1 says max 90% of a frame is available for periodic transfers.
783 * this driver doesn't promise that much since it's got to handle an 783 * this driver doesn't promise that much since it's got to handle an
784 * IRQ per packet; irq handling latencies also use up that time. 784 * IRQ per packet; irq handling latencies also use up that time.
785 *
786 * NOTE: the periodic schedule is a sparse tree, with the load for
787 * each branch minimized. see fig 3.5 in the OHCI spec for example.
785 */ 788 */
786#define MAX_PERIODIC_LOAD 500 /* out of 1000 usec */ 789#define MAX_PERIODIC_LOAD 500 /* out of 1000 usec */
787 790
@@ -843,6 +846,7 @@ static int sl811h_urb_enqueue(
 	if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE))
 			|| !HC_IS_RUNNING(hcd->state)) {
 		retval = -ENODEV;
+		kfree(ep);
 		goto fail;
 	}
 
@@ -911,8 +915,16 @@ static int sl811h_urb_enqueue(
 	case PIPE_ISOCHRONOUS:
 	case PIPE_INTERRUPT:
 		urb->interval = ep->period;
-		if (ep->branch < PERIODIC_SIZE)
+		if (ep->branch < PERIODIC_SIZE) {
+			/* NOTE: the phase is correct here, but the value
+			 * needs offsetting by the transfer queue depth.
+			 * All current drivers ignore start_frame, so this
+			 * is unlikely to ever matter...
+			 */
+			urb->start_frame = (sl811->frame & (PERIODIC_SIZE - 1))
+						+ ep->branch;
 			break;
+		}
 
 		retval = balance(sl811, ep->period, ep->load);
 		if (retval < 0)
@@ -1122,7 +1134,7 @@ sl811h_hub_descriptor (
 	desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
 
 	/* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
-	desc->bitmap[0] = 1 << 1;
+	desc->bitmap[0] = 0 << 1;
 	desc->bitmap[1] = ~0;
 }
 
diff --git a/drivers/usb/media/vicam.c b/drivers/usb/media/vicam.c
index 4a5857c53f11..0bc0b1247a6b 100644
--- a/drivers/usb/media/vicam.c
+++ b/drivers/usb/media/vicam.c
@@ -1148,7 +1148,7 @@ vicam_write_proc_gain(struct file *file, const char *buffer,
 static void
 vicam_create_proc_root(void)
 {
-	vicam_proc_root = create_proc_entry("video/vicam", S_IFDIR, 0);
+	vicam_proc_root = proc_mkdir("video/vicam", NULL);
 
 	if (vicam_proc_root)
 		vicam_proc_root->owner = THIS_MODULE;
@@ -1181,7 +1181,7 @@ vicam_create_proc_entry(struct vicam_camera *cam)
 
 	sprintf(name, "video%d", cam->vdev.minor);
 
-	cam->proc_dir = create_proc_entry(name, S_IFDIR, vicam_proc_root);
+	cam->proc_dir = proc_mkdir(name, vicam_proc_root);
 
 	if ( !cam->proc_dir )
 		return; // FIXME: We should probably return an error here
diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c
index 7484d34780fc..6a4ffe6c3977 100644
--- a/drivers/usb/net/pegasus.c
+++ b/drivers/usb/net/pegasus.c
@@ -648,6 +648,13 @@ static void read_bulk_callback(struct urb *urb, struct pt_regs *regs)
 	}
 
 	/*
+	 * If the packet is unreasonably long, quietly drop it rather than
+	 * kernel panicing by calling skb_put.
+	 */
+	if (pkt_len > PEGASUS_MTU)
+		goto goon;
+
+	/*
 	 * at this point we are sure pegasus->rx_skb != NULL
 	 * so we go ahead and pass up the packet.
 	 */
@@ -886,15 +893,17 @@ static inline void get_interrupt_interval(pegasus_t * pegasus)
 	__u8 data[2];
 
 	read_eprom_word(pegasus, 4, (__u16 *) data);
-	if (data[1] < 0x80) {
-		if (netif_msg_timer(pegasus))
-			dev_info(&pegasus->intf->dev,
-				"intr interval changed from %ums to %ums\n",
-				data[1], 0x80);
-		data[1] = 0x80;
-#ifdef PEGASUS_WRITE_EEPROM
-		write_eprom_word(pegasus, 4, *(__u16 *) data);
+	if (pegasus->usb->speed != USB_SPEED_HIGH) {
+		if (data[1] < 0x80) {
+			if (netif_msg_timer(pegasus))
+				dev_info(&pegasus->intf->dev, "intr interval "
+					"changed from %ums to %ums\n",
+					data[1], 0x80);
+			data[1] = 0x80;
+#ifdef PEGASUS_WRITE_EEPROM
+			write_eprom_word(pegasus, 4, *(__u16 *) data);
 #endif
+		}
 	}
 	pegasus->intr_interval = data[1];
 }
@@ -904,8 +913,9 @@ static void set_carrier(struct net_device *net)
 	pegasus_t *pegasus = netdev_priv(net);
 	u16 tmp;
 
-	if (read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp))
+	if (!read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp))
 		return;
+
 	if (tmp & BMSR_LSTATUS)
 		netif_carrier_on(net);
 	else
@@ -1355,6 +1365,7 @@ static void pegasus_disconnect(struct usb_interface *intf)
 	cancel_delayed_work(&pegasus->carrier_check);
 	unregister_netdev(pegasus->net);
 	usb_put_dev(interface_to_usbdev(intf));
+	unlink_all_urbs(pegasus);
 	free_all_urbs(pegasus);
 	free_skb_pool(pegasus);
 	if (pegasus->rx_skb)
diff --git a/drivers/usb/serial/airprime.c b/drivers/usb/serial/airprime.c
index a4ce0008d69b..926d4c2c1600 100644
--- a/drivers/usb/serial/airprime.c
+++ b/drivers/usb/serial/airprime.c
@@ -16,7 +16,8 @@
 #include "usb-serial.h"
 
 static struct usb_device_id id_table [] = {
-	{ USB_DEVICE(0xf3d, 0x0112) },
+	{ USB_DEVICE(0xf3d, 0x0112) },  /* AirPrime CDMA Wireless PC Card */
+	{ USB_DEVICE(0x1410, 0x1110) }, /* Novatel Wireless Merlin CDMA */
 	{ },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 4e434cb10bb1..5a8631c8a4a7 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1846,10 +1846,12 @@ static void ftdi_set_termios (struct usb_serial_port *port, struct termios *old_
 		} else {
 			/* set the baudrate determined before */
 			if (change_speed(port)) {
-				err("%s urb failed to set baurdrate", __FUNCTION__);
+				err("%s urb failed to set baudrate", __FUNCTION__);
+			}
+			/* Ensure RTS and DTR are raised when baudrate changed from 0 */
+			if ((old_termios->c_cflag & CBAUD) == B0) {
+				set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
 			}
-			/* Ensure RTS and DTR are raised */
-			set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
 		}
 
 	/* Set flow control */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 92d0f925d053..4989e5740d18 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -25,6 +25,9 @@
   2005-06-20  v0.4.1 add missing braces :-/
                      killed end-of-line whitespace
   2005-07-15  v0.4.2 rename WLAN product to FUSION, add FUSION2
+  2005-09-10  v0.4.3 added HUAWEI E600 card and Audiovox AirCard
+  2005-09-20  v0.4.4 increased recv buffer size: the card sometimes
+                     wants to send >2000 bytes.
 
   Work sponsored by: Sigos GmbH, Germany <info@sigos.de>
 
@@ -71,15 +74,21 @@ static int option_send_setup(struct usb_serial_port *port);
 
 /* Vendor and product IDs */
 #define OPTION_VENDOR_ID		0x0AF0
+#define HUAWEI_VENDOR_ID		0x12D1
+#define AUDIOVOX_VENDOR_ID		0x0F3D
 
 #define OPTION_PRODUCT_OLD		0x5000
 #define OPTION_PRODUCT_FUSION		0x6000
 #define OPTION_PRODUCT_FUSION2		0x6300
+#define HUAWEI_PRODUCT_E600		0x1001
+#define AUDIOVOX_PRODUCT_AIRCARD	0x0112
 
 static struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_OLD) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUSION) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUSION2) },
+	{ USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) },
+	{ USB_DEVICE(AUDIOVOX_VENDOR_ID, AUDIOVOX_PRODUCT_AIRCARD) },
 	{ } /* Terminating entry */
 };
 
@@ -132,7 +141,7 @@ static int debug;
 
 #define N_IN_URB 4
 #define N_OUT_URB 1
-#define IN_BUFLEN 1024
+#define IN_BUFLEN 4096
 #define OUT_BUFLEN 128
 
 struct option_port_private {
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 615874e03ce8..1cd942abb580 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -650,6 +650,7 @@ config FB_NVIDIA
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
+	select FB_SOFT_CURSOR
 	help
 	  This driver supports graphics boards with the nVidia chips, TNT
 	  and newer. For very old chipsets, such as the RIVA128, then use
@@ -753,7 +754,8 @@ config FB_I810_GTF
 
 config FB_I810_I2C
 	bool "Enable DDC Support"
-	depends on FB_I810 && I2C && FB_I810_GTF
+	depends on FB_I810 && FB_I810_GTF
+	select I2C
 	select I2C_ALGOBIT
 	help
 
@@ -766,6 +768,7 @@ config FB_INTEL
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
+	select FB_SOFT_CURSOR
 	help
 	  This driver supports the on-board graphics built in to the Intel
 	  830M/845G/852GM/855GM/865G chipsets.
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 046b47860266..8a24a66d9ba8 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -475,7 +475,7 @@ static int __devinit radeon_probe_pll_params(struct radeonfb_info *rinfo)
 	 */
 
 	/* Flush PCI buffers ? */
-	tmp = INREG(DEVICE_ID);
+	tmp = INREG16(DEVICE_ID);
 
 	local_irq_disable();
 
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index 59a1b6f85067..097d668c4fe5 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -62,9 +62,9 @@ static void radeon_pm_disable_dynamic_mode(struct radeonfb_info *rinfo)
 		OUTPLL(pllSCLK_CNTL, tmp);
 		return;
 	}
-	/* RV350 (M10) */
+	/* RV350 (M10/M11) */
 	if (rinfo->family == CHIP_FAMILY_RV350) {
-		/* for RV350/M10, no delays are required. */
+		/* for RV350/M10/M11, no delays are required. */
 		tmp = INPLL(pllSCLK_CNTL2);
 		tmp |= (SCLK_CNTL2__R300_FORCE_TCL |
 			SCLK_CNTL2__R300_FORCE_GA |
@@ -248,7 +248,7 @@ static void radeon_pm_enable_dynamic_mode(struct radeonfb_info *rinfo)
 		return;
 	}
 
-	/* M10 */
+	/* M10/M11 */
 	if (rinfo->family == CHIP_FAMILY_RV350) {
 		tmp = INPLL(pllSCLK_CNTL2);
 		tmp &= ~(SCLK_CNTL2__R300_FORCE_TCL |
@@ -1155,7 +1155,7 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
 	OUTREG( CRTC_GEN_CNTL, (crtcGenCntl | CRTC_GEN_CNTL__CRTC_DISP_REQ_EN_B) );
 	OUTREG( CRTC2_GEN_CNTL, (crtcGenCntl2 | CRTC2_GEN_CNTL__CRTC2_DISP_REQ_EN_B) );
 
-	/* This is the code for the Aluminium PowerBooks M10 */
+	/* This is the code for the Aluminium PowerBooks M10 / iBooks M11 */
 	if (rinfo->family == CHIP_FAMILY_RV350) {
 		u32 sdram_mode_reg = rinfo->save_regs[35];
 		static u32 default_mrtable[] =
@@ -2741,9 +2741,11 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk)
 		rinfo->pm_mode |= radeon_pm_d2;
 
 	/* We can restart Jasper (M10 chip in albooks), BlueStone (7500 chip
-	 * in some desktop G4s), and Via (M9+ chip on iBook G4)
+	 * in some desktop G4s), Via (M9+ chip on iBook G4) and
+	 * Snowy (M11 chip on iBook G4 manufactured after July 2005)
 	 */
-	if (!strcmp(rinfo->of_node->name, "ATY,JasperParent")) {
+	if (!strcmp(rinfo->of_node->name, "ATY,JasperParent") ||
+	    !strcmp(rinfo->of_node->name, "ATY,SnowyParent")) {
 		rinfo->reinit_func = radeon_reinitialize_M10;
 		rinfo->pm_mode |= radeon_pm_off;
 	}
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 659bc9f62244..01b8b2f78514 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -395,6 +395,8 @@ static inline void _radeon_msleep(struct radeonfb_info *rinfo, unsigned long ms)
 
 #define INREG8(addr)		readb((rinfo->mmio_base)+addr)
 #define OUTREG8(addr,val)	writeb(val, (rinfo->mmio_base)+addr)
+#define INREG16(addr)		readw((rinfo->mmio_base)+addr)
+#define OUTREG16(addr,val)	writew(val, (rinfo->mmio_base)+addr)
 #define INREG(addr)		readl((rinfo->mmio_base)+addr)
 #define OUTREG(addr,val)	writel(val, (rinfo->mmio_base)+addr)
 
diff --git a/drivers/video/aty/xlinit.c b/drivers/video/aty/xlinit.c
index 92643af12581..a085cbf74ecb 100644
--- a/drivers/video/aty/xlinit.c
+++ b/drivers/video/aty/xlinit.c
@@ -174,7 +174,7 @@ int atyfb_xl_init(struct fb_info *info)
 	const struct xl_card_cfg_t * card = &card_cfg[xl_card];
 	struct atyfb_par *par = (struct atyfb_par *) info->par;
 	union aty_pll pll;
-	int i, err;
+	int err;
 	u32 temp;
 
 	aty_st_8(CONFIG_STAT0, 0x85, par);
@@ -252,9 +252,14 @@ int atyfb_xl_init(struct fb_info *info)
 	aty_st_le32(0xEC, 0x00000000, par);
 	aty_st_le32(0xFC, 0x00000000, par);
 
-	for (i=0; i<sizeof(lcd_tbl)/sizeof(lcd_tbl_t); i++) {
-		aty_st_lcd(lcd_tbl[i].lcd_reg, lcd_tbl[i].val, par);
+#if defined (CONFIG_FB_ATY_GENERIC_LCD)
+	{
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(lcd_tbl); i++)
+			aty_st_lcd(lcd_tbl[i].lcd_reg, lcd_tbl[i].val, par);
 	}
+#endif
 
 	aty_st_le16(CONFIG_STAT0, 0x00A4, par);
 	mdelay(10);
diff --git a/drivers/video/backlight/corgi_bl.c b/drivers/video/backlight/corgi_bl.c
index 353cb3f73cf2..3c72c627e65e 100644
--- a/drivers/video/backlight/corgi_bl.c
+++ b/drivers/video/backlight/corgi_bl.c
@@ -19,17 +19,17 @@
 #include <linux/fb.h>
 #include <linux/backlight.h>
 
-#include <asm/arch-pxa/corgi.h>
-#include <asm/hardware/scoop.h>
+#include <asm/arch/sharpsl.h>
 
-#define CORGI_MAX_INTENSITY		0x3e
 #define CORGI_DEFAULT_INTENSITY		0x1f
 #define CORGI_LIMIT_MASK		0x0b
 
 static int corgibl_powermode = FB_BLANK_UNBLANK;
 static int current_intensity = 0;
 static int corgibl_limit = 0;
+static void (*corgibl_mach_set_intensity)(int intensity);
 static spinlock_t bl_lock = SPIN_LOCK_UNLOCKED;
+static struct backlight_properties corgibl_data;
 
 static void corgibl_send_intensity(int intensity)
 {
@@ -43,18 +43,10 @@ static void corgibl_send_intensity(int intensity)
 		intensity &= CORGI_LIMIT_MASK;
 	}
 
-	/* Skip 0x20 as it will blank the display */
-	if (intensity >= 0x20)
-		intensity++;
-
 	spin_lock_irqsave(&bl_lock, flags);
-	/* Bits 0-4 are accessed via the SSP interface */
-	corgi_ssp_blduty_set(intensity & 0x1f);
-	/* Bit 5 is via SCOOP */
-	if (intensity & 0x0020)
-		set_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_BACKLIGHT_CONT);
-	else
-		reset_scoop_gpio(&corgiscoop_device.dev, CORGI_SCP_BACKLIGHT_CONT);
+
+	corgibl_mach_set_intensity(intensity);
+
 	spin_unlock_irqrestore(&bl_lock, flags);
 }
 
@@ -113,8 +105,8 @@ static int corgibl_get_power(struct backlight_device *bd)
 
 static int corgibl_set_intensity(struct backlight_device *bd, int intensity)
 {
-	if (intensity > CORGI_MAX_INTENSITY)
-		intensity = CORGI_MAX_INTENSITY;
+	if (intensity > corgibl_data.max_brightness)
+		intensity = corgibl_data.max_brightness;
 	corgibl_send_intensity(intensity);
 	current_intensity=intensity;
 	return 0;
@@ -141,7 +133,6 @@ static struct backlight_properties corgibl_data = {
 	.owner		= THIS_MODULE,
 	.get_power      = corgibl_get_power,
 	.set_power      = corgibl_set_power,
-	.max_brightness = CORGI_MAX_INTENSITY,
 	.get_brightness = corgibl_get_intensity,
 	.set_brightness = corgibl_set_intensity,
 };
@@ -150,12 +141,18 @@ static struct backlight_device *corgi_backlight_device;
 
 static int __init corgibl_probe(struct device *dev)
 {
+	struct corgibl_machinfo *machinfo = dev->platform_data;
+
+	corgibl_data.max_brightness = machinfo->max_intensity;
+	corgibl_mach_set_intensity = machinfo->set_bl_intensity;
+
 	corgi_backlight_device = backlight_device_register ("corgi-bl",
 		NULL, &corgibl_data);
 	if (IS_ERR (corgi_backlight_device))
 		return PTR_ERR (corgi_backlight_device);
 
 	corgibl_set_intensity(NULL, CORGI_DEFAULT_INTENSITY);
+	corgibl_limit_intensity(0);
 
 	printk("Corgi Backlight Driver Initialized.\n");
 	return 0;
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 2e93224d2d55..0fc8bb499c3f 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -767,7 +767,7 @@ static const char *fbcon_startup(void)
 	const char *display_desc = "frame buffer device";
 	struct display *p = &fb_display[fg_console];
 	struct vc_data *vc = vc_cons[fg_console].d;
-	struct font_desc *font = NULL;
+	const struct font_desc *font = NULL;
 	struct module *owner;
 	struct fb_info *info = NULL;
 	struct fbcon_ops *ops;
@@ -841,7 +841,7 @@ static const char *fbcon_startup(void)
 				  info->var.yres);
 		vc->vc_font.width = font->width;
 		vc->vc_font.height = font->height;
-		vc->vc_font.data = p->fontdata = font->data;
+		vc->vc_font.data = (void *)(p->fontdata = font->data);
 		vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */
 	}
 
@@ -941,7 +941,7 @@ static void fbcon_init(struct vc_data *vc, int init)
 	   fb, copy the font from that console */
 	t = &fb_display[svc->vc_num];
 	if (!vc->vc_font.data) {
-		vc->vc_font.data = p->fontdata = t->fontdata;
+		vc->vc_font.data = (void *)(p->fontdata = t->fontdata);
 		vc->vc_font.width = (*default_mode)->vc_font.width;
 		vc->vc_font.height = (*default_mode)->vc_font.height;
 		p->userfont = t->userfont;
@@ -1188,7 +1188,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
 		return;
 	t = &fb_display[svc->vc_num];
 	if (!vc->vc_font.data) {
-		vc->vc_font.data = p->fontdata = t->fontdata;
+		vc->vc_font.data = (void *)(p->fontdata = t->fontdata);
 		vc->vc_font.width = (*default_mode)->vc_font.width;
 		vc->vc_font.height = (*default_mode)->vc_font.height;
 		p->userfont = t->userfont;
@@ -1687,6 +1687,8 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
 	case SM_DOWN:
 		if (count > vc->vc_rows)	/* Maximum realistic size */
 			count = vc->vc_rows;
+		if (logo_shown >= 0)
+			goto redraw_down;
 		switch (p->scrollmode) {
 		case SCROLL_MOVE:
 			ops->bmove(vc, info, t, 0, t + count, 0,
@@ -2148,7 +2150,7 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
 }
 
 static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
-			     u8 * data, int userfont)
+			     const u8 * data, int userfont)
 {
 	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
 	struct display *p = &fb_display[vc->vc_num];
@@ -2166,7 +2168,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
 		cnt = FNTCHARCNT(data);
 	else
 		cnt = 256;
-	vc->vc_font.data = p->fontdata = data;
+	vc->vc_font.data = (void *)(p->fontdata = data);
 	if ((p->userfont = userfont))
 		REFCOUNT(data)++;
 	vc->vc_font.width = w;
@@ -2323,7 +2325,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigne
 		    tmp->vc_font.width == w &&
 		    !memcmp(fb_display[i].fontdata, new_data, size)) {
 			kfree(new_data - FONT_EXTRA_WORDS * sizeof(int));
-			new_data = fb_display[i].fontdata;
+			new_data = (u8 *)fb_display[i].fontdata;
 			break;
 		}
 	}
@@ -2333,7 +2335,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, unsigne
 static int fbcon_set_def_font(struct vc_data *vc, struct console_font *font, char *name)
 {
 	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
-	struct font_desc *f;
+	const struct font_desc *f;
 
 	if (!name)
 		f = get_default_font(info->var.xres, info->var.yres);
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
index 08befafe11d1..0738cd62def2 100644
--- a/drivers/video/console/fbcon.h
+++ b/drivers/video/console/fbcon.h
@@ -30,7 +30,7 @@ struct display {
     /* Filled in by the frame buffer device */
     u_short inverse;		/* != 0 text black on white as default */
     /* Filled in by the low-level console driver */
-    u_char *fontdata;
+    const u_char *fontdata;
     int userfont;		/* != 0 if fontdata kmalloc()ed */
     u_short scrollmode;		/* Scroll Method */
     short yscroll;		/* Hardware scrolling */
diff --git a/drivers/video/console/font_10x18.c b/drivers/video/console/font_10x18.c
index ff0af96e4dfc..e6aa0eab5bb6 100644
--- a/drivers/video/console/font_10x18.c
+++ b/drivers/video/console/font_10x18.c
@@ -7,7 +7,7 @@
 
 #define FONTDATAMAX 9216
 
-static unsigned char fontdata_10x18[FONTDATAMAX] = {
+static const unsigned char fontdata_10x18[FONTDATAMAX] = {
 
 	/* 0 0x00 '^@' */
 	0x00, 0x00, /* 0000000000 */
@@ -5132,7 +5132,7 @@ static unsigned char fontdata_10x18[FONTDATAMAX] = {
 };
 
 
-struct font_desc font_10x18 = {
+const struct font_desc font_10x18 = {
 	FONT10x18_IDX,
 	"10x18",
 	10,
diff --git a/drivers/video/console/font_6x11.c b/drivers/video/console/font_6x11.c
index c52f1294044a..89976cd97494 100644
--- a/drivers/video/console/font_6x11.c
+++ b/drivers/video/console/font_6x11.c
@@ -8,7 +8,7 @@
 
 #define FONTDATAMAX (11*256)
 
-static unsigned char fontdata_6x11[FONTDATAMAX] = {
+static const unsigned char fontdata_6x11[FONTDATAMAX] = {
 
 	/* 0 0x00 '^@' */
 	0x00, /* 00000000 */
@@ -3341,7 +3341,7 @@ static unsigned char fontdata_6x11[FONTDATAMAX] = {
 };
 
 
-struct font_desc font_vga_6x11 = {
+const struct font_desc font_vga_6x11 = {
 	VGA6x11_IDX,
 	"ProFont6x11",
 	6,
diff --git a/drivers/video/console/font_7x14.c b/drivers/video/console/font_7x14.c
index 1fa7fcf2ff72..bbf116647397 100644
--- a/drivers/video/console/font_7x14.c
+++ b/drivers/video/console/font_7x14.c
@@ -7,7 +7,7 @@
 
 #define FONTDATAMAX 3584
 
-static unsigned char fontdata_7x14[FONTDATAMAX] = {
+static const unsigned char fontdata_7x14[FONTDATAMAX] = {
 
 	/* 0 0x00 '^@' */
 	0x00, /* 0000000 */
@@ -4108,7 +4108,7 @@ static unsigned char fontdata_7x14[FONTDATAMAX] = {
 };
 
 
-struct font_desc font_7x14 = {
+const struct font_desc font_7x14 = {
 	FONT7x14_IDX,
 	"7x14",
 	7,
diff --git a/drivers/video/console/font_8x16.c b/drivers/video/console/font_8x16.c
index e6f8dbaa122b..74fe86f28ff4 100644
--- a/drivers/video/console/font_8x16.c
+++ b/drivers/video/console/font_8x16.c
@@ -8,7 +8,7 @@
 
 #define FONTDATAMAX 4096
 
-static unsigned char fontdata_8x16[FONTDATAMAX] = {
+static const unsigned char fontdata_8x16[FONTDATAMAX] = {
 
 	/* 0 0x00 '^@' */
 	0x00, /* 00000000 */
@@ -4621,7 +4621,7 @@ static unsigned char fontdata_8x16[FONTDATAMAX] = {
 };
 
 
-struct font_desc font_vga_8x16 = {
+const struct font_desc font_vga_8x16 = {
 	VGA8x16_IDX,
 	"VGA8x16",
 	8,
diff --git a/drivers/video/console/font_8x8.c b/drivers/video/console/font_8x8.c
index 57fbe266a6b9..26199f8ee908 100644
--- a/drivers/video/console/font_8x8.c
+++ b/drivers/video/console/font_8x8.c
@@ -8,7 +8,7 @@
 
 #define FONTDATAMAX 2048
 
-static unsigned char fontdata_8x8[FONTDATAMAX] = {
+static const unsigned char fontdata_8x8[FONTDATAMAX] = {
 
 	/* 0 0x00 '^@' */
 	0x00, /* 00000000 */
@@ -2573,7 +2573,7 @@ static unsigned char fontdata_8x8[FONTDATAMAX] = {
 };
 
 
-struct font_desc font_vga_8x8 = {
+const struct font_desc font_vga_8x8 = {
 	VGA8x8_IDX,
 	"VGA8x8",
 	8,
diff --git a/drivers/video/console/font_acorn_8x8.c b/drivers/video/console/font_acorn_8x8.c
index d565505e3069..2d2e39632e2d 100644
--- a/drivers/video/console/font_acorn_8x8.c
+++ b/drivers/video/console/font_acorn_8x8.c
@@ -3,7 +3,7 @@
 #include <linux/config.h>
 #include <linux/font.h>
 
-static unsigned char acorndata_8x8[] = {
+static const unsigned char acorndata_8x8[] = {
 /* 00 */  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */
 /* 01 */  0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */
 /* 02 */  0x7e, 0xff, 0xbd, 0xff, 0xc3, 0xe7, 0xff, 0x7e, /* ^B */
@@ -262,7 +262,7 @@ static unsigned char acorndata_8x8[] = {
 /* FF */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
 };
 
-struct font_desc font_acorn_8x8 = {
+const struct font_desc font_acorn_8x8 = {
 	ACORN8x8_IDX,
 	"Acorn8x8",
 	8,
diff --git a/drivers/video/console/font_mini_4x6.c b/drivers/video/console/font_mini_4x6.c
index 593b95500a0c..d818234fdf11 100644
--- a/drivers/video/console/font_mini_4x6.c
+++ b/drivers/video/console/font_mini_4x6.c
@@ -43,7 +43,7 @@ __END__;
 
 #define FONTDATAMAX 1536
 
-static unsigned char fontdata_mini_4x6[FONTDATAMAX] = {
+static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = {
 
 	/*{*/
 	  	/*   Char 0: ' '  */
@@ -2147,7 +2147,7 @@ static unsigned char fontdata_mini_4x6[FONTDATAMAX] = {
 	/*}*/
 };
 
-struct font_desc font_mini_4x6 = {
+const struct font_desc font_mini_4x6 = {
 	MINI4x6_IDX,
 	"MINI4x6",
 	4,
diff --git a/drivers/video/console/font_pearl_8x8.c b/drivers/video/console/font_pearl_8x8.c
index 5fa95f118818..e646c88f55c7 100644
--- a/drivers/video/console/font_pearl_8x8.c
+++ b/drivers/video/console/font_pearl_8x8.c
@@ -13,7 +13,7 @@
 
 #define FONTDATAMAX 2048
 
-static unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
+static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
 
    /* 0 0x00 '^@' */
    0x00, /* 00000000 */
@@ -2577,7 +2577,7 @@ static unsigned char fontdata_pearl8x8[FONTDATAMAX] = {
 
 };
 
-struct font_desc font_pearl_8x8 = {
+const struct font_desc font_pearl_8x8 = {
 	PEARL8x8_IDX,
 	"PEARL8x8",
 	8,
diff --git a/drivers/video/console/font_sun12x22.c b/drivers/video/console/font_sun12x22.c
index c7bd967ea100..ab5eb93407b4 100644
--- a/drivers/video/console/font_sun12x22.c
+++ b/drivers/video/console/font_sun12x22.c
@@ -2,7 +2,7 @@
 
 #define FONTDATAMAX 11264
 
-static unsigned char fontdata_sun12x22[FONTDATAMAX] = {
+static const unsigned char fontdata_sun12x22[FONTDATAMAX] = {
 
 	/* 0 0x00 '^@' */
 	0x00, 0x00, /* 000000000000 */
@@ -6151,7 +6151,7 @@ static unsigned char fontdata_sun12x22[FONTDATAMAX] = {
 };
 
 
-struct font_desc font_sun_12x22 = {
+const struct font_desc font_sun_12x22 = {
 	SUN12x22_IDX,
 	"SUN12x22",
 	12,
diff --git a/drivers/video/console/font_sun8x16.c b/drivers/video/console/font_sun8x16.c
index 2af3ab354652..41f910f5529c 100644
--- a/drivers/video/console/font_sun8x16.c
+++ b/drivers/video/console/font_sun8x16.c
@@ -2,7 +2,7 @@
 
 #define FONTDATAMAX 4096
 
-static unsigned char fontdata_sun8x16[FONTDATAMAX] = {
+static const unsigned char fontdata_sun8x16[FONTDATAMAX] = {
 /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
 /* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00,
 /* */ 0x00,0x00,0x7e,0xff,0xdb,0xff,0xff,0xc3,0xe7,0xff,0xff,0x7e,0x00,0x00,0x00,0x00,
@@ -261,7 +261,7 @@ static unsigned char fontdata_sun8x16[FONTDATAMAX] = {
 /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
 };
 
-struct font_desc font_sun_8x16 = {
+const struct font_desc font_sun_8x16 = {
 	SUN8x16_IDX,
 	"SUN8x16",
 	8,
diff --git a/drivers/video/console/fonts.c b/drivers/video/console/fonts.c
index e79b29702649..4fd07d9eca03 100644
--- a/drivers/video/console/fonts.c
+++ b/drivers/video/console/fonts.c
@@ -23,7 +23,7 @@
 
 #define NO_FONTS
 
-static struct font_desc *fonts[] = {
+static const struct font_desc *fonts[] = {
 #ifdef CONFIG_FONT_8x8
 #undef NO_FONTS
     &font_vga_8x8,
@@ -84,7 +84,7 @@ static struct font_desc *fonts[] = {
  *
  */
 
-struct font_desc *find_font(char *name)
+const struct font_desc *find_font(const char *name)
 {
    unsigned int i;
 
@@ -108,10 +108,10 @@ struct font_desc *find_font(char *name)
  *
  */
 
-struct font_desc *get_default_font(int xres, int yres)
+const struct font_desc *get_default_font(int xres, int yres)
 {
    int i, c, cc;
-   struct font_desc *f, *g;
+   const struct font_desc *f, *g;
 
    g = NULL;
    cc = -10000;
@@ -138,7 +138,6 @@ struct font_desc *get_default_font(int xres, int yres)
    return g;
 }
 
-EXPORT_SYMBOL(fonts);
 EXPORT_SYMBOL(find_font);
 EXPORT_SYMBOL(get_default_font);
 
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 0705cd741411..6ef6f7760e47 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1020,7 +1020,9 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
 static int vgacon_resize(struct vc_data *c, unsigned int width,
 				unsigned int height)
 {
-	if (width % 2 || width > ORIG_VIDEO_COLS || height > ORIG_VIDEO_LINES)
+	if (width % 2 || width > ORIG_VIDEO_COLS ||
+	    height > (ORIG_VIDEO_LINES * vga_default_font_height)/
+	    c->vc_font.height)
 		return -EINVAL;
 
 	if (CON_IS_VISIBLE(c) && !vga_is_gfx)	/* who knows */
diff --git a/drivers/video/cyblafb.c b/drivers/video/cyblafb.c
index ae2762cb5608..6992100a508c 100644
--- a/drivers/video/cyblafb.c
+++ b/drivers/video/cyblafb.c
@@ -410,20 +410,21 @@ static void cyblafb_imageblit(struct fb_info *info,
 	out32(GE0C,point(image->dx+image->width-1,image->dy+image->height-1));
 
 	while(index < index_end) {
+		const char *p = image->data + index;
 		for(i=0;i<width_dds;i++) {
-			out32(GE9C,*((u32*) ((u32)image->data + index)));
+			out32(GE9C,*(u32*)p);
+			p+=4;
 			index+=4;
 		}
 		switch(width_dbs) {
 		case 0: break;
-		case 8: out32(GE9C,*((u8*)((u32)image->data+index)));
+		case 8: out32(GE9C,*(u8*)p);
 			index+=1;
 			break;
-		case 16: out32(GE9C,*((u16*)((u32)image->data+index)));
+		case 16: out32(GE9C,*(u16*)p);
 			index+=2;
 			break;
-		case 24: out32(GE9C,(u32)(*((u16*)((u32)image->data+index))) |
-			 (u32)(*((u8*)((u32)image->data+index+2)))<<16);
+		case 24: out32(GE9C,*(u16*)p | *(u8*)(p+2)<<16);
 			index+=3;
 			break;
 		}
diff --git a/drivers/video/fbcvt.c b/drivers/video/fbcvt.c
index cfa61b512de0..0b6af00d197e 100644
--- a/drivers/video/fbcvt.c
+++ b/drivers/video/fbcvt.c
@@ -272,11 +272,11 @@ static void fb_cvt_convert_to_mode(struct fb_cvt_data *cvt,
 {
 	mode->refresh = cvt->f_refresh;
 	mode->pixclock = KHZ2PICOS(cvt->pixclock/1000);
-	mode->left_margin = cvt->h_front_porch;
-	mode->right_margin = cvt->h_back_porch;
+	mode->left_margin = cvt->h_back_porch;
+	mode->right_margin = cvt->h_front_porch;
 	mode->hsync_len = cvt->hsync;
-	mode->upper_margin = cvt->v_front_porch;
-	mode->lower_margin = cvt->v_back_porch;
+	mode->upper_margin = cvt->v_back_porch;
+	mode->lower_margin = cvt->v_front_porch;
 	mode->vsync_len = cvt->vsync;
 
 	mode->sync &= ~(FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT);
diff --git a/drivers/video/i810/i810-i2c.c b/drivers/video/i810/i810-i2c.c
index fda53aac1fc1..689d2586366d 100644
--- a/drivers/video/i810/i810-i2c.c
+++ b/drivers/video/i810/i810-i2c.c
@@ -44,7 +44,7 @@ static void i810i2c_setscl(void *data, int state)
 {
 	struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
 	struct i810fb_par *par = chan->par;
-	u8 *mmio = par->mmio_start_virtual;
+	u8 __iomem *mmio = par->mmio_start_virtual;
 
 	i810_writel(mmio, GPIOB, (state ? SCL_VAL_OUT : 0) | SCL_DIR |
 		    SCL_DIR_MASK | SCL_VAL_MASK);
@@ -55,7 +55,7 @@ static void i810i2c_setsda(void *data, int state)
 {
 	struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
 	struct i810fb_par *par = chan->par;
-	u8 *mmio = par->mmio_start_virtual;
+	u8 __iomem *mmio = par->mmio_start_virtual;
 
 	i810_writel(mmio, GPIOB, (state ? SDA_VAL_OUT : 0) | SDA_DIR |
 		    SDA_DIR_MASK | SDA_VAL_MASK);
@@ -66,7 +66,7 @@ static int i810i2c_getscl(void *data)
 {
 	struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
 	struct i810fb_par *par = chan->par;
-	u8 *mmio = par->mmio_start_virtual;
+	u8 __iomem *mmio = par->mmio_start_virtual;
 
 	i810_writel(mmio, GPIOB, SCL_DIR_MASK);
 	i810_writel(mmio, GPIOB, 0);
@@ -77,7 +77,7 @@ static int i810i2c_getsda(void *data)
 {
 	struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
 	struct i810fb_par *par = chan->par;
-	u8 *mmio = par->mmio_start_virtual;
+	u8 __iomem *mmio = par->mmio_start_virtual;
 
 	i810_writel(mmio, GPIOB, SDA_DIR_MASK);
 	i810_writel(mmio, GPIOB, 0);
@@ -88,7 +88,7 @@ static void i810ddc_setscl(void *data, int state)
 {
 	struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
 	struct i810fb_par *par = chan->par;
-	u8 *mmio = par->mmio_start_virtual;
+	u8 __iomem *mmio = par->mmio_start_virtual;
 
 	i810_writel(mmio, GPIOA, (state ? SCL_VAL_OUT : 0) | SCL_DIR |
 		    SCL_DIR_MASK | SCL_VAL_MASK);
@@ -99,7 +99,7 @@ static void i810ddc_setsda(void *data, int state)
 {
 	struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
 	struct i810fb_par *par = chan->par;
-	u8 *mmio = par->mmio_start_virtual;
+	u8 __iomem *mmio = par->mmio_start_virtual;
 
 	i810_writel(mmio, GPIOA, (state ? SDA_VAL_OUT : 0) | SDA_DIR |
 		    SDA_DIR_MASK | SDA_VAL_MASK);
@@ -110,7 +110,7 @@ static int i810ddc_getscl(void *data)
 {
 	struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
 	struct i810fb_par *par = chan->par;
-	u8 *mmio = par->mmio_start_virtual;
+	u8 __iomem *mmio = par->mmio_start_virtual;
 
 	i810_writel(mmio, GPIOA, SCL_DIR_MASK);
 	i810_writel(mmio, GPIOA, 0);
@@ -121,7 +121,7 @@ static int i810ddc_getsda(void *data)
 {
 	struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data;
 	struct i810fb_par *par = chan->par;
-	u8 *mmio = par->mmio_start_virtual;
+	u8 __iomem *mmio = par->mmio_start_virtual;
 
 	i810_writel(mmio, GPIOA, SDA_DIR_MASK);
 	i810_writel(mmio, GPIOA, 0);
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index cabd53cec991..1d54d3d6960b 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -36,7 +36,6 @@
 
 #include <asm/hardware.h>
 #include <asm/io.h>
-#include <asm/mach-types.h>
 #include <asm/uaccess.h>
 #include <asm/arch/imxfb.h>
 
@@ -425,7 +424,7 @@ static void imxfb_setup_gpio(struct imxfb_info *fbi)
  * Power management hooks. Note that we won't be called from IRQ context,
  * unlike the blank functions above, so we may sleep.
  */
-static int imxfb_suspend(struct device *dev, u32 state, u32 level)
+static int imxfb_suspend(struct device *dev, pm_message_t state, u32 level)
 {
 	struct imxfb_info *fbi = dev_get_drvdata(dev);
 	pr_debug("%s\n",__FUNCTION__);
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index bf62e6ed0382..80a09344f1aa 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -226,7 +226,7 @@ MODULE_DEVICE_TABLE(pci, intelfb_pci_table);
 
 static int accel = 1;
 static int vram = 4;
-static int hwcursor = 1;
+static int hwcursor = 0;
 static int mtrr = 1;
 static int fixed = 0;
 static int noinit = 0;
@@ -609,15 +609,9 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
 		dinfo->accel = 0;
 	}
 
-	if (MB(voffset) < stolen_size)
-		offset = (stolen_size >> 12);
-	else
-		offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE;
-
 	/* Framebuffer parameters - Use all the stolen memory if >= vram */
-	if (ROUND_UP_TO_PAGE(stolen_size) >= ((offset << 12) + MB(vram))) {
+	if (ROUND_UP_TO_PAGE(stolen_size) >= MB(vram)) {
 		dinfo->fb.size = ROUND_UP_TO_PAGE(stolen_size);
-		dinfo->fb.offset = 0;
 		dinfo->fbmem_gart = 0;
 	} else {
 		dinfo->fb.size = MB(vram);
@@ -648,6 +642,11 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return -ENODEV;
 	}
 
+	if (MB(voffset) < stolen_size)
+		offset = (stolen_size >> 12);
+	else
+		offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE;
+
 	/* set the mem offsets - set them after the already used pages */
 	if (dinfo->accel) {
 		dinfo->ring.offset = offset + gtt_info.current_memory;
@@ -662,10 +661,11 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
 			+ (dinfo->cursor.size >> 12);
 	}
 
+	/* Allocate memories (which aren't stolen) */
 	/* Map the fb and MMIO regions */
 	/* ioremap only up to the end of used aperture */
 	dinfo->aperture.virtual = (u8 __iomem *)ioremap_nocache
-		(dinfo->aperture.physical, (dinfo->fb.offset << 12)
+		(dinfo->aperture.physical, ((offset + dinfo->fb.offset) << 12)
 		 + dinfo->fb.size);
 	if (!dinfo->aperture.virtual) {
 		ERR_MSG("Cannot remap FB region.\n");
@@ -682,7 +682,6 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return -ENODEV;
 	}
 
-	/* Allocate memories (which aren't stolen) */
 	if (dinfo->accel) {
 		if (!(dinfo->gtt_ring_mem =
 		      agp_allocate_memory(bridge, dinfo->ring.size >> 12,
@@ -1484,7 +1483,7 @@ intelfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
 #endif
 
 	if (!dinfo->hwcursor)
-		return -ENXIO;
+		return soft_cursor(info, cursor);
 
 	intelfbhw_cursor_hide(dinfo);
 
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 98e00d8601e5..e02da41f1b26 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -1285,7 +1285,7 @@ static int matroxfb_getmemory(WPMINFO unsigned int maxSize, unsigned int *realSi
 	vaddr_t vm;
 	unsigned int offs;
 	unsigned int offs2;
-	unsigned char store;
+	unsigned char store, orig;
 	unsigned char bytes[32];
 	unsigned char* tmp;
 
@@ -1298,7 +1298,8 @@ static int matroxfb_getmemory(WPMINFO unsigned int maxSize, unsigned int *realSi
 	if (maxSize > 0x2000000) maxSize = 0x2000000;
 
 	mga_outb(M_EXTVGA_INDEX, 0x03);
-	mga_outb(M_EXTVGA_DATA, mga_inb(M_EXTVGA_DATA) | 0x80);
+	orig = mga_inb(M_EXTVGA_DATA);
+	mga_outb(M_EXTVGA_DATA, orig | 0x80);
 
 	store = mga_readb(vm, 0x1234);
 	tmp = bytes;
@@ -1323,7 +1324,7 @@ static int matroxfb_getmemory(WPMINFO unsigned int maxSize, unsigned int *realSi
 	mga_writeb(vm, 0x1234, store);
 
 	mga_outb(M_EXTVGA_INDEX, 0x03);
-	mga_outb(M_EXTVGA_DATA, mga_inb(M_EXTVGA_DATA) & ~0x80);
+	mga_outb(M_EXTVGA_DATA, orig);
 
 	*realSize = offs - 0x100000;
 #ifdef CONFIG_FB_MATROX_MILLENIUM
@@ -1858,6 +1859,8 @@ static int initMatrox2(WPMINFO struct board* b){
 	   to yres_virtual * xres_virtual < 2^32 */
 	}
 	matroxfb_init_fix(PMINFO2);
+	ACCESS_FBINFO(fbcon.screen_base) = vaddr_va(ACCESS_FBINFO(video.vbase));
+	matroxfb_update_fix(PMINFO2);
 	/* Normalize values (namely yres_virtual) */
 	matroxfb_check_var(&vesafb_defined, &ACCESS_FBINFO(fbcon));
 	/* And put it into "current" var. Do NOT program hardware yet, or we'll not take over
@@ -2010,11 +2013,11 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
 	}
 	/* not match... */
 	if (!b->vendor)
-		return -1;
+		return -ENODEV;
 	if (dev > 0) {
 		/* not requested one... */
 		dev--;
-		return -1;
+		return -ENODEV;
 	}
 	pci_read_config_dword(pdev, PCI_COMMAND, &cmd);
 	if (pci_enable_device(pdev)) {
diff --git a/drivers/video/nvidia/nv_i2c.c b/drivers/video/nvidia/nv_i2c.c
index ace484fa61ce..12f2884d3f0b 100644
--- a/drivers/video/nvidia/nv_i2c.c
+++ b/drivers/video/nvidia/nv_i2c.c
@@ -209,10 +209,13 @@ int nvidia_probe_i2c_connector(struct fb_info *info, int conn, u8 **out_edid)
 
 	if (!edid && conn == 1) {
 		/* try to get from firmware */
-		edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
-		if (edid)
-			memcpy(edid, fb_firmware_edid(info->device),
-			       EDID_LENGTH);
+		const u8 *e = fb_firmware_edid(info->device);
+
+		if (e != NULL) {
+			edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+			if (edid)
+				memcpy(edid, e, EDID_LENGTH);
+		}
 	}
 
 	if (out_edid)
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 3620de0f252e..a7f020ada630 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -893,7 +893,7 @@ static int nvidiafb_cursor(struct fb_info *info, struct fb_cursor *cursor)
 	int i, set = cursor->set;
 	u16 fg, bg;
 
-	if (!hwcur || cursor->image.width > MAX_CURS || cursor->image.height > MAX_CURS)
+	if (cursor->image.width > MAX_CURS || cursor->image.height > MAX_CURS)
 		return -ENXIO;
 
 	NVShowHideCursor(par, 0);
@@ -1356,6 +1356,9 @@ static int __devinit nvidia_set_fbinfo(struct fb_info *info)
 	info->pixmap.size = 8 * 1024;
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
 
+	if (!hwcur)
+		info->fbops->fb_cursor = soft_cursor;
+
 	info->var.accel_flags = (!noaccel);
 
 	switch (par->Architecture) {
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index e0dad948467b..2e11b601c488 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -67,6 +67,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/ioport.h>
+#include <linux/ctype.h>
 
 #include <video/fbcon.h>
 #include <video/fbcon-mfb.h>
@@ -2594,7 +2595,7 @@ static char *pm3fb_boardnum_setup(char *options, unsigned long *bn)
 {
 	char *next;
 
-	if (!(CHAR_IS_NUM(options[0]))) {
+	if (!(isdigit(options[0]))) {
 		(*bn) = 0;
 		return (options);
 	}
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 34d4dcc0320a..194eed0a238c 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -260,9 +260,9 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 	}
 
 #ifdef CONFIG_CPU_FREQ
-	DPRINTK("dma period = %d ps, clock = %d kHz\n",
+	pr_debug("pxafb: dma period = %d ps, clock = %d kHz\n",
 		pxafb_display_dma_period(var),
 		get_clk_frequency_khz(0));
 #endif
 
 	return 0;
@@ -270,7 +270,7 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 
 static inline void pxafb_set_truecolor(u_int is_true_color)
 {
-	DPRINTK("true_color = %d\n", is_true_color);
+	pr_debug("pxafb: true_color = %d\n", is_true_color);
 	// do your machine-specific setup if needed
 }
 
@@ -284,7 +284,7 @@ static int pxafb_set_par(struct fb_info *info)
 	struct fb_var_screeninfo *var = &info->var;
 	unsigned long palette_mem_size;
 
-	DPRINTK("set_par\n");
+	pr_debug("pxafb: set_par\n");
 
 	if (var->bits_per_pixel == 16)
 		fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR;
@@ -308,7 +308,7 @@ static int pxafb_set_par(struct fb_info *info)
 
 	palette_mem_size = fbi->palette_size * sizeof(u16);
 
-	DPRINTK("palette_mem_size = 0x%08lx\n", (u_long) palette_mem_size);
+	pr_debug("pxafb: palette_mem_size = 0x%08lx\n", palette_mem_size);
 
 	fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size);
 	fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size;
@@ -369,7 +369,7 @@ static int pxafb_blank(int blank, struct fb_info *info)
369 struct pxafb_info *fbi = (struct pxafb_info *)info; 369 struct pxafb_info *fbi = (struct pxafb_info *)info;
370 int i; 370 int i;
371 371
372 DPRINTK("pxafb_blank: blank=%d\n", blank); 372 pr_debug("pxafb: blank=%d\n", blank);
373 373
374 switch (blank) { 374 switch (blank) {
375 case FB_BLANK_POWERDOWN: 375 case FB_BLANK_POWERDOWN:
@@ -508,15 +508,15 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *
508 u_long flags; 508 u_long flags;
509 u_int lines_per_panel, pcd = get_pcd(var->pixclock); 509 u_int lines_per_panel, pcd = get_pcd(var->pixclock);
510 510
511 DPRINTK("Configuring PXA LCD\n"); 511 pr_debug("pxafb: Configuring PXA LCD\n");
512 512
513 DPRINTK("var: xres=%d hslen=%d lm=%d rm=%d\n", 513 pr_debug("var: xres=%d hslen=%d lm=%d rm=%d\n",
514 var->xres, var->hsync_len, 514 var->xres, var->hsync_len,
515 var->left_margin, var->right_margin); 515 var->left_margin, var->right_margin);
516 DPRINTK("var: yres=%d vslen=%d um=%d bm=%d\n", 516 pr_debug("var: yres=%d vslen=%d um=%d bm=%d\n",
517 var->yres, var->vsync_len, 517 var->yres, var->vsync_len,
518 var->upper_margin, var->lower_margin); 518 var->upper_margin, var->lower_margin);
519 DPRINTK("var: pixclock=%d pcd=%d\n", var->pixclock, pcd); 519 pr_debug("var: pixclock=%d pcd=%d\n", var->pixclock, pcd);
520 520
521#if DEBUG_VAR 521#if DEBUG_VAR
522 if (var->xres < 16 || var->xres > 1024) 522 if (var->xres < 16 || var->xres > 1024)
@@ -589,10 +589,10 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *
589 if (pcd) 589 if (pcd)
590 new_regs.lccr3 |= LCCR3_PixClkDiv(pcd); 590 new_regs.lccr3 |= LCCR3_PixClkDiv(pcd);
591 591
592 DPRINTK("nlccr0 = 0x%08x\n", new_regs.lccr0); 592 pr_debug("nlccr0 = 0x%08x\n", new_regs.lccr0);
593 DPRINTK("nlccr1 = 0x%08x\n", new_regs.lccr1); 593 pr_debug("nlccr1 = 0x%08x\n", new_regs.lccr1);
594 DPRINTK("nlccr2 = 0x%08x\n", new_regs.lccr2); 594 pr_debug("nlccr2 = 0x%08x\n", new_regs.lccr2);
595 DPRINTK("nlccr3 = 0x%08x\n", new_regs.lccr3); 595 pr_debug("nlccr3 = 0x%08x\n", new_regs.lccr3);
596 596
597 /* Update shadow copy atomically */ 597 /* Update shadow copy atomically */
598 local_irq_save(flags); 598 local_irq_save(flags);
@@ -637,24 +637,24 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *
637 } 637 }
638 638
639#if 0 639#if 0
640 DPRINTK("fbi->dmadesc_fblow_cpu = 0x%p\n", fbi->dmadesc_fblow_cpu); 640 pr_debug("fbi->dmadesc_fblow_cpu = 0x%p\n", fbi->dmadesc_fblow_cpu);
641 DPRINTK("fbi->dmadesc_fbhigh_cpu = 0x%p\n", fbi->dmadesc_fbhigh_cpu); 641 pr_debug("fbi->dmadesc_fbhigh_cpu = 0x%p\n", fbi->dmadesc_fbhigh_cpu);
642 DPRINTK("fbi->dmadesc_palette_cpu = 0x%p\n", fbi->dmadesc_palette_cpu); 642 pr_debug("fbi->dmadesc_palette_cpu = 0x%p\n", fbi->dmadesc_palette_cpu);
643 DPRINTK("fbi->dmadesc_fblow_dma = 0x%x\n", fbi->dmadesc_fblow_dma); 643 pr_debug("fbi->dmadesc_fblow_dma = 0x%x\n", fbi->dmadesc_fblow_dma);
644 DPRINTK("fbi->dmadesc_fbhigh_dma = 0x%x\n", fbi->dmadesc_fbhigh_dma); 644 pr_debug("fbi->dmadesc_fbhigh_dma = 0x%x\n", fbi->dmadesc_fbhigh_dma);
645 DPRINTK("fbi->dmadesc_palette_dma = 0x%x\n", fbi->dmadesc_palette_dma); 645 pr_debug("fbi->dmadesc_palette_dma = 0x%x\n", fbi->dmadesc_palette_dma);
646 646
647 DPRINTK("fbi->dmadesc_fblow_cpu->fdadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fdadr); 647 pr_debug("fbi->dmadesc_fblow_cpu->fdadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fdadr);
648 DPRINTK("fbi->dmadesc_fbhigh_cpu->fdadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fdadr); 648 pr_debug("fbi->dmadesc_fbhigh_cpu->fdadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fdadr);
649 DPRINTK("fbi->dmadesc_palette_cpu->fdadr = 0x%x\n", fbi->dmadesc_palette_cpu->fdadr); 649 pr_debug("fbi->dmadesc_palette_cpu->fdadr = 0x%x\n", fbi->dmadesc_palette_cpu->fdadr);
650 650
651 DPRINTK("fbi->dmadesc_fblow_cpu->fsadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fsadr); 651 pr_debug("fbi->dmadesc_fblow_cpu->fsadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fsadr);
652 DPRINTK("fbi->dmadesc_fbhigh_cpu->fsadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fsadr); 652 pr_debug("fbi->dmadesc_fbhigh_cpu->fsadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fsadr);
653 DPRINTK("fbi->dmadesc_palette_cpu->fsadr = 0x%x\n", fbi->dmadesc_palette_cpu->fsadr); 653 pr_debug("fbi->dmadesc_palette_cpu->fsadr = 0x%x\n", fbi->dmadesc_palette_cpu->fsadr);
654 654
655 DPRINTK("fbi->dmadesc_fblow_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fblow_cpu->ldcmd); 655 pr_debug("fbi->dmadesc_fblow_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fblow_cpu->ldcmd);
656 DPRINTK("fbi->dmadesc_fbhigh_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fbhigh_cpu->ldcmd); 656 pr_debug("fbi->dmadesc_fbhigh_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fbhigh_cpu->ldcmd);
657 DPRINTK("fbi->dmadesc_palette_cpu->ldcmd = 0x%x\n", fbi->dmadesc_palette_cpu->ldcmd); 657 pr_debug("fbi->dmadesc_palette_cpu->ldcmd = 0x%x\n", fbi->dmadesc_palette_cpu->ldcmd);
658#endif 658#endif
659 659
660 fbi->reg_lccr0 = new_regs.lccr0; 660 fbi->reg_lccr0 = new_regs.lccr0;
@@ -684,7 +684,7 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *
684 */ 684 */
685static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on) 685static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on)
686{ 686{
687 DPRINTK("backlight o%s\n", on ? "n" : "ff"); 687 pr_debug("pxafb: backlight o%s\n", on ? "n" : "ff");
688 688
689 if (pxafb_backlight_power) 689 if (pxafb_backlight_power)
690 pxafb_backlight_power(on); 690 pxafb_backlight_power(on);
@@ -692,7 +692,7 @@ static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on)
692 692
693static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on) 693static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on)
694{ 694{
695 DPRINTK("LCD power o%s\n", on ? "n" : "ff"); 695 pr_debug("pxafb: LCD power o%s\n", on ? "n" : "ff");
696 696
697 if (pxafb_lcd_power) 697 if (pxafb_lcd_power)
698 pxafb_lcd_power(on); 698 pxafb_lcd_power(on);
@@ -740,13 +740,13 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi)
740 740
741static void pxafb_enable_controller(struct pxafb_info *fbi) 741static void pxafb_enable_controller(struct pxafb_info *fbi)
742{ 742{
743 DPRINTK("Enabling LCD controller\n"); 743 pr_debug("pxafb: Enabling LCD controller\n");
744 DPRINTK("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr0); 744 pr_debug("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr0);
745 DPRINTK("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr1); 745 pr_debug("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr1);
746 DPRINTK("reg_lccr0 0x%08x\n", (unsigned int) fbi->reg_lccr0); 746 pr_debug("reg_lccr0 0x%08x\n", (unsigned int) fbi->reg_lccr0);
747 DPRINTK("reg_lccr1 0x%08x\n", (unsigned int) fbi->reg_lccr1); 747 pr_debug("reg_lccr1 0x%08x\n", (unsigned int) fbi->reg_lccr1);
748 DPRINTK("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2); 748 pr_debug("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2);
749 DPRINTK("reg_lccr3 0x%08x\n", (unsigned int) fbi->reg_lccr3); 749 pr_debug("reg_lccr3 0x%08x\n", (unsigned int) fbi->reg_lccr3);
750 750
751 /* enable LCD controller clock */ 751 /* enable LCD controller clock */
752 pxa_set_cken(CKEN16_LCD, 1); 752 pxa_set_cken(CKEN16_LCD, 1);
@@ -761,19 +761,19 @@ static void pxafb_enable_controller(struct pxafb_info *fbi)
761 FDADR1 = fbi->fdadr1; 761 FDADR1 = fbi->fdadr1;
762 LCCR0 |= LCCR0_ENB; 762 LCCR0 |= LCCR0_ENB;
763 763
764 DPRINTK("FDADR0 0x%08x\n", (unsigned int) FDADR0); 764 pr_debug("FDADR0 0x%08x\n", (unsigned int) FDADR0);
765 DPRINTK("FDADR1 0x%08x\n", (unsigned int) FDADR1); 765 pr_debug("FDADR1 0x%08x\n", (unsigned int) FDADR1);
766 DPRINTK("LCCR0 0x%08x\n", (unsigned int) LCCR0); 766 pr_debug("LCCR0 0x%08x\n", (unsigned int) LCCR0);
767 DPRINTK("LCCR1 0x%08x\n", (unsigned int) LCCR1); 767 pr_debug("LCCR1 0x%08x\n", (unsigned int) LCCR1);
768 DPRINTK("LCCR2 0x%08x\n", (unsigned int) LCCR2); 768 pr_debug("LCCR2 0x%08x\n", (unsigned int) LCCR2);
769 DPRINTK("LCCR3 0x%08x\n", (unsigned int) LCCR3); 769 pr_debug("LCCR3 0x%08x\n", (unsigned int) LCCR3);
770} 770}
771 771
772static void pxafb_disable_controller(struct pxafb_info *fbi) 772static void pxafb_disable_controller(struct pxafb_info *fbi)
773{ 773{
774 DECLARE_WAITQUEUE(wait, current); 774 DECLARE_WAITQUEUE(wait, current);
775 775
776 DPRINTK("Disabling LCD controller\n"); 776 pr_debug("pxafb: disabling LCD controller\n");
777 777
778 set_current_state(TASK_UNINTERRUPTIBLE); 778 set_current_state(TASK_UNINTERRUPTIBLE);
779 add_wait_queue(&fbi->ctrlr_wait, &wait); 779 add_wait_queue(&fbi->ctrlr_wait, &wait);
@@ -1039,7 +1039,7 @@ static int __init pxafb_map_video_memory(struct pxafb_info *fbi)
1039 fbi->palette_size = fbi->fb.var.bits_per_pixel == 8 ? 256 : 16; 1039 fbi->palette_size = fbi->fb.var.bits_per_pixel == 8 ? 256 : 16;
1040 1040
1041 palette_mem_size = fbi->palette_size * sizeof(u16); 1041 palette_mem_size = fbi->palette_size * sizeof(u16);
1042 DPRINTK("palette_mem_size = 0x%08lx\n", (u_long) palette_mem_size); 1042 pr_debug("pxafb: palette_mem_size = 0x%08lx\n", palette_mem_size);
1043 1043
1044 fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size); 1044 fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size);
1045 fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size; 1045 fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size;
diff --git a/drivers/video/pxafb.h b/drivers/video/pxafb.h
index 22c00be786a8..47f41f70db7a 100644
--- a/drivers/video/pxafb.h
+++ b/drivers/video/pxafb.h
@@ -114,15 +114,6 @@ struct pxafb_info {
114#define PXA_NAME "PXA" 114#define PXA_NAME "PXA"
115 115
116/* 116/*
117 * Debug macros
118 */
119#if DEBUG
120# define DPRINTK(fmt, args...) printk("%s: " fmt, __FUNCTION__ , ## args)
121#else
122# define DPRINTK(fmt, args...)
123#endif
124
125/*
126 * Minimum X and Y resolutions 117 * Minimum X and Y resolutions
127 */ 118 */
128#define MIN_XRES 64 119#define MIN_XRES 64
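The pxafb.c and pxafb.h hunks replace the driver-local DPRINTK() wrapper with the kernel-wide pr_debug(), which already compiles away when debugging is not enabled, so the private macro block in pxafb.h can be deleted. The standalone sketch below approximates that behaviour with a simplified macro; it is not the kernel's actual definition of pr_debug().

/* Simplified illustration: one shared debug macro that expands to nothing
 * unless DEBUG is defined, so drivers need no per-file wrapper. */
#include <stdio.h>

#ifdef DEBUG
#define pr_debug(fmt, ...)	fprintf(stderr, "debug: " fmt, ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...)	do { } while (0)
#endif

int main(void)
{
	int pixclock = 153846, pcd = 8;

	pr_debug("var: pixclock=%d pcd=%d\n", pixclock, pcd);	/* emitted only with -DDEBUG */
	return 0;
}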
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index 00c0223a352e..5ab79afb53b7 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -228,8 +228,8 @@ static int s3c2410fb_check_var(struct fb_var_screeninfo *var,
228 * information 228 * information
229*/ 229*/
230 230
231static int s3c2410fb_activate_var(struct s3c2410fb_info *fbi, 231static void s3c2410fb_activate_var(struct s3c2410fb_info *fbi,
232 struct fb_var_screeninfo *var) 232 struct fb_var_screeninfo *var)
233{ 233{
234 fbi->regs.lcdcon1 &= ~S3C2410_LCDCON1_MODEMASK; 234 fbi->regs.lcdcon1 &= ~S3C2410_LCDCON1_MODEMASK;
235 235
diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c
index 959404ad68f4..3c98457783c4 100644
--- a/drivers/video/savage/savagefb-i2c.c
+++ b/drivers/video/savage/savagefb-i2c.c
@@ -274,10 +274,13 @@ int savagefb_probe_i2c_connector(struct fb_info *info, u8 **out_edid)
274 274
275 if (!edid) { 275 if (!edid) {
276 /* try to get from firmware */ 276 /* try to get from firmware */
277 edid = kmalloc(EDID_LENGTH, GFP_KERNEL); 277 const u8 *e = fb_firmware_edid(info->device);
278 if (edid) 278
279 memcpy(edid, fb_firmware_edid(info->device), 279 if (e) {
280 EDID_LENGTH); 280 edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
281 if (edid)
282 memcpy(edid, e, EDID_LENGTH);
283 }
281 } 284 }
282 285
283 if (out_edid) 286 if (out_edid)
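The savagefb-i2c.c hunk reorders the firmware-EDID fallback so that the pointer returned by fb_firmware_edid() is checked before any allocation or copy, avoiding a memcpy() from a NULL source when the firmware provides no EDID block. Below is a standalone approximation of the corrected flow; firmware_edid() and copy_edid() are stand-ins, not driver functions.

/* Only allocate and copy once the firmware-provided EDID pointer is
 * known to be non-NULL. Userspace sketch with malloc() in place of kmalloc(). */
#include <stdlib.h>
#include <string.h>

#define EDID_LENGTH 128

static const unsigned char *firmware_edid(void)
{
	return NULL;				/* firmware may not provide an EDID */
}

static unsigned char *copy_edid(void)
{
	const unsigned char *e = firmware_edid();
	unsigned char *edid = NULL;

	if (e) {				/* previously the copy ran unconditionally */
		edid = malloc(EDID_LENGTH);
		if (edid)
			memcpy(edid, e, EDID_LENGTH);
	}
	return edid;				/* NULL when no EDID was available */
}

int main(void)
{
	free(copy_edid());
	return 0;
}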
diff --git a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h
index d6f94742c9f2..ea17f7e0482c 100644
--- a/drivers/video/savage/savagefb.h
+++ b/drivers/video/savage/savagefb.h
@@ -60,8 +60,6 @@
60 60
61#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000)) 61#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
62 62
63#define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) || (chip == S3_PROSAVAGEDDR))
64
65/* Chip tags. These are used to group the adapters into 63/* Chip tags. These are used to group the adapters into
66 * related families. 64 * related families.
67 */ 65 */
@@ -74,8 +72,6 @@ typedef enum {
74 S3_PROSAVAGE, 72 S3_PROSAVAGE,
75 S3_SUPERSAVAGE, 73 S3_SUPERSAVAGE,
76 S3_SAVAGE2000, 74 S3_SAVAGE2000,
77 S3_PROSAVAGEDDR,
78 S3_TWISTER,
79 S3_LAST 75 S3_LAST
80} savage_chipset; 76} savage_chipset;
81 77
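The savagefb.h hunk drops the S3_MOBILE_TWISTER_SERIES() macro along with the S3_PROSAVAGEDDR and S3_TWISTER tags; as the savagefb_driver.c hunks below show, those variants are now tagged S3_PROSAVAGE, so a single tag plus the existing range macro is enough. The standalone sketch below loosely mirrors the header's enum and range check; the exact enum layout here is illustrative.

/* One tag for the ProSavage family; the range macro still groups the
 * whole Savage series. Standalone approximation of the header. */
#include <stdio.h>

typedef enum {
	S3_UNKNOWN = 0,
	S3_SAVAGE3D,
	S3_SAVAGE_MX,
	S3_SAVAGE4,
	S3_PROSAVAGE,
	S3_SUPERSAVAGE,
	S3_SAVAGE2000,
	S3_LAST
} savage_chipset;

#define S3_SAVAGE_SERIES(chip)	((chip) >= S3_SAVAGE3D && (chip) <= S3_SAVAGE2000)

int main(void)
{
	savage_chipset chip = S3_PROSAVAGE;	/* TwisterP/TwisterK/ProSavageDDR all map here now */

	printf("savage series: %s\n", S3_SAVAGE_SERIES(chip) ? "yes" : "no");
	return 0;
}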
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index b5ca3ef8271f..7c285455c924 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -1773,8 +1773,7 @@ static int __devinit savage_init_hw (struct savagefb_par *par)
1773 } 1773 }
1774 } 1774 }
1775 1775
1776 if (S3_SAVAGE_MOBILE_SERIES(par->chip) || 1776 if (S3_SAVAGE_MOBILE_SERIES(par->chip) && !par->crtonly)
1777 (S3_MOBILE_TWISTER_SERIES(par->chip) && !par->crtonly))
1778 par->display_type = DISP_LCD; 1777 par->display_type = DISP_LCD;
1779 else if (dvi || (par->chip == S3_SAVAGE4 && par->dvi)) 1778 else if (dvi || (par->chip == S3_SAVAGE4 && par->dvi))
1780 par->display_type = DISP_DFP; 1779 par->display_type = DISP_DFP;
@@ -1783,7 +1782,7 @@ static int __devinit savage_init_hw (struct savagefb_par *par)
1783 1782
1784 /* Check LCD panel information */ 1783 /* Check LCD panel information */
1785 1784
1786 if (par->chip == S3_SAVAGE_MX) { 1785 if (par->display_type == DISP_LCD) {
1787 unsigned char cr6b = VGArCR( 0x6b ); 1786 unsigned char cr6b = VGArCR( 0x6b );
1788 1787
1789 int panelX = (VGArSEQ (0x61) + 1788 int panelX = (VGArSEQ (0x61) +
@@ -1922,15 +1921,15 @@ static int __devinit savage_init_fb_info (struct fb_info *info,
1922 snprintf (info->fix.id, 16, "ProSavageKM"); 1921 snprintf (info->fix.id, 16, "ProSavageKM");
1923 break; 1922 break;
1924 case FB_ACCEL_S3TWISTER_P: 1923 case FB_ACCEL_S3TWISTER_P:
1925 par->chip = S3_TWISTER; 1924 par->chip = S3_PROSAVAGE;
1926 snprintf (info->fix.id, 16, "TwisterP"); 1925 snprintf (info->fix.id, 16, "TwisterP");
1927 break; 1926 break;
1928 case FB_ACCEL_S3TWISTER_K: 1927 case FB_ACCEL_S3TWISTER_K:
1929 par->chip = S3_TWISTER; 1928 par->chip = S3_PROSAVAGE;
1930 snprintf (info->fix.id, 16, "TwisterK"); 1929 snprintf (info->fix.id, 16, "TwisterK");
1931 break; 1930 break;
1932 case FB_ACCEL_PROSAVAGE_DDR: 1931 case FB_ACCEL_PROSAVAGE_DDR:
1933 par->chip = S3_PROSAVAGEDDR; 1932 par->chip = S3_PROSAVAGE;
1934 snprintf (info->fix.id, 16, "ProSavageDDR"); 1933 snprintf (info->fix.id, 16, "ProSavageDDR");
1935 break; 1934 break;
1936 case FB_ACCEL_PROSAVAGE_DDRK: 1935 case FB_ACCEL_PROSAVAGE_DDRK: