Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acorn/char/defkeymap-l7200.c | 68
-rw-r--r--  drivers/acpi/ec.c | 17
-rw-r--r--  drivers/acpi/pci_irq.c | 98
-rw-r--r--  drivers/acpi/utilities/utobject.c | 2
-rw-r--r--  drivers/acpi/video.c | 2
-rw-r--r--  drivers/ata/Kconfig | 9
-rw-r--r--  drivers/ata/Makefile | 1
-rw-r--r--  drivers/ata/ahci.c | 53
-rw-r--r--  drivers/ata/ata_piix.c | 2
-rw-r--r--  drivers/ata/libata-acpi.c | 8
-rw-r--r--  drivers/ata/libata-core.c | 34
-rw-r--r--  drivers/ata/libata-eh.c | 10
-rw-r--r--  drivers/ata/libata-pmp.c | 4
-rw-r--r--  drivers/ata/libata-scsi.c | 22
-rw-r--r--  drivers/ata/libata-sff.c | 36
-rw-r--r--  drivers/ata/libata.h | 1
-rw-r--r--  drivers/ata/pata_atiixp.c | 4
-rw-r--r--  drivers/ata/pata_hpt366.c | 6
-rw-r--r--  drivers/ata/pata_hpt37x.c | 6
-rw-r--r--  drivers/ata/pata_pdc2027x.c | 2
-rw-r--r--  drivers/ata/pata_rb500_cf.c | 314
-rw-r--r--  drivers/ata/pata_serverworks.c | 2
-rw-r--r--  drivers/ata/sata_fsl.c | 8
-rw-r--r--  drivers/ata/sata_svw.c | 77
-rw-r--r--  drivers/atm/firestream.c | 4
-rw-r--r--  drivers/atm/fore200e.c | 4
-rw-r--r--  drivers/atm/idt77252.c | 12
-rw-r--r--  drivers/base/core.c | 14
-rw-r--r--  drivers/base/platform.c | 2
-rw-r--r--  drivers/base/power/main.c | 108
-rw-r--r--  drivers/base/sys.c | 4
-rw-r--r--  drivers/base/transport_class.c | 4
-rw-r--r--  drivers/block/cciss.c | 256
-rw-r--r--  drivers/block/cciss_scsi.c | 10
-rw-r--r--  drivers/block/nbd.c | 6
-rw-r--r--  drivers/block/pktcdvd.c | 21
-rw-r--r--  drivers/block/viodasd.c | 3
-rw-r--r--  drivers/bluetooth/hci_usb.c | 4
-rw-r--r--  drivers/cdrom/cdrom.c | 4
-rw-r--r--  drivers/char/defkeymap.c_shipped | 68
-rw-r--r--  drivers/char/esp.c | 1
-rw-r--r--  drivers/char/isicom.c | 2
-rw-r--r--  drivers/char/nozomi.c | 61
-rw-r--r--  drivers/char/pcmcia/ipwireless/network.c | 5
-rw-r--r--  drivers/char/riscom8.c | 6
-rw-r--r--  drivers/char/rtc.c | 4
-rw-r--r--  drivers/char/specialix.c | 1
-rw-r--r--  drivers/char/vt.c | 1
-rw-r--r--  drivers/char/xilinx_hwicap/buffer_icap.c | 80
-rw-r--r--  drivers/char/xilinx_hwicap/fifo_icap.c | 60
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.c | 138
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.h | 24
-rw-r--r--  drivers/connector/connector.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 20
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 2
-rw-r--r--  drivers/dma/Kconfig | 19
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/fsldma.c | 1067
-rw-r--r--  drivers/dma/fsldma.h | 189
-rw-r--r--  drivers/dma/ioat_dma.c | 2
-rw-r--r--  drivers/firewire/fw-card.c | 61
-rw-r--r--  drivers/firewire/fw-cdev.c | 17
-rw-r--r--  drivers/firewire/fw-device.c | 69
-rw-r--r--  drivers/firewire/fw-device.h | 18
-rw-r--r--  drivers/firewire/fw-sbp2.c | 392
-rw-r--r--  drivers/firewire/fw-topology.c | 1
-rw-r--r--  drivers/firewire/fw-transaction.h | 2
-rw-r--r--  drivers/firmware/dmi_scan.c | 82
-rw-r--r--  drivers/gpio/pca953x.c | 1
-rw-r--r--  drivers/i2c/busses/Kconfig | 2
-rw-r--r--  drivers/i2c/busses/i2c-amd756.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 10
-rw-r--r--  drivers/i2c/busses/i2c-pca-isa.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-pmcmsp.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-pxa.c | 25
-rw-r--r--  drivers/i2c/chips/Makefile | 7
-rw-r--r--  drivers/i2c/i2c-core.c | 6
-rw-r--r--  drivers/ide/Kconfig | 30
-rw-r--r--  drivers/ide/ide-cd.c | 30
-rw-r--r--  drivers/ide/ide-cd_ioctl.c | 4
-rw-r--r--  drivers/ide/ide-disk.c | 5
-rw-r--r--  drivers/ide/ide-dma.c | 54
-rw-r--r--  drivers/ide/ide-probe.c | 3
-rw-r--r--  drivers/ide/ide-tape.c | 5
-rw-r--r--  drivers/ide/ide.c | 24
-rw-r--r--  drivers/ide/legacy/qd65xx.c | 43
-rw-r--r--  drivers/ide/pci/cmd640.c | 3
-rw-r--r--  drivers/ide/pci/hpt366.c | 12
-rw-r--r--  drivers/ide/ppc/pmac.c | 4
-rw-r--r--  drivers/ieee1394/sbp2.c | 15
-rw-r--r--  drivers/ieee1394/sbp2.h | 2
-rw-r--r--  drivers/infiniband/core/cm.c | 3
-rw-r--r--  drivers/infiniband/core/fmr_pool.c | 38
-rw-r--r--  drivers/infiniband/core/iwcm.c | 5
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_mem.c | 10
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 5
-rw-r--r--  drivers/infiniband/hw/nes/nes.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes.h | 15
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 15
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 13
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.h | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 10
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 47
-rw-r--r--  drivers/input/misc/Kconfig | 6
-rw-r--r--  drivers/input/serio/i8042.h | 2
-rw-r--r--  drivers/isdn/gigaset/common.c | 6
-rw-r--r--  drivers/isdn/hisax/hisax_fcpcipnp.c | 34
-rw-r--r--  drivers/isdn/i4l/isdn_common.c | 4
-rw-r--r--  drivers/isdn/i4l/isdn_ttyfax.c | 3
-rw-r--r--  drivers/isdn/i4l/isdn_v110.c | 2
-rw-r--r--  drivers/isdn/isdnloop/isdnloop.c | 6
-rw-r--r--  drivers/lguest/core.c | 15
-rw-r--r--  drivers/lguest/lguest_user.c | 15
-rw-r--r--  drivers/lguest/page_tables.c | 2
-rw-r--r--  drivers/macintosh/mediabay.c | 3
-rw-r--r--  drivers/macintosh/via-pmu-backlight.c | 5
-rw-r--r--  drivers/macintosh/via-pmu.c | 2
-rw-r--r--  drivers/md/bitmap.c | 23
-rw-r--r--  drivers/md/md.c | 50
-rw-r--r--  drivers/md/raid1.c | 73
-rw-r--r--  drivers/md/raid10.c | 87
-rw-r--r--  drivers/memstick/Kconfig | 2
-rw-r--r--  drivers/memstick/core/memstick.c | 33
-rw-r--r--  drivers/memstick/core/mspro_block.c | 106
-rw-r--r--  drivers/memstick/host/Kconfig | 10
-rw-r--r--  drivers/memstick/host/Makefile | 6
-rw-r--r--  drivers/memstick/host/jmb38x_ms.c | 945
-rw-r--r--  drivers/memstick/host/tifm_ms.c | 569
-rw-r--r--  drivers/message/fusion/mptbase.c | 29
-rw-r--r--  drivers/message/fusion/mptbase.h | 2
-rw-r--r--  drivers/message/fusion/mptsas.c | 5
-rw-r--r--  drivers/message/fusion/mptscsih.c | 14
-rw-r--r--  drivers/mfd/sm501.c | 208
-rw-r--r--  drivers/misc/thinkpad_acpi.c | 3
-rw-r--r--  drivers/misc/tifm_7xx1.c | 2
-rw-r--r--  drivers/mtd/ubi/build.c | 4
-rw-r--r--  drivers/mtd/ubi/ubi.h | 10
-rw-r--r--  drivers/mtd/ubi/vmt.c | 4
-rw-r--r--  drivers/mtd/ubi/vtbl.c | 1
-rw-r--r--  drivers/net/Kconfig | 24
-rw-r--r--  drivers/net/ac3200.c | 7
-rw-r--r--  drivers/net/apne.c | 7
-rw-r--r--  drivers/net/appletalk/ltpc.c | 3
-rw-r--r--  drivers/net/arcnet/capmode.c | 6
-rw-r--r--  drivers/net/atarilance.c | 5
-rw-r--r--  drivers/net/bnx2.c | 50
-rw-r--r--  drivers/net/bnx2.h | 1
-rw-r--r--  drivers/net/bnx2x.c | 2663
-rw-r--r--  drivers/net/bnx2x.h | 56
-rw-r--r--  drivers/net/bnx2x_fw_defs.h | 2
-rw-r--r--  drivers/net/bnx2x_hsi.h | 428
-rw-r--r--  drivers/net/bnx2x_init.h | 12
-rw-r--r--  drivers/net/bnx2x_reg.h | 212
-rw-r--r--  drivers/net/cs89x0.c | 12
-rw-r--r--  drivers/net/e100.c | 29
-rw-r--r--  drivers/net/e1000e/82571.c | 22
-rw-r--r--  drivers/net/e1000e/defines.h | 10
-rw-r--r--  drivers/net/e1000e/e1000.h | 3
-rw-r--r--  drivers/net/e1000e/hw.h | 4
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 24
-rw-r--r--  drivers/net/e1000e/lib.c | 50
-rw-r--r--  drivers/net/e1000e/netdev.c | 18
-rw-r--r--  drivers/net/e1000e/phy.c | 10
-rw-r--r--  drivers/net/ehea/ehea.h | 34
-rw-r--r--  drivers/net/ehea/ehea_main.c | 281
-rw-r--r--  drivers/net/enc28j60.c | 3
-rw-r--r--  drivers/net/fec.c | 8
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 7
-rw-r--r--  drivers/net/gianfar.c | 14
-rw-r--r--  drivers/net/igb/igb_main.c | 15
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c | 1
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 2
-rw-r--r--  drivers/net/macb.c | 2
-rw-r--r--  drivers/net/mv643xx_eth.c | 1
-rw-r--r--  drivers/net/niu.c | 9
-rw-r--r--  drivers/net/niu.h | 2
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 10
-rw-r--r--  drivers/net/phy/Kconfig | 1
-rw-r--r--  drivers/net/phy/davicom.c | 17
-rw-r--r--  drivers/net/phy/mdio_bus.c | 4
-rw-r--r--  drivers/net/pppol2tp.c | 77
-rw-r--r--  drivers/net/ps3_gelic_wireless.c | 1
-rw-r--r--  drivers/net/s2io.c | 2
-rw-r--r--  drivers/net/sis190.c | 15
-rw-r--r--  drivers/net/sky2.c | 123
-rw-r--r--  drivers/net/sky2.h | 27
-rw-r--r--  drivers/net/tg3.c | 2
-rw-r--r--  drivers/net/tlan.c | 64
-rw-r--r--  drivers/net/tulip/de2104x.c | 3
-rw-r--r--  drivers/net/tulip/uli526x.c | 12
-rw-r--r--  drivers/net/tun.c | 6
-rw-r--r--  drivers/net/veth.c | 53
-rw-r--r--  drivers/net/via-rhine.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/wan/sbni.c | 2
-rw-r--r--  drivers/net/wireless/ath5k/ath5k.h | 2
-rw-r--r--  drivers/net/wireless/ath5k/base.c | 20
-rw-r--r--  drivers/net/wireless/ath5k/hw.c | 8
-rw-r--r--  drivers/net/wireless/b43/Kconfig | 1
-rw-r--r--  drivers/net/wireless/b43legacy/Kconfig | 1
-rw-r--r--  drivers/net/wireless/b43legacy/main.c | 2
-rw-r--r--  drivers/net/wireless/bcm43xx/Kconfig | 2
-rw-r--r--  drivers/net/wireless/libertas/cmd.c | 2
-rw-r--r--  drivers/net/wireless/libertas/cmdresp.c | 10
-rw-r--r--  drivers/net/wireless/libertas/decl.h | 1
-rw-r--r--  drivers/net/wireless/libertas/main.c | 17
-rw-r--r--  drivers/net/wireless/p54common.c | 20
-rw-r--r--  drivers/net/wireless/p54common.h | 8
-rw-r--r--  drivers/net/wireless/p54usb.c | 2
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 22
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.c | 6
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.c | 6
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 6
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00config.c | 8
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 73
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00reg.h | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 10
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 11
-rw-r--r--  drivers/net/wireless/rtl8180_dev.c | 4
-rw-r--r--  drivers/net/wireless/rtl8187_dev.c | 4
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 12
-rw-r--r--  drivers/parisc/Kconfig | 5
-rw-r--r--  drivers/parisc/ccio-dma.c | 27
-rw-r--r--  drivers/parisc/iommu-helpers.h | 6
-rw-r--r--  drivers/parisc/sba_iommu.c | 52
-rw-r--r--  drivers/pci/bus.c | 10
-rw-r--r--  drivers/pci/dmar.c | 9
-rw-r--r--  drivers/pci/hotplug-pci.c | 2
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 2
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_pci.c | 2
-rw-r--r--  drivers/pci/hotplug/ibmphp_ebda.c | 1
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 5
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c | 2
-rw-r--r--  drivers/pci/hotplug/shpchp_pci.c | 2
-rw-r--r--  drivers/pci/intel-iommu.c | 7
-rw-r--r--  drivers/pci/intel-iommu.h | 5
-rw-r--r--  drivers/pci/iova.c | 3
-rw-r--r--  drivers/pci/iova.h | 3
-rw-r--r--  drivers/pci/pci.c | 1
-rw-r--r--  drivers/pci/probe.c | 8
-rw-r--r--  drivers/pci/quirks.c | 41
-rw-r--r--  drivers/pci/rom.c | 3
-rw-r--r--  drivers/pnp/quirks.c | 100
-rw-r--r--  drivers/rapidio/rio-driver.c | 8
-rw-r--r--  drivers/rtc/Kconfig | 9
-rw-r--r--  drivers/rtc/Makefile | 1
-rw-r--r--  drivers/rtc/rtc-cmos.c | 2
-rw-r--r--  drivers/rtc/rtc-s35390a.c | 316
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 6
-rw-r--r--  drivers/s390/block/dasd_proc.c | 4
-rw-r--r--  drivers/s390/char/defkeymap.c | 4
-rw-r--r--  drivers/s390/char/sclp_vt220.c | 2
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 12
-rw-r--r--  drivers/s390/net/claw.c | 39
-rw-r--r--  drivers/scsi/Kconfig | 10
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm_pci.c | 2
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 11
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx.h | 1
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_hwi.h | 3
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_scb.c | 14
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_task.c | 4
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_tmf.c | 308
-rw-r--r--  drivers/scsi/arcmsr/arcmsr.h | 2
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 26
-rw-r--r--  drivers/scsi/arm/fas216.h | 2
-rw-r--r--  drivers/scsi/gdth.c | 112
-rw-r--r--  drivers/scsi/gdth.h | 1
-rw-r--r--  drivers/scsi/gdth_proc.c | 6
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c | 9
-rw-r--r--  drivers/scsi/ips.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 39
-rw-r--r--  drivers/scsi/libsas/sas_port.c | 11
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 102
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 18
-rw-r--r--  drivers/scsi/megaraid.c | 10
-rw-r--r--  drivers/scsi/mesh.c | 1
-rw-r--r--  drivers/scsi/mvsas.c | 2969
-rw-r--r--  drivers/scsi/ps3rom.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 39
-rw-r--r--  drivers/scsi/qlogicpti.c | 12
-rw-r--r--  drivers/scsi/scsi.c | 2
-rw-r--r--  drivers/scsi/scsi_debug.c | 2
-rw-r--r--  drivers/scsi/scsi_scan.c | 3
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c | 6
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 80
-rw-r--r--  drivers/scsi/sd.c | 3
-rw-r--r--  drivers/scsi/ses.c | 126
-rw-r--r--  drivers/scsi/st.c | 11
-rw-r--r--  drivers/scsi/st.h | 1
-rw-r--r--  drivers/scsi/stex.c | 44
-rw-r--r--  drivers/serial/8250_pnp.c | 2
-rw-r--r--  drivers/serial/Kconfig | 6
-rw-r--r--  drivers/serial/atmel_serial.c | 2
-rw-r--r--  drivers/serial/bfin_5xx.c | 281
-rw-r--r--  drivers/serial/m32r_sio.c | 2
-rw-r--r--  drivers/serial/of_serial.c | 2
-rw-r--r--  drivers/serial/sh-sci.c | 2
-rw-r--r--  drivers/sh/maple/maple.c | 66
-rw-r--r--  drivers/spi/atmel_spi.c | 10
-rw-r--r--  drivers/spi/mpc52xx_psc_spi.c | 11
-rw-r--r--  drivers/spi/pxa2xx_spi.c | 41
-rw-r--r--  drivers/ssb/Kconfig | 11
-rw-r--r--  drivers/ssb/Makefile | 3
-rw-r--r--  drivers/ssb/driver_chipcommon.c | 65
-rw-r--r--  drivers/ssb/driver_extif.c | 25
-rw-r--r--  drivers/ssb/driver_pcicore.c | 52
-rw-r--r--  drivers/ssb/embedded.c | 132
-rw-r--r--  drivers/ssb/main.c | 4
-rw-r--r--  drivers/ssb/ssb_private.h | 4
-rw-r--r--  drivers/usb/core/Kconfig | 9
-rw-r--r--  drivers/usb/core/quirks.c | 21
-rw-r--r--  drivers/usb/core/usb.c | 8
-rw-r--r--  drivers/usb/gadget/Kconfig | 2
-rw-r--r--  drivers/usb/gadget/printer.c | 1
-rw-r--r--  drivers/usb/gadget/pxa2xx_udc.c | 88
-rw-r--r--  drivers/usb/gadget/pxa2xx_udc.h | 4
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 62
-rw-r--r--  drivers/usb/host/ehci-q.c | 2
-rw-r--r--  drivers/usb/host/isp116x-hcd.c | 15
-rw-r--r--  drivers/usb/host/isp116x.h | 1
-rw-r--r--  drivers/usb/host/sl811-hcd.c | 1
-rw-r--r--  drivers/usb/host/u132-hcd.c | 15
-rw-r--r--  drivers/usb/serial/cypress_m8.c | 2
-rw-r--r--  drivers/usb/serial/cypress_m8.h | 4
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 26
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 10
-rw-r--r--  drivers/usb/serial/generic.c | 10
-rw-r--r--  drivers/usb/serial/mos7840.c | 15
-rw-r--r--  drivers/usb/serial/option.c | 79
-rw-r--r--  drivers/usb/storage/protocol.c | 2
-rw-r--r--  drivers/usb/storage/sddr55.c | 4
-rw-r--r--  drivers/video/Kconfig | 13
-rw-r--r--  drivers/video/Makefile | 1
-rw-r--r--  drivers/video/bf54x-lq043fb.c | 6
-rw-r--r--  drivers/video/bfin-t350mcqb-fb.c | 685
-rw-r--r--  drivers/video/chipsfb.c | 2
-rw-r--r--  drivers/video/hitfb.c | 4
-rw-r--r--  drivers/video/mbx/mbxfb.c | 2
-rw-r--r--  drivers/video/nvidia/nvidia.c | 2
-rw-r--r--  drivers/video/pvr2fb.c | 12
-rw-r--r--  drivers/video/sm501fb.c | 20
-rw-r--r--  drivers/video/stifb.c | 22
-rw-r--r--  drivers/video/tridentfb.c | 55
-rw-r--r--  drivers/video/uvesafb.c | 2
-rw-r--r--  drivers/w1/masters/ds1wm.c | 23
-rw-r--r--  drivers/watchdog/cpu5wdt.c | 4
-rw-r--r--  drivers/watchdog/hpwdt.c | 214
-rw-r--r--  drivers/watchdog/it8712f_wdt.c | 78
-rw-r--r--  drivers/watchdog/machzwd.c | 2
-rw-r--r--  drivers/watchdog/mtx-1_wdt.c | 4
-rw-r--r--  drivers/watchdog/pcwd_usb.c | 4
-rw-r--r--  drivers/watchdog/s3c2410_wdt.c | 8
-rw-r--r--  drivers/watchdog/shwdt.c | 2
368 files changed, 13732 insertions(+), 4392 deletions(-)
diff --git a/drivers/acorn/char/defkeymap-l7200.c b/drivers/acorn/char/defkeymap-l7200.c
index 28a5fbc6aa1a..93d80a1c36f9 100644
--- a/drivers/acorn/char/defkeymap-l7200.c
+++ b/drivers/acorn/char/defkeymap-l7200.c
@@ -347,40 +347,40 @@ char *func_table[MAX_NR_FUNC] = {
347}; 347};
348 348
349struct kbdiacruc accent_table[MAX_DIACR] = { 349struct kbdiacruc accent_table[MAX_DIACR] = {
350 {'`', 'A', '\300'}, {'`', 'a', '\340'}, 350 {'`', 'A', 0300}, {'`', 'a', 0340},
351 {'\'', 'A', '\301'}, {'\'', 'a', '\341'}, 351 {'\'', 'A', 0301}, {'\'', 'a', 0341},
352 {'^', 'A', '\302'}, {'^', 'a', '\342'}, 352 {'^', 'A', 0302}, {'^', 'a', 0342},
353 {'~', 'A', '\303'}, {'~', 'a', '\343'}, 353 {'~', 'A', 0303}, {'~', 'a', 0343},
354 {'"', 'A', '\304'}, {'"', 'a', '\344'}, 354 {'"', 'A', 0304}, {'"', 'a', 0344},
355 {'O', 'A', '\305'}, {'o', 'a', '\345'}, 355 {'O', 'A', 0305}, {'o', 'a', 0345},
356 {'0', 'A', '\305'}, {'0', 'a', '\345'}, 356 {'0', 'A', 0305}, {'0', 'a', 0345},
357 {'A', 'A', '\305'}, {'a', 'a', '\345'}, 357 {'A', 'A', 0305}, {'a', 'a', 0345},
358 {'A', 'E', '\306'}, {'a', 'e', '\346'}, 358 {'A', 'E', 0306}, {'a', 'e', 0346},
359 {',', 'C', '\307'}, {',', 'c', '\347'}, 359 {',', 'C', 0307}, {',', 'c', 0347},
360 {'`', 'E', '\310'}, {'`', 'e', '\350'}, 360 {'`', 'E', 0310}, {'`', 'e', 0350},
361 {'\'', 'E', '\311'}, {'\'', 'e', '\351'}, 361 {'\'', 'E', 0311}, {'\'', 'e', 0351},
362 {'^', 'E', '\312'}, {'^', 'e', '\352'}, 362 {'^', 'E', 0312}, {'^', 'e', 0352},
363 {'"', 'E', '\313'}, {'"', 'e', '\353'}, 363 {'"', 'E', 0313}, {'"', 'e', 0353},
364 {'`', 'I', '\314'}, {'`', 'i', '\354'}, 364 {'`', 'I', 0314}, {'`', 'i', 0354},
365 {'\'', 'I', '\315'}, {'\'', 'i', '\355'}, 365 {'\'', 'I', 0315}, {'\'', 'i', 0355},
366 {'^', 'I', '\316'}, {'^', 'i', '\356'}, 366 {'^', 'I', 0316}, {'^', 'i', 0356},
367 {'"', 'I', '\317'}, {'"', 'i', '\357'}, 367 {'"', 'I', 0317}, {'"', 'i', 0357},
368 {'-', 'D', '\320'}, {'-', 'd', '\360'}, 368 {'-', 'D', 0320}, {'-', 'd', 0360},
369 {'~', 'N', '\321'}, {'~', 'n', '\361'}, 369 {'~', 'N', 0321}, {'~', 'n', 0361},
370 {'`', 'O', '\322'}, {'`', 'o', '\362'}, 370 {'`', 'O', 0322}, {'`', 'o', 0362},
371 {'\'', 'O', '\323'}, {'\'', 'o', '\363'}, 371 {'\'', 'O', 0323}, {'\'', 'o', 0363},
372 {'^', 'O', '\324'}, {'^', 'o', '\364'}, 372 {'^', 'O', 0324}, {'^', 'o', 0364},
373 {'~', 'O', '\325'}, {'~', 'o', '\365'}, 373 {'~', 'O', 0325}, {'~', 'o', 0365},
374 {'"', 'O', '\326'}, {'"', 'o', '\366'}, 374 {'"', 'O', 0326}, {'"', 'o', 0366},
375 {'/', 'O', '\330'}, {'/', 'o', '\370'}, 375 {'/', 'O', 0330}, {'/', 'o', 0370},
376 {'`', 'U', '\331'}, {'`', 'u', '\371'}, 376 {'`', 'U', 0331}, {'`', 'u', 0371},
377 {'\'', 'U', '\332'}, {'\'', 'u', '\372'}, 377 {'\'', 'U', 0332}, {'\'', 'u', 0372},
378 {'^', 'U', '\333'}, {'^', 'u', '\373'}, 378 {'^', 'U', 0333}, {'^', 'u', 0373},
379 {'"', 'U', '\334'}, {'"', 'u', '\374'}, 379 {'"', 'U', 0334}, {'"', 'u', 0374},
380 {'\'', 'Y', '\335'}, {'\'', 'y', '\375'}, 380 {'\'', 'Y', 0335}, {'\'', 'y', 0375},
381 {'T', 'H', '\336'}, {'t', 'h', '\376'}, 381 {'T', 'H', 0336}, {'t', 'h', 0376},
382 {'s', 's', '\337'}, {'"', 'y', '\377'}, 382 {'s', 's', 0337}, {'"', 'y', 0377},
383 {'s', 'z', '\337'}, {'i', 'j', '\377'}, 383 {'s', 'z', 0337}, {'i', 'j', 0377},
384}; 384};
385 385
386unsigned int accent_table_size = 68; 386unsigned int accent_table_size = 68;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index caf873c14bfb..e7e197e3a4ff 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -129,6 +129,7 @@ static struct acpi_ec {
129 struct mutex lock; 129 struct mutex lock;
130 wait_queue_head_t wait; 130 wait_queue_head_t wait;
131 struct list_head list; 131 struct list_head list;
132 atomic_t irq_count;
132 u8 handlers_installed; 133 u8 handlers_installed;
133} *boot_ec, *first_ec; 134} *boot_ec, *first_ec;
134 135
@@ -181,6 +182,8 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
181{ 182{
182 int ret = 0; 183 int ret = 0;
183 184
185 atomic_set(&ec->irq_count, 0);
186
184 if (unlikely(event == ACPI_EC_EVENT_OBF_1 && 187 if (unlikely(event == ACPI_EC_EVENT_OBF_1 &&
185 test_bit(EC_FLAGS_NO_OBF1_GPE, &ec->flags))) 188 test_bit(EC_FLAGS_NO_OBF1_GPE, &ec->flags)))
186 force_poll = 1; 189 force_poll = 1;
@@ -227,6 +230,7 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
227 while (time_before(jiffies, delay)) { 230 while (time_before(jiffies, delay)) {
228 if (acpi_ec_check_status(ec, event)) 231 if (acpi_ec_check_status(ec, event))
229 goto end; 232 goto end;
233 msleep(5);
230 } 234 }
231 } 235 }
232 pr_err(PREFIX "acpi_ec_wait timeout," 236 pr_err(PREFIX "acpi_ec_wait timeout,"
@@ -529,6 +533,13 @@ static u32 acpi_ec_gpe_handler(void *data)
529 struct acpi_ec *ec = data; 533 struct acpi_ec *ec = data;
530 534
531 pr_debug(PREFIX "~~~> interrupt\n"); 535 pr_debug(PREFIX "~~~> interrupt\n");
536 atomic_inc(&ec->irq_count);
537 if (atomic_read(&ec->irq_count) > 5) {
538 pr_err(PREFIX "GPE storm detected, disabling EC GPE\n");
539 acpi_disable_gpe(NULL, ec->gpe, ACPI_ISR);
540 clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
541 return ACPI_INTERRUPT_HANDLED;
542 }
532 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags); 543 clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
533 if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) 544 if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags))
534 wake_up(&ec->wait); 545 wake_up(&ec->wait);
@@ -943,11 +954,7 @@ int __init acpi_ec_ecdt_probe(void)
943 boot_ec->command_addr = ecdt_ptr->control.address; 954 boot_ec->command_addr = ecdt_ptr->control.address;
944 boot_ec->data_addr = ecdt_ptr->data.address; 955 boot_ec->data_addr = ecdt_ptr->data.address;
945 boot_ec->gpe = ecdt_ptr->gpe; 956 boot_ec->gpe = ecdt_ptr->gpe;
946 if (ACPI_FAILURE(acpi_get_handle(NULL, ecdt_ptr->id, 957 boot_ec->handle = ACPI_ROOT_OBJECT;
947 &boot_ec->handle))) {
948 pr_info("Failed to locate handle for boot EC\n");
949 boot_ec->handle = ACPI_ROOT_OBJECT;
950 }
951 } else { 958 } else {
952 /* This workaround is needed only on some broken machines, 959 /* This workaround is needed only on some broken machines,
953 * which require early EC, but fail to provide ECDT */ 960 * which require early EC, but fail to provide ECDT */
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 7f19859580c7..7af414a3c63e 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -25,6 +25,7 @@
25 */ 25 */
26 26
27 27
28#include <linux/dmi.h>
28#include <linux/kernel.h> 29#include <linux/kernel.h>
29#include <linux/module.h> 30#include <linux/module.h>
30#include <linux/init.h> 31#include <linux/init.h>
@@ -76,6 +77,101 @@ static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(int segment,
76 return NULL; 77 return NULL;
77} 78}
78 79
80/* http://bugzilla.kernel.org/show_bug.cgi?id=4773 */
81static struct dmi_system_id medion_md9580[] = {
82 {
83 .ident = "Medion MD9580-F laptop",
84 .matches = {
85 DMI_MATCH(DMI_SYS_VENDOR, "MEDIONNB"),
86 DMI_MATCH(DMI_PRODUCT_NAME, "A555"),
87 },
88 },
89 { }
90};
91
92/* http://bugzilla.kernel.org/show_bug.cgi?id=5044 */
93static struct dmi_system_id dell_optiplex[] = {
94 {
95 .ident = "Dell Optiplex GX1",
96 .matches = {
97 DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
98 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex GX1 600S+"),
99 },
100 },
101 { }
102};
103
104/* http://bugzilla.kernel.org/show_bug.cgi?id=10138 */
105static struct dmi_system_id hp_t5710[] = {
106 {
107 .ident = "HP t5710",
108 .matches = {
109 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
110 DMI_MATCH(DMI_PRODUCT_NAME, "hp t5000 series"),
111 DMI_MATCH(DMI_BOARD_NAME, "098Ch"),
112 },
113 },
114 { }
115};
116
117struct prt_quirk {
118 struct dmi_system_id *system;
119 unsigned int segment;
120 unsigned int bus;
121 unsigned int device;
122 unsigned char pin;
123 char *source; /* according to BIOS */
124 char *actual_source;
125};
126
127/*
128 * These systems have incorrect _PRT entries. The BIOS claims the PCI
129 * interrupt at the listed segment/bus/device/pin is connected to the first
130 * link device, but it is actually connected to the second.
131 */
132static struct prt_quirk prt_quirks[] = {
133 { medion_md9580, 0, 0, 9, 'A',
134 "\\_SB_.PCI0.ISA.LNKA",
135 "\\_SB_.PCI0.ISA.LNKB"},
136 { dell_optiplex, 0, 0, 0xd, 'A',
137 "\\_SB_.LNKB",
138 "\\_SB_.LNKA"},
139 { hp_t5710, 0, 0, 1, 'A',
140 "\\_SB_.PCI0.LNK1",
141 "\\_SB_.PCI0.LNK3"},
142};
143
144static void
145do_prt_fixups(struct acpi_prt_entry *entry, struct acpi_pci_routing_table *prt)
146{
147 int i;
148 struct prt_quirk *quirk;
149
150 for (i = 0; i < ARRAY_SIZE(prt_quirks); i++) {
151 quirk = &prt_quirks[i];
152
153 /* All current quirks involve link devices, not GSIs */
154 if (!prt->source)
155 continue;
156
157 if (dmi_check_system(quirk->system) &&
158 entry->id.segment == quirk->segment &&
159 entry->id.bus == quirk->bus &&
160 entry->id.device == quirk->device &&
161 entry->pin + 'A' == quirk->pin &&
162 !strcmp(prt->source, quirk->source) &&
163 strlen(prt->source) >= strlen(quirk->actual_source)) {
164 printk(KERN_WARNING PREFIX "firmware reports "
165 "%04x:%02x:%02x[%c] connected to %s; "
166 "changing to %s\n",
167 entry->id.segment, entry->id.bus,
168 entry->id.device, 'A' + entry->pin,
169 prt->source, quirk->actual_source);
170 strcpy(prt->source, quirk->actual_source);
171 }
172 }
173}
174
79static int 175static int
80acpi_pci_irq_add_entry(acpi_handle handle, 176acpi_pci_irq_add_entry(acpi_handle handle,
81 int segment, int bus, struct acpi_pci_routing_table *prt) 177 int segment, int bus, struct acpi_pci_routing_table *prt)
@@ -96,6 +192,8 @@ acpi_pci_irq_add_entry(acpi_handle handle,
96 entry->id.function = prt->address & 0xFFFF; 192 entry->id.function = prt->address & 0xFFFF;
97 entry->pin = prt->pin; 193 entry->pin = prt->pin;
98 194
195 do_prt_fixups(entry, prt);
196
99 /* 197 /*
100 * Type 1: Dynamic 198 * Type 1: Dynamic
101 * --------------- 199 * ---------------
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
index 76ee766c84f9..e08b3fa6639f 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/utilities/utobject.c
@@ -432,7 +432,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
432 * element -- which is legal) 432 * element -- which is legal)
433 */ 433 */
434 if (!internal_object) { 434 if (!internal_object) {
435 *obj_length = 0; 435 *obj_length = sizeof(union acpi_object);
436 return_ACPI_STATUS(AE_OK); 436 return_ACPI_STATUS(AE_OK);
437 } 437 }
438 438
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 12cce69b5441..ace958cb3894 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -713,7 +713,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
713 713
714 kfree(obj); 714 kfree(obj);
715 715
716 if (device->cap._BCL && device->cap._BCM && device->cap._BQC && max_level > 0){ 716 if (device->cap._BCL && device->cap._BCM && max_level > 0) {
717 int result; 717 int result;
718 static int count = 0; 718 static int count = 0;
719 char *name; 719 char *name;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index ba8f7f4dfa11..e469647330de 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -538,6 +538,15 @@ config PATA_RADISYS
538 538
539 If unsure, say N. 539 If unsure, say N.
540 540
541config PATA_RB500
542 tristate "RouterBoard 500 PATA CompactFlash support"
543 depends on MIKROTIK_RB500
544 help
545 This option enables support for the RouterBoard 500
546 PATA CompactFlash controller.
547
548 If unsure, say N.
549
541config PATA_RZ1000 550config PATA_RZ1000
542 tristate "PC Tech RZ1000 PATA support" 551 tristate "PC Tech RZ1000 PATA support"
543 depends on PCI 552 depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 701651e37c89..0511e6f0bb58 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o
55obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o 55obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
56obj-$(CONFIG_PATA_QDI) += pata_qdi.o 56obj-$(CONFIG_PATA_QDI) += pata_qdi.o
57obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o 57obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
58obj-$(CONFIG_PATA_RB500) += pata_rb500_cf.o
58obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o 59obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
59obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o 60obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
60obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o 61obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 3c06e457b4dc..6978469eb16d 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -85,6 +85,7 @@ enum {
85 board_ahci_ign_iferr = 2, 85 board_ahci_ign_iferr = 2,
86 board_ahci_sb600 = 3, 86 board_ahci_sb600 = 3,
87 board_ahci_mv = 4, 87 board_ahci_mv = 4,
88 board_ahci_sb700 = 5,
88 89
89 /* global controller registers */ 90 /* global controller registers */
90 HOST_CAP = 0x00, /* host capabilities */ 91 HOST_CAP = 0x00, /* host capabilities */
@@ -185,6 +186,7 @@ enum {
185 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ 186 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
186 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ 187 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
187 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ 188 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
189 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
188 190
189 /* ap->flags bits */ 191 /* ap->flags bits */
190 192
@@ -254,6 +256,7 @@ static void ahci_vt8251_error_handler(struct ata_port *ap);
254static void ahci_p5wdh_error_handler(struct ata_port *ap); 256static void ahci_p5wdh_error_handler(struct ata_port *ap);
255static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); 257static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
256static int ahci_port_resume(struct ata_port *ap); 258static int ahci_port_resume(struct ata_port *ap);
259static void ahci_dev_config(struct ata_device *dev);
257static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl); 260static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
258static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, 261static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
259 u32 opts); 262 u32 opts);
@@ -293,6 +296,8 @@ static const struct ata_port_operations ahci_ops = {
293 .check_altstatus = ahci_check_status, 296 .check_altstatus = ahci_check_status,
294 .dev_select = ata_noop_dev_select, 297 .dev_select = ata_noop_dev_select,
295 298
299 .dev_config = ahci_dev_config,
300
296 .tf_read = ahci_tf_read, 301 .tf_read = ahci_tf_read,
297 302
298 .qc_defer = sata_pmp_qc_defer_cmd_switch, 303 .qc_defer = sata_pmp_qc_defer_cmd_switch,
@@ -424,7 +429,7 @@ static const struct ata_port_info ahci_port_info[] = {
424 /* board_ahci_sb600 */ 429 /* board_ahci_sb600 */
425 { 430 {
426 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | 431 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
427 AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_PMP), 432 AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP),
428 .flags = AHCI_FLAG_COMMON, 433 .flags = AHCI_FLAG_COMMON,
429 .link_flags = AHCI_LFLAG_COMMON, 434 .link_flags = AHCI_LFLAG_COMMON,
430 .pio_mask = 0x1f, /* pio0-4 */ 435 .pio_mask = 0x1f, /* pio0-4 */
@@ -442,6 +447,16 @@ static const struct ata_port_info ahci_port_info[] = {
442 .udma_mask = ATA_UDMA6, 447 .udma_mask = ATA_UDMA6,
443 .port_ops = &ahci_ops, 448 .port_ops = &ahci_ops,
444 }, 449 },
450 /* board_ahci_sb700 */
451 {
452 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
453 AHCI_HFLAG_NO_PMP),
454 .flags = AHCI_FLAG_COMMON,
455 .link_flags = AHCI_LFLAG_COMMON,
456 .pio_mask = 0x1f, /* pio0-4 */
457 .udma_mask = ATA_UDMA6,
458 .port_ops = &ahci_ops,
459 },
445}; 460};
446 461
447static const struct pci_device_id ahci_pci_tbl[] = { 462static const struct pci_device_id ahci_pci_tbl[] = {
@@ -484,12 +499,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
484 499
485 /* ATI */ 500 /* ATI */
486 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */ 501 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
487 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700/800 */ 502 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
488 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb600 }, /* ATI SB700/800 */ 503 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
489 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb600 }, /* ATI SB700/800 */ 504 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
490 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb600 }, /* ATI SB700/800 */ 505 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
491 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb600 }, /* ATI SB700/800 */ 506 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
492 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb600 }, /* ATI SB700/800 */ 507 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
493 508
494 /* VIA */ 509 /* VIA */
495 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */ 510 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
@@ -552,6 +567,18 @@ static const struct pci_device_id ahci_pci_tbl[] = {
552 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */ 567 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
553 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */ 568 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
554 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */ 569 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
570 { PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci }, /* MCP7B */
571 { PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci }, /* MCP7B */
572 { PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci }, /* MCP7B */
573 { PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci }, /* MCP7B */
574 { PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci }, /* MCP7B */
575 { PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */
576 { PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */
577 { PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */
578 { PCI_VDEVICE(NVIDIA, 0x0bd0), board_ahci }, /* MCP7B */
579 { PCI_VDEVICE(NVIDIA, 0x0bd1), board_ahci }, /* MCP7B */
580 { PCI_VDEVICE(NVIDIA, 0x0bd2), board_ahci }, /* MCP7B */
581 { PCI_VDEVICE(NVIDIA, 0x0bd3), board_ahci }, /* MCP7B */
555 582
556 /* SiS */ 583 /* SiS */
557 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ 584 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
@@ -657,7 +684,7 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
657 cap &= ~HOST_CAP_NCQ; 684 cap &= ~HOST_CAP_NCQ;
658 } 685 }
659 686
660 if ((cap && HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) { 687 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
661 dev_printk(KERN_INFO, &pdev->dev, 688 dev_printk(KERN_INFO, &pdev->dev,
662 "controller can't do PMP, turning off CAP_PMP\n"); 689 "controller can't do PMP, turning off CAP_PMP\n");
663 cap &= ~HOST_CAP_PMP; 690 cap &= ~HOST_CAP_PMP;
@@ -1165,6 +1192,14 @@ static void ahci_init_controller(struct ata_host *host)
1165 VPRINTK("HOST_CTL 0x%x\n", tmp); 1192 VPRINTK("HOST_CTL 0x%x\n", tmp);
1166} 1193}
1167 1194
1195static void ahci_dev_config(struct ata_device *dev)
1196{
1197 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1198
1199 if (hpriv->flags & AHCI_HFLAG_SECT255)
1200 dev->max_sectors = 255;
1201}
1202
1168static unsigned int ahci_dev_classify(struct ata_port *ap) 1203static unsigned int ahci_dev_classify(struct ata_port *ap)
1169{ 1204{
1170 void __iomem *port_mmio = ahci_port_base(ap); 1205 void __iomem *port_mmio = ahci_port_base(ap);
@@ -1932,7 +1967,7 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
1932 void __iomem *mmio = host->iomap[AHCI_PCI_BAR]; 1967 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1933 u32 ctl; 1968 u32 ctl;
1934 1969
1935 if (mesg.event == PM_EVENT_SUSPEND) { 1970 if (mesg.event & PM_EVENT_SLEEP) {
1936 /* AHCI spec rev1.1 section 8.3.3: 1971 /* AHCI spec rev1.1 section 8.3.3:
1937 * Software must disable interrupts prior to requesting a 1972 * Software must disable interrupts prior to requesting a
1938 * transition of the HBA to D3 state. 1973 * transition of the HBA to D3 state.
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 752e7d2f3b2f..fae8404254c0 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1339,7 +1339,7 @@ static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
1339 * cycles and power trying to do something to the sleeping 1339 * cycles and power trying to do something to the sleeping
1340 * beauty. 1340 * beauty.
1341 */ 1341 */
1342 if (piix_broken_suspend() && mesg.event == PM_EVENT_SUSPEND) { 1342 if (piix_broken_suspend() && (mesg.event & PM_EVENT_SLEEP)) {
1343 pci_save_state(pdev); 1343 pci_save_state(pdev);
1344 1344
1345 /* mark its power state as "unknown", since we don't 1345 /* mark its power state as "unknown", since we don't
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 9e8ec19260af..0770cb7391a4 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -382,7 +382,7 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
382 382
383 if (ata_msg_probe(ap)) 383 if (ata_msg_probe(ap))
384 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER: port#: %d\n", 384 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER: port#: %d\n",
385 __FUNCTION__, ap->port_no); 385 __func__, ap->port_no);
386 386
387 /* _GTF has no input parameters */ 387 /* _GTF has no input parameters */
388 status = acpi_evaluate_object(dev->acpi_handle, "_GTF", NULL, &output); 388 status = acpi_evaluate_object(dev->acpi_handle, "_GTF", NULL, &output);
@@ -402,7 +402,7 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
402 if (ata_msg_probe(ap)) 402 if (ata_msg_probe(ap))
403 ata_dev_printk(dev, KERN_DEBUG, "%s: Run _GTF: " 403 ata_dev_printk(dev, KERN_DEBUG, "%s: Run _GTF: "
404 "length or ptr is NULL (0x%llx, 0x%p)\n", 404 "length or ptr is NULL (0x%llx, 0x%p)\n",
405 __FUNCTION__, 405 __func__,
406 (unsigned long long)output.length, 406 (unsigned long long)output.length,
407 output.pointer); 407 output.pointer);
408 rc = -EINVAL; 408 rc = -EINVAL;
@@ -432,7 +432,7 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
432 if (ata_msg_probe(ap)) 432 if (ata_msg_probe(ap))
433 ata_dev_printk(dev, KERN_DEBUG, 433 ata_dev_printk(dev, KERN_DEBUG,
434 "%s: returning gtf=%p, gtf_count=%d\n", 434 "%s: returning gtf=%p, gtf_count=%d\n",
435 __FUNCTION__, *gtf, rc); 435 __func__, *gtf, rc);
436 } 436 }
437 return rc; 437 return rc;
438 438
@@ -725,7 +725,7 @@ static int ata_acpi_push_id(struct ata_device *dev)
725 725
726 if (ata_msg_probe(ap)) 726 if (ata_msg_probe(ap))
727 ata_dev_printk(dev, KERN_DEBUG, "%s: ix = %d, port#: %d\n", 727 ata_dev_printk(dev, KERN_DEBUG, "%s: ix = %d, port#: %d\n",
728 __FUNCTION__, dev->devno, ap->port_no); 728 __func__, dev->devno, ap->port_no);
729 729
730 /* Give the drive Identify data to the drive via the _SDD method */ 730 /* Give the drive Identify data to the drive via the _SDD method */
731 /* _SDD: set up input parameters */ 731 /* _SDD: set up input parameters */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 60d1bb556973..4bbe31f98ef8 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -106,14 +106,15 @@ static struct ata_force_ent *ata_force_tbl;
106static int ata_force_tbl_size; 106static int ata_force_tbl_size;
107 107
108static char ata_force_param_buf[PAGE_SIZE] __initdata; 108static char ata_force_param_buf[PAGE_SIZE] __initdata;
109module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0444); 109/* param_buf is thrown away after initialization, disallow read */
110module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
110MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)"); 111MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
111 112
112int atapi_enabled = 1; 113int atapi_enabled = 1;
113module_param(atapi_enabled, int, 0444); 114module_param(atapi_enabled, int, 0444);
114MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); 115MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
115 116
116int atapi_dmadir = 0; 117static int atapi_dmadir = 0;
117module_param(atapi_dmadir, int, 0444); 118module_param(atapi_dmadir, int, 0444);
118MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); 119MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
119 120
@@ -153,7 +154,7 @@ MODULE_VERSION(DRV_VERSION);
153 154
154/** 155/**
155 * ata_force_cbl - force cable type according to libata.force 156 * ata_force_cbl - force cable type according to libata.force
156 * @link: ATA link of interest 157 * @ap: ATA port of interest
157 * 158 *
158 * Force cable type according to libata.force and whine about it. 159 * Force cable type according to libata.force and whine about it.
159 * The last entry which has matching port number is used, so it 160 * The last entry which has matching port number is used, so it
@@ -1719,7 +1720,7 @@ void ata_port_flush_task(struct ata_port *ap)
1719 cancel_rearming_delayed_work(&ap->port_task); 1720 cancel_rearming_delayed_work(&ap->port_task);
1720 1721
1721 if (ata_msg_ctl(ap)) 1722 if (ata_msg_ctl(ap))
1722 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__); 1723 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1723} 1724}
1724 1725
1725static void ata_qc_complete_internal(struct ata_queued_cmd *qc) 1726static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
@@ -2056,7 +2057,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
2056 int rc; 2057 int rc;
2057 2058
2058 if (ata_msg_ctl(ap)) 2059 if (ata_msg_ctl(ap))
2059 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__); 2060 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2060 2061
2061 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ 2062 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
2062 retry: 2063 retry:
@@ -2253,12 +2254,12 @@ int ata_dev_configure(struct ata_device *dev)
2253 2254
2254 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 2255 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2255 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n", 2256 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2256 __FUNCTION__); 2257 __func__);
2257 return 0; 2258 return 0;
2258 } 2259 }
2259 2260
2260 if (ata_msg_probe(ap)) 2261 if (ata_msg_probe(ap))
2261 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__); 2262 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2262 2263
2263 /* set horkage */ 2264 /* set horkage */
2264 dev->horkage |= ata_dev_blacklisted(dev); 2265 dev->horkage |= ata_dev_blacklisted(dev);
@@ -2279,7 +2280,7 @@ int ata_dev_configure(struct ata_device *dev)
2279 ata_dev_printk(dev, KERN_DEBUG, 2280 ata_dev_printk(dev, KERN_DEBUG,
2280 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2281 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2281 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2282 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2282 __FUNCTION__, 2283 __func__,
2283 id[49], id[82], id[83], id[84], 2284 id[49], id[82], id[83], id[84],
2284 id[85], id[86], id[87], id[88]); 2285 id[85], id[86], id[87], id[88]);
2285 2286
@@ -2396,6 +2397,7 @@ int ata_dev_configure(struct ata_device *dev)
2396 else if (dev->class == ATA_DEV_ATAPI) { 2397 else if (dev->class == ATA_DEV_ATAPI) {
2397 const char *cdb_intr_string = ""; 2398 const char *cdb_intr_string = "";
2398 const char *atapi_an_string = ""; 2399 const char *atapi_an_string = "";
2400 const char *dma_dir_string = "";
2399 u32 sntf; 2401 u32 sntf;
2400 2402
2401 rc = atapi_cdb_len(id); 2403 rc = atapi_cdb_len(id);
@@ -2436,13 +2438,19 @@ int ata_dev_configure(struct ata_device *dev)
2436 cdb_intr_string = ", CDB intr"; 2438 cdb_intr_string = ", CDB intr";
2437 } 2439 }
2438 2440
2441 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2442 dev->flags |= ATA_DFLAG_DMADIR;
2443 dma_dir_string = ", DMADIR";
2444 }
2445
2439 /* print device info to dmesg */ 2446 /* print device info to dmesg */
2440 if (ata_msg_drv(ap) && print_info) 2447 if (ata_msg_drv(ap) && print_info)
2441 ata_dev_printk(dev, KERN_INFO, 2448 ata_dev_printk(dev, KERN_INFO,
2442 "ATAPI: %s, %s, max %s%s%s\n", 2449 "ATAPI: %s, %s, max %s%s%s%s\n",
2443 modelbuf, fwrevbuf, 2450 modelbuf, fwrevbuf,
2444 ata_mode_string(xfer_mask), 2451 ata_mode_string(xfer_mask),
2445 cdb_intr_string, atapi_an_string); 2452 cdb_intr_string, atapi_an_string,
2453 dma_dir_string);
2446 } 2454 }
2447 2455
2448 /* determine max_sectors */ 2456 /* determine max_sectors */
@@ -2504,13 +2512,13 @@ int ata_dev_configure(struct ata_device *dev)
2504 2512
2505 if (ata_msg_probe(ap)) 2513 if (ata_msg_probe(ap))
2506 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n", 2514 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2507 __FUNCTION__, ata_chk_status(ap)); 2515 __func__, ata_chk_status(ap));
2508 return 0; 2516 return 0;
2509 2517
2510err_out_nosup: 2518err_out_nosup:
2511 if (ata_msg_probe(ap)) 2519 if (ata_msg_probe(ap))
2512 ata_dev_printk(dev, KERN_DEBUG, 2520 ata_dev_printk(dev, KERN_DEBUG,
2513 "%s: EXIT, err\n", __FUNCTION__); 2521 "%s: EXIT, err\n", __func__);
2514 return rc; 2522 return rc;
2515} 2523}
2516 2524
@@ -7368,7 +7376,7 @@ void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
7368 pci_save_state(pdev); 7376 pci_save_state(pdev);
7369 pci_disable_device(pdev); 7377 pci_disable_device(pdev);
7370 7378
7371 if (mesg.event == PM_EVENT_SUSPEND) 7379 if (mesg.event & PM_EVENT_SLEEP)
7372 pci_set_power_state(pdev, PCI_D3hot); 7380 pci_set_power_state(pdev, PCI_D3hot);
7373} 7381}
7374 7382
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 698ce2cea52c..681252fd8143 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2150,6 +2150,15 @@ int ata_eh_reset(struct ata_link *link, int classify,
2150 ap->ops->set_piomode(ap, dev); 2150 ap->ops->set_piomode(ap, dev);
2151 } 2151 }
2152 2152
2153 if (!softreset && !hardreset) {
2154 if (verbose)
2155 ata_link_printk(link, KERN_INFO, "no reset method "
2156 "available, skipping reset\n");
2157 if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2158 lflags |= ATA_LFLAG_ASSUME_ATA;
2159 goto done;
2160 }
2161
2153 /* Determine which reset to use and record in ehc->i.action. 2162 /* Determine which reset to use and record in ehc->i.action.
2154 * prereset() may examine and modify it. 2163 * prereset() may examine and modify it.
2155 */ 2164 */
@@ -2254,6 +2263,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
2254 lflags |= ATA_LFLAG_ASSUME_ATA; 2263 lflags |= ATA_LFLAG_ASSUME_ATA;
2255 } 2264 }
2256 2265
2266 done:
2257 ata_link_for_each_dev(dev, link) { 2267 ata_link_for_each_dev(dev, link) {
2258 /* After the reset, the device state is PIO 0 and the 2268 /* After the reset, the device state is PIO 0 and the
2259 * controller state is undefined. Reset also wakes up 2269 * controller state is undefined. Reset also wakes up
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index caef2bbd4a8a..d91f5090ba9d 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -35,7 +35,7 @@ static unsigned int sata_pmp_read(struct ata_link *link, int reg, u32 *r_val)
35 ata_tf_init(pmp_dev, &tf); 35 ata_tf_init(pmp_dev, &tf);
36 tf.command = ATA_CMD_PMP_READ; 36 tf.command = ATA_CMD_PMP_READ;
37 tf.protocol = ATA_PROT_NODATA; 37 tf.protocol = ATA_PROT_NODATA;
38 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 38 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
39 tf.feature = reg; 39 tf.feature = reg;
40 tf.device = link->pmp; 40 tf.device = link->pmp;
41 41
@@ -71,7 +71,7 @@ static unsigned int sata_pmp_write(struct ata_link *link, int reg, u32 val)
71 ata_tf_init(pmp_dev, &tf); 71 ata_tf_init(pmp_dev, &tf);
72 tf.command = ATA_CMD_PMP_WRITE; 72 tf.command = ATA_CMD_PMP_WRITE;
73 tf.protocol = ATA_PROT_NODATA; 73 tf.protocol = ATA_PROT_NODATA;
74 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 74 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
75 tf.feature = reg; 75 tf.feature = reg;
76 tf.device = link->pmp; 76 tf.device = link->pmp;
77 tf.nsect = val & 0xff; 77 tf.nsect = val & 0xff;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f888babc8283..8f0e8f2bc628 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -862,9 +862,10 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
862 struct request_queue *q = sdev->request_queue; 862 struct request_queue *q = sdev->request_queue;
863 void *buf; 863 void *buf;
864 864
865 /* set the min alignment */ 865 /* set the min alignment and padding */
866 blk_queue_update_dma_alignment(sdev->request_queue, 866 blk_queue_update_dma_alignment(sdev->request_queue,
867 ATA_DMA_PAD_SZ - 1); 867 ATA_DMA_PAD_SZ - 1);
868 blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1);
868 869
869 /* configure draining */ 870 /* configure draining */
870 buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); 871 buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
@@ -1694,12 +1695,17 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1694 u8 *rbuf; 1695 u8 *rbuf;
1695 unsigned int buflen, rc; 1696 unsigned int buflen, rc;
1696 struct scsi_cmnd *cmd = args->cmd; 1697 struct scsi_cmnd *cmd = args->cmd;
1698 unsigned long flags;
1699
1700 local_irq_save(flags);
1697 1701
1698 buflen = ata_scsi_rbuf_get(cmd, &rbuf); 1702 buflen = ata_scsi_rbuf_get(cmd, &rbuf);
1699 memset(rbuf, 0, buflen); 1703 memset(rbuf, 0, buflen);
1700 rc = actor(args, rbuf, buflen); 1704 rc = actor(args, rbuf, buflen);
1701 ata_scsi_rbuf_put(cmd, rbuf); 1705 ata_scsi_rbuf_put(cmd, rbuf);
1702 1706
1707 local_irq_restore(flags);
1708
1703 if (rc == 0) 1709 if (rc == 0)
1704 cmd->result = SAM_STAT_GOOD; 1710 cmd->result = SAM_STAT_GOOD;
1705 args->done(cmd); 1711 args->done(cmd);
@@ -2473,6 +2479,9 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2473 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { 2479 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
2474 u8 *buf = NULL; 2480 u8 *buf = NULL;
2475 unsigned int buflen; 2481 unsigned int buflen;
2482 unsigned long flags;
2483
2484 local_irq_save(flags);
2476 2485
2477 buflen = ata_scsi_rbuf_get(cmd, &buf); 2486 buflen = ata_scsi_rbuf_get(cmd, &buf);
2478 2487
@@ -2490,6 +2499,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2490 } 2499 }
2491 2500
2492 ata_scsi_rbuf_put(cmd, buf); 2501 ata_scsi_rbuf_put(cmd, buf);
2502
2503 local_irq_restore(flags);
2493 } 2504 }
2494 2505
2495 cmd->result = SAM_STAT_GOOD; 2506 cmd->result = SAM_STAT_GOOD;
@@ -2528,7 +2539,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
2528 } 2539 }
2529 2540
2530 qc->tf.command = ATA_CMD_PACKET; 2541 qc->tf.command = ATA_CMD_PACKET;
2531 qc->nbytes = scsi_bufflen(scmd); 2542 qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len;
2532 2543
2533 /* check whether ATAPI DMA is safe */ 2544 /* check whether ATAPI DMA is safe */
2534 if (!using_pio && ata_check_atapi_dma(qc)) 2545 if (!using_pio && ata_check_atapi_dma(qc))
@@ -2539,7 +2550,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
2539 * want to set it properly, and for DMA where it is 2550 * want to set it properly, and for DMA where it is
2540 * effectively meaningless. 2551 * effectively meaningless.
2541 */ 2552 */
2542 nbytes = min(scmd->request->raw_data_len, (unsigned int)63 * 1024); 2553 nbytes = min(scmd->request->data_len, (unsigned int)63 * 1024);
2543 2554
2544 /* Most ATAPI devices which honor transfer chunk size don't 2555 /* Most ATAPI devices which honor transfer chunk size don't
2545 * behave according to the spec when odd chunk size which 2556 * behave according to the spec when odd chunk size which
@@ -2582,7 +2593,8 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
2582 qc->tf.protocol = ATAPI_PROT_DMA; 2593 qc->tf.protocol = ATAPI_PROT_DMA;
2583 qc->tf.feature |= ATAPI_PKT_DMA; 2594 qc->tf.feature |= ATAPI_PKT_DMA;
2584 2595
2585 if (atapi_dmadir && (scmd->sc_data_direction != DMA_TO_DEVICE)) 2596 if ((dev->flags & ATA_DFLAG_DMADIR) &&
2597 (scmd->sc_data_direction != DMA_TO_DEVICE))
2586 /* some SATA bridges need us to indicate data xfer direction */ 2598 /* some SATA bridges need us to indicate data xfer direction */
2587 qc->tf.feature |= ATAPI_DMADIR; 2599 qc->tf.feature |= ATAPI_DMADIR;
2588 } 2600 }
@@ -2864,7 +2876,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
2864 * TODO: find out if we need to do more here to 2876 * TODO: find out if we need to do more here to
2865 * cover scatter/gather case. 2877 * cover scatter/gather case.
2866 */ 2878 */
2867 qc->nbytes = scsi_bufflen(scmd); 2879 qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len;
2868 2880
2869 /* request result TF and be quiet about device error */ 2881 /* request result TF and be quiet about device error */
2870 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; 2882 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 60cd4b179766..20dc572fb45a 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -56,7 +56,8 @@ u8 ata_irq_on(struct ata_port *ap)
56 ap->ctl &= ~ATA_NIEN; 56 ap->ctl &= ~ATA_NIEN;
57 ap->last_ctl = ap->ctl; 57 ap->last_ctl = ap->ctl;
58 58
59 iowrite8(ap->ctl, ioaddr->ctl_addr); 59 if (ioaddr->ctl_addr)
60 iowrite8(ap->ctl, ioaddr->ctl_addr);
60 tmp = ata_wait_idle(ap); 61 tmp = ata_wait_idle(ap);
61 62
62 ap->ops->irq_clear(ap); 63 ap->ops->irq_clear(ap);
@@ -81,12 +82,14 @@ void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
81 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; 82 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
82 83
83 if (tf->ctl != ap->last_ctl) { 84 if (tf->ctl != ap->last_ctl) {
84 iowrite8(tf->ctl, ioaddr->ctl_addr); 85 if (ioaddr->ctl_addr)
86 iowrite8(tf->ctl, ioaddr->ctl_addr);
85 ap->last_ctl = tf->ctl; 87 ap->last_ctl = tf->ctl;
86 ata_wait_idle(ap); 88 ata_wait_idle(ap);
87 } 89 }
88 90
89 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { 91 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
92 WARN_ON(!ioaddr->ctl_addr);
90 iowrite8(tf->hob_feature, ioaddr->feature_addr); 93 iowrite8(tf->hob_feature, ioaddr->feature_addr);
91 iowrite8(tf->hob_nsect, ioaddr->nsect_addr); 94 iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
92 iowrite8(tf->hob_lbal, ioaddr->lbal_addr); 95 iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
@@ -167,14 +170,17 @@ void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
167 tf->device = ioread8(ioaddr->device_addr); 170 tf->device = ioread8(ioaddr->device_addr);
168 171
169 if (tf->flags & ATA_TFLAG_LBA48) { 172 if (tf->flags & ATA_TFLAG_LBA48) {
170 iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr); 173 if (likely(ioaddr->ctl_addr)) {
171 tf->hob_feature = ioread8(ioaddr->error_addr); 174 iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
172 tf->hob_nsect = ioread8(ioaddr->nsect_addr); 175 tf->hob_feature = ioread8(ioaddr->error_addr);
173 tf->hob_lbal = ioread8(ioaddr->lbal_addr); 176 tf->hob_nsect = ioread8(ioaddr->nsect_addr);
174 tf->hob_lbam = ioread8(ioaddr->lbam_addr); 177 tf->hob_lbal = ioread8(ioaddr->lbal_addr);
175 tf->hob_lbah = ioread8(ioaddr->lbah_addr); 178 tf->hob_lbam = ioread8(ioaddr->lbam_addr);
176 iowrite8(tf->ctl, ioaddr->ctl_addr); 179 tf->hob_lbah = ioread8(ioaddr->lbah_addr);
177 ap->last_ctl = tf->ctl; 180 iowrite8(tf->ctl, ioaddr->ctl_addr);
181 ap->last_ctl = tf->ctl;
182 } else
183 WARN_ON(1);
178 } 184 }
179} 185}
180 186
@@ -352,7 +358,8 @@ void ata_bmdma_freeze(struct ata_port *ap)
352 ap->ctl |= ATA_NIEN; 358 ap->ctl |= ATA_NIEN;
353 ap->last_ctl = ap->ctl; 359 ap->last_ctl = ap->ctl;
354 360
355 iowrite8(ap->ctl, ioaddr->ctl_addr); 361 if (ioaddr->ctl_addr)
362 iowrite8(ap->ctl, ioaddr->ctl_addr);
356 363
357 /* Under certain circumstances, some controllers raise IRQ on 364 /* Under certain circumstances, some controllers raise IRQ on
358 * ATA_NIEN manipulation. Also, many controllers fail to mask 365 * ATA_NIEN manipulation. Also, many controllers fail to mask
@@ -459,13 +466,14 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
459 */ 466 */
460void ata_bmdma_error_handler(struct ata_port *ap) 467void ata_bmdma_error_handler(struct ata_port *ap)
461{ 468{
462 ata_reset_fn_t hardreset; 469 ata_reset_fn_t softreset = NULL, hardreset = NULL;
463 470
464 hardreset = NULL; 471 if (ap->ioaddr.ctl_addr)
472 softreset = ata_std_softreset;
465 if (sata_scr_valid(&ap->link)) 473 if (sata_scr_valid(&ap->link))
466 hardreset = sata_std_hardreset; 474 hardreset = sata_std_hardreset;
467 475
468 ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset, 476 ata_bmdma_drive_eh(ap, ata_std_prereset, softreset, hardreset,
469 ata_std_postreset); 477 ata_std_postreset);
470} 478}
471 479
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 6036dedfe377..aa884f71a12a 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -56,7 +56,6 @@ enum {
56extern unsigned int ata_print_id; 56extern unsigned int ata_print_id;
57extern struct workqueue_struct *ata_aux_wq; 57extern struct workqueue_struct *ata_aux_wq;
58extern int atapi_enabled; 58extern int atapi_enabled;
59extern int atapi_dmadir;
60extern int atapi_passthru16; 59extern int atapi_passthru16;
61extern int libata_fua; 60extern int libata_fua;
62extern int libata_noacpi; 61extern int libata_noacpi;
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 9623f5295530..408bdc1a9776 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -227,7 +227,7 @@ static struct scsi_host_template atiixp_sht = {
227 .queuecommand = ata_scsi_queuecmd, 227 .queuecommand = ata_scsi_queuecmd,
228 .can_queue = ATA_DEF_QUEUE, 228 .can_queue = ATA_DEF_QUEUE,
229 .this_id = ATA_SHT_THIS_ID, 229 .this_id = ATA_SHT_THIS_ID,
230 .sg_tablesize = LIBATA_MAX_PRD, 230 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
231 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 231 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
232 .emulated = ATA_SHT_EMULATED, 232 .emulated = ATA_SHT_EMULATED,
233 .use_clustering = ATA_SHT_USE_CLUSTERING, 233 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -259,7 +259,7 @@ static struct ata_port_operations atiixp_port_ops = {
259 .bmdma_stop = atiixp_bmdma_stop, 259 .bmdma_stop = atiixp_bmdma_stop,
260 .bmdma_status = ata_bmdma_status, 260 .bmdma_status = ata_bmdma_status,
261 261
262 .qc_prep = ata_qc_prep, 262 .qc_prep = ata_dumb_qc_prep,
263 .qc_issue = ata_qc_issue_prot, 263 .qc_issue = ata_qc_issue_prot,
264 264
265 .data_xfer = ata_data_xfer, 265 .data_xfer = ata_data_xfer,
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 0713872cf65c..a742efa0da2b 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -27,7 +27,7 @@
27#include <linux/libata.h> 27#include <linux/libata.h>
28 28
29#define DRV_NAME "pata_hpt366" 29#define DRV_NAME "pata_hpt366"
30#define DRV_VERSION "0.6.1" 30#define DRV_VERSION "0.6.2"
31 31
32struct hpt_clock { 32struct hpt_clock {
33 u8 xfer_speed; 33 u8 xfer_speed;
@@ -180,9 +180,9 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask)
180 if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) 180 if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
181 mask &= ~ATA_MASK_UDMA; 181 mask &= ~ATA_MASK_UDMA;
182 if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3)) 182 if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3))
183 mask &= ~(0x07 << ATA_SHIFT_UDMA); 183 mask &= ~(0xF8 << ATA_SHIFT_UDMA);
184 if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4)) 184 if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
185 mask &= ~(0x0F << ATA_SHIFT_UDMA); 185 mask &= ~(0xF0 << ATA_SHIFT_UDMA);
186 } 186 }
187 return ata_pci_default_filter(adev, mask); 187 return ata_pci_default_filter(adev, mask);
188} 188}
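The filter fix above inverts which UDMA bits are cleared: in the libata transfer-mode mask of this era, each UDMA mode owns one bit starting at ATA_SHIFT_UDMA (bit n means UDMA n), so capping a blacklisted drive means clearing the high mode bits, not the low ones. A small sketch of the arithmetic (the helper name is illustrative):

/* bit (ATA_SHIFT_UDMA + n) set  ==>  UDMA mode n allowed */
static unsigned long cap_at_udma2(unsigned long xfer_mask)
{
	/* clear UDMA3..7 (0xF8) and keep UDMA0..2; the old code cleared
	 * 0x07, i.e. it disabled the slow modes and left UDMA3+ enabled */
	return xfer_mask & ~(0xF8UL << ATA_SHIFT_UDMA);
}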
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 68eb34929cec..9a10878b2ad8 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -24,7 +24,7 @@
24#include <linux/libata.h> 24#include <linux/libata.h>
25 25
26#define DRV_NAME "pata_hpt37x" 26#define DRV_NAME "pata_hpt37x"
27#define DRV_VERSION "0.6.9" 27#define DRV_VERSION "0.6.11"
28 28
29struct hpt_clock { 29struct hpt_clock {
30 u8 xfer_speed; 30 u8 xfer_speed;
@@ -281,7 +281,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask)
281 if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) 281 if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
282 mask &= ~ATA_MASK_UDMA; 282 mask &= ~ATA_MASK_UDMA;
283 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) 283 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
284 mask &= ~(0x1F << ATA_SHIFT_UDMA); 284 mask &= ~(0xE0 << ATA_SHIFT_UDMA);
285 } 285 }
286 return ata_pci_default_filter(adev, mask); 286 return ata_pci_default_filter(adev, mask);
287} 287}
@@ -297,7 +297,7 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask)
297{ 297{
298 if (adev->class == ATA_DEV_ATA) { 298 if (adev->class == ATA_DEV_ATA) {
299 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) 299 if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
300 mask &= ~ (0x1F << ATA_SHIFT_UDMA); 300 mask &= ~(0xE0 << ATA_SHIFT_UDMA);
301 } 301 }
302 return ata_pci_default_filter(adev, mask); 302 return ata_pci_default_filter(adev, mask);
303} 303}
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 028af5dbeed6..511c89b9bae8 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -39,7 +39,7 @@
39#undef PDC_DEBUG 39#undef PDC_DEBUG
40 40
41#ifdef PDC_DEBUG 41#ifdef PDC_DEBUG
42#define PDPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) 42#define PDPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
43#else 43#else
44#define PDPRINTK(fmt, args...) 44#define PDPRINTK(fmt, args...)
45#endif 45#endif
diff --git a/drivers/ata/pata_rb500_cf.c b/drivers/ata/pata_rb500_cf.c
new file mode 100644
index 000000000000..4ce9b03fe6c8
--- /dev/null
+++ b/drivers/ata/pata_rb500_cf.c
@@ -0,0 +1,314 @@
1/*
2 * A low-level PATA driver to handle a Compact Flash connected to the
3 * Mikrotik RouterBoard 532 board.
4 *
5 * Copyright (C) 2007 Gabor Juhos <juhosg at openwrt.org>
6 * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
7 *
8 * This file was based on: drivers/ata/pata_ixp4xx_cf.c
9 * Copyright (C) 2006-07 Tower Technologies
10 * Author: Alessandro Zummo <a.zummo@towertech.it>
11 *
12 * It was also based on the driver for Linux 2.4.xx published by Mikrotik for
13 * their RouterBoard 1xx and 5xx series devices. The original Mikrotik code
14 * seems not to have a license.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2 as
18 * published by the Free Software Foundation.
19 *
20 */
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25
26#include <linux/io.h>
27#include <linux/interrupt.h>
28#include <linux/irq.h>
29
30#include <linux/libata.h>
31#include <scsi/scsi_host.h>
32
33#include <asm/gpio.h>
34
35#define DRV_NAME "pata-rb500-cf"
36#define DRV_VERSION "0.1.0"
37#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
38
39#define RB500_CF_MAXPORTS 1
40#define RB500_CF_IO_DELAY 400
41
42#define RB500_CF_REG_CMD 0x0800
43#define RB500_CF_REG_CTRL 0x080E
44#define RB500_CF_REG_DATA 0x0C00
45
46struct rb500_cf_info {
47 void __iomem *iobase;
48 unsigned int gpio_line;
49 int frozen;
50 unsigned int irq;
51};
52
53/* ------------------------------------------------------------------------ */
54
55static inline void rb500_pata_finish_io(struct ata_port *ap)
56{
57 struct ata_host *ah = ap->host;
58 struct rb500_cf_info *info = ah->private_data;
59
60 ata_altstatus(ap);
61 ndelay(RB500_CF_IO_DELAY);
62
63 set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
64}
65
66static void rb500_pata_exec_command(struct ata_port *ap,
67 const struct ata_taskfile *tf)
68{
69 writeb(tf->command, ap->ioaddr.command_addr);
70 rb500_pata_finish_io(ap);
71}
72
73static void rb500_pata_data_xfer(struct ata_device *adev, unsigned char *buf,
74 unsigned int buflen, int write_data)
75{
76 struct ata_port *ap = adev->link->ap;
77 void __iomem *ioaddr = ap->ioaddr.data_addr;
78
79 if (write_data) {
80 for (; buflen > 0; buflen--, buf++)
81 writeb(*buf, ioaddr);
82 } else {
83 for (; buflen > 0; buflen--, buf++)
84 *buf = readb(ioaddr);
85 }
86
87 rb500_pata_finish_io(adev->link->ap);
88}
89
90static void rb500_pata_freeze(struct ata_port *ap)
91{
92 struct rb500_cf_info *info = ap->host->private_data;
93
94 info->frozen = 1;
95}
96
97static void rb500_pata_thaw(struct ata_port *ap)
98{
99 struct rb500_cf_info *info = ap->host->private_data;
100
101 info->frozen = 0;
102}
103
104static irqreturn_t rb500_pata_irq_handler(int irq, void *dev_instance)
105{
106 struct ata_host *ah = dev_instance;
107 struct rb500_cf_info *info = ah->private_data;
108
109 if (gpio_get_value(info->gpio_line)) {
110 set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
111 if (!info->frozen)
112 ata_interrupt(info->irq, dev_instance);
113 } else {
114 set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
115 }
116
117 return IRQ_HANDLED;
118}
119
120static void rb500_pata_irq_clear(struct ata_port *ap)
121{
122}
123
124static int rb500_pata_port_start(struct ata_port *ap)
125{
126 return 0;
127}
128
129static struct ata_port_operations rb500_pata_port_ops = {
130 .tf_load = ata_tf_load,
131 .tf_read = ata_tf_read,
132
133 .exec_command = rb500_pata_exec_command,
134 .check_status = ata_check_status,
135 .dev_select = ata_std_dev_select,
136
137 .data_xfer = rb500_pata_data_xfer,
138
139 .qc_prep = ata_qc_prep,
140 .qc_issue = ata_qc_issue_prot,
141
142 .freeze = rb500_pata_freeze,
143 .thaw = rb500_pata_thaw,
144 .error_handler = ata_bmdma_error_handler,
145
146 .irq_handler = rb500_pata_irq_handler,
147 .irq_clear = rb500_pata_irq_clear,
148 .irq_on = ata_irq_on,
149
150 .port_start = rb500_pata_port_start,
151};
152
153/* ------------------------------------------------------------------------ */
154
155static struct scsi_host_template rb500_pata_sht = {
156 .module = THIS_MODULE,
157 .name = DRV_NAME,
158 .ioctl = ata_scsi_ioctl,
159 .queuecommand = ata_scsi_queuecmd,
160 .slave_configure = ata_scsi_slave_config,
161 .slave_destroy = ata_scsi_slave_destroy,
162 .bios_param = ata_std_bios_param,
163 .proc_name = DRV_NAME,
164
165 .can_queue = ATA_DEF_QUEUE,
166 .this_id = ATA_SHT_THIS_ID,
167 .sg_tablesize = LIBATA_MAX_PRD,
168 .dma_boundary = ATA_DMA_BOUNDARY,
169 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
170 .emulated = ATA_SHT_EMULATED,
171 .use_clustering = ATA_SHT_USE_CLUSTERING,
172};
173
174/* ------------------------------------------------------------------------ */
175
176static void rb500_pata_setup_ports(struct ata_host *ah)
177{
178 struct rb500_cf_info *info = ah->private_data;
179 struct ata_port *ap;
180
181 ap = ah->ports[0];
182
183 ap->ops = &rb500_pata_port_ops;
184 ap->pio_mask = 0x1f; /* PIO4 */
185 ap->flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO;
186
187 ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_CMD;
188 ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL;
189 ap->ioaddr.altstatus_addr = info->iobase + RB500_CF_REG_CTRL;
190
191 ata_std_ports(&ap->ioaddr);
192
193 ap->ioaddr.data_addr = info->iobase + RB500_CF_REG_DATA;
194}
195
196static __devinit int rb500_pata_driver_probe(struct platform_device *pdev)
197{
198 unsigned int irq;
199 int gpio;
200 struct resource *res;
201 struct ata_host *ah;
202 struct rb500_cf_info *info;
203 int ret;
204
205 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
206 if (!res) {
207 dev_err(&pdev->dev, "no IOMEM resource found\n");
208 return -EINVAL;
209 }
210
211 irq = platform_get_irq(pdev, 0);
212 if (irq <= 0) {
213 dev_err(&pdev->dev, "no IRQ resource found\n");
214 return -ENOENT;
215 }
216
217 gpio = irq_to_gpio(irq);
218 if (gpio < 0) {
219 dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
220 return -ENOENT;
221 }
222
223 ret = gpio_request(gpio, DRV_NAME);
224 if (ret) {
225 dev_err(&pdev->dev, "GPIO request failed\n");
226 return ret;
227 }
228
229 /* allocate host */
230 ah = ata_host_alloc(&pdev->dev, RB500_CF_MAXPORTS);
231 if (!ah)
232 return -ENOMEM;
233
234 platform_set_drvdata(pdev, ah);
235
236 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
237 if (!info)
238 return -ENOMEM;
239
240 ah->private_data = info;
241 info->gpio_line = gpio;
242 info->irq = irq;
243
244 info->iobase = devm_ioremap_nocache(&pdev->dev, res->start,
245 res->end - res->start + 1);
246 if (!info->iobase)
247 return -ENOMEM;
248
249 ret = gpio_direction_input(gpio);
250 if (ret) {
251 dev_err(&pdev->dev, "unable to set GPIO direction, err=%d\n",
252 ret);
253 goto err_free_gpio;
254 }
255
256 rb500_pata_setup_ports(ah);
257
258 ret = ata_host_activate(ah, irq, rb500_pata_irq_handler,
259 IRQF_TRIGGER_LOW, &rb500_pata_sht);
260 if (ret)
261 goto err_free_gpio;
262
263 return 0;
264
265err_free_gpio:
266 gpio_free(gpio);
267
268 return ret;
269}
270
271static __devexit int rb500_pata_driver_remove(struct platform_device *pdev)
272{
273 struct ata_host *ah = platform_get_drvdata(pdev);
274 struct rb500_cf_info *info = ah->private_data;
275
276 ata_host_detach(ah);
277 gpio_free(info->gpio_line);
278
279 return 0;
280}
281
282static struct platform_driver rb500_pata_platform_driver = {
283 .probe = rb500_pata_driver_probe,
284 .remove = __devexit_p(rb500_pata_driver_remove),
285 .driver = {
286 .name = DRV_NAME,
287 .owner = THIS_MODULE,
288 },
289};
290
291/* ------------------------------------------------------------------------ */
292
293#define DRV_INFO DRV_DESC " version " DRV_VERSION
294
295static int __init rb500_pata_module_init(void)
296{
297 printk(KERN_INFO DRV_INFO "\n");
298
299 return platform_driver_register(&rb500_pata_platform_driver);
300}
301
302static void __exit rb500_pata_module_exit(void)
303{
304 platform_driver_unregister(&rb500_pata_platform_driver);
305}
306
307MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
308MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
309MODULE_DESCRIPTION(DRV_DESC);
310MODULE_VERSION(DRV_VERSION);
311MODULE_LICENSE("GPL");
312
313module_init(rb500_pata_module_init);
314module_exit(rb500_pata_module_exit);
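The driver binds by name to a platform device, so a board file is expected to register one carrying the CF register window and the GPIO-backed IRQ. A hedged sketch of such a registration (the base address and IRQ number are placeholders, not taken from the RB532 board support code):

static struct resource rb500_cf_resources[] = {
	{
		.start	= 0x18800000,			/* example CF window */
		.end	= 0x18800000 + 0x1000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 149,				/* example GPIO IRQ */
		.end	= 149,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device rb500_cf_device = {
	.name		= "pata-rb500-cf",		/* must match DRV_NAME */
	.id		= -1,
	.num_resources	= ARRAY_SIZE(rb500_cf_resources),
	.resource	= rb500_cf_resources,
};

/* the board init code would then call platform_device_register(&rb500_cf_device) */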
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 9c523fbf529e..a589c0fa0dbb 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -226,7 +226,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo
226 226
227 for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) { 227 for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) {
228 if (!strcmp(p, model_num)) 228 if (!strcmp(p, model_num))
229 mask &= ~(0x1F << ATA_SHIFT_UDMA); 229 mask &= ~(0xE0 << ATA_SHIFT_UDMA);
230 } 230 }
231 return ata_pci_default_filter(adev, mask); 231 return ata_pci_default_filter(adev, mask);
232} 232}
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 9323dd0c7d8d..07791a7a48a5 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -335,7 +335,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
335 dma_addr_t indirect_ext_segment_paddr; 335 dma_addr_t indirect_ext_segment_paddr;
336 unsigned int si; 336 unsigned int si;
337 337
338 VPRINTK("SATA FSL : cd = 0x%x, prd = 0x%x\n", cmd_desc, prd); 338 VPRINTK("SATA FSL : cd = 0x%p, prd = 0x%p\n", cmd_desc, prd);
339 339
340 indirect_ext_segment_paddr = cmd_desc_paddr + 340 indirect_ext_segment_paddr = cmd_desc_paddr +
341 SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16; 341 SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16;
@@ -459,7 +459,8 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
459 VPRINTK("CE=0x%x, DE=0x%x, CC=0x%x, CmdStat = 0x%x\n", 459 VPRINTK("CE=0x%x, DE=0x%x, CC=0x%x, CmdStat = 0x%x\n",
460 ioread32(CE + hcr_base), 460 ioread32(CE + hcr_base),
461 ioread32(DE + hcr_base), 461 ioread32(DE + hcr_base),
462 ioread32(CC + hcr_base), ioread32(COMMANDSTAT + csr_base)); 462 ioread32(CC + hcr_base),
463 ioread32(COMMANDSTAT + host_priv->csr_base));
463 464
464 return 0; 465 return 0;
465} 466}
@@ -522,7 +523,8 @@ static void sata_fsl_freeze(struct ata_port *ap)
522 ioread32(CQ + hcr_base), 523 ioread32(CQ + hcr_base),
523 ioread32(CA + hcr_base), 524 ioread32(CA + hcr_base),
524 ioread32(CE + hcr_base), ioread32(DE + hcr_base)); 525 ioread32(CE + hcr_base), ioread32(DE + hcr_base));
525 VPRINTK("CmdStat = 0x%x\n", ioread32(csr_base + COMMANDSTAT)); 526 VPRINTK("CmdStat = 0x%x\n",
527 ioread32(host_priv->csr_base + COMMANDSTAT));
526 528
527 /* disable interrupts on the controller/port */ 529 /* disable interrupts on the controller/port */
528 temp = ioread32(hcr_base + HCONTROL); 530 temp = ioread32(hcr_base + HCONTROL);
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 69f651e0bc98..840d1c4a7850 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -45,6 +45,8 @@
45#include <linux/interrupt.h> 45#include <linux/interrupt.h>
46#include <linux/device.h> 46#include <linux/device.h>
47#include <scsi/scsi_host.h> 47#include <scsi/scsi_host.h>
48#include <scsi/scsi_cmnd.h>
49#include <scsi/scsi.h>
48#include <linux/libata.h> 50#include <linux/libata.h>
49 51
50#ifdef CONFIG_PPC_OF 52#ifdef CONFIG_PPC_OF
@@ -59,6 +61,7 @@ enum {
59 /* ap->flags bits */ 61 /* ap->flags bits */
60 K2_FLAG_SATA_8_PORTS = (1 << 24), 62 K2_FLAG_SATA_8_PORTS = (1 << 24),
61 K2_FLAG_NO_ATAPI_DMA = (1 << 25), 63 K2_FLAG_NO_ATAPI_DMA = (1 << 25),
64 K2_FLAG_BAR_POS_3 = (1 << 26),
62 65
63 /* Taskfile registers offsets */ 66 /* Taskfile registers offsets */
64 K2_SATA_TF_CMD_OFFSET = 0x00, 67 K2_SATA_TF_CMD_OFFSET = 0x00,
@@ -88,8 +91,10 @@ enum {
88 /* Port stride */ 91 /* Port stride */
89 K2_SATA_PORT_OFFSET = 0x100, 92 K2_SATA_PORT_OFFSET = 0x100,
90 93
91 board_svw4 = 0, 94 chip_svw4 = 0,
92 board_svw8 = 1, 95 chip_svw8 = 1,
96 chip_svw42 = 2, /* bar 3 */
97 chip_svw43 = 3, /* bar 5 */
93}; 98};
94 99
95static u8 k2_stat_check_status(struct ata_port *ap); 100static u8 k2_stat_check_status(struct ata_port *ap);
@@ -97,10 +102,25 @@ static u8 k2_stat_check_status(struct ata_port *ap);
97 102
98static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc) 103static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc)
99{ 104{
105 u8 cmnd = qc->scsicmd->cmnd[0];
106
100 if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA) 107 if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA)
101 return -1; /* ATAPI DMA not supported */ 108 return -1; /* ATAPI DMA not supported */
109 else {
110 switch (cmnd) {
111 case READ_10:
112 case READ_12:
113 case READ_16:
114 case WRITE_10:
115 case WRITE_12:
116 case WRITE_16:
117 return 0;
118
119 default:
120 return -1;
121 }
102 122
103 return 0; 123 }
104} 124}
105 125
106static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) 126static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
@@ -354,7 +374,7 @@ static const struct ata_port_operations k2_sata_ops = {
354}; 374};
355 375
356static const struct ata_port_info k2_port_info[] = { 376static const struct ata_port_info k2_port_info[] = {
357 /* board_svw4 */ 377 /* chip_svw4 */
358 { 378 {
359 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 379 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
360 ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA, 380 ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA,
@@ -363,7 +383,7 @@ static const struct ata_port_info k2_port_info[] = {
363 .udma_mask = ATA_UDMA6, 383 .udma_mask = ATA_UDMA6,
364 .port_ops = &k2_sata_ops, 384 .port_ops = &k2_sata_ops,
365 }, 385 },
366 /* board_svw8 */ 386 /* chip_svw8 */
367 { 387 {
368 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 388 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
369 ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA | 389 ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA |
@@ -373,6 +393,24 @@ static const struct ata_port_info k2_port_info[] = {
373 .udma_mask = ATA_UDMA6, 393 .udma_mask = ATA_UDMA6,
374 .port_ops = &k2_sata_ops, 394 .port_ops = &k2_sata_ops,
375 }, 395 },
396 /* chip_svw42 */
397 {
398 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
399 ATA_FLAG_MMIO | K2_FLAG_BAR_POS_3,
400 .pio_mask = 0x1f,
401 .mwdma_mask = 0x07,
402 .udma_mask = ATA_UDMA6,
403 .port_ops = &k2_sata_ops,
404 },
405 /* chip_svw43 */
406 {
407 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
408 ATA_FLAG_MMIO,
409 .pio_mask = 0x1f,
410 .mwdma_mask = 0x07,
411 .udma_mask = ATA_UDMA6,
412 .port_ops = &k2_sata_ops,
413 },
376}; 414};
377 415
378static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base) 416static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base)
@@ -402,7 +440,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
402 { &k2_port_info[ent->driver_data], NULL }; 440 { &k2_port_info[ent->driver_data], NULL };
403 struct ata_host *host; 441 struct ata_host *host;
404 void __iomem *mmio_base; 442 void __iomem *mmio_base;
405 int n_ports, i, rc; 443 int n_ports, i, rc, bar_pos;
406 444
407 if (!printed_version++) 445 if (!printed_version++)
408 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); 446 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
@@ -416,6 +454,9 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
416 if (!host) 454 if (!host)
417 return -ENOMEM; 455 return -ENOMEM;
418 456
457 bar_pos = 5;
458 if (ppi[0]->flags & K2_FLAG_BAR_POS_3)
459 bar_pos = 3;
419 /* 460 /*
420 * If this driver happens to only be useful on Apple's K2, then 461 * If this driver happens to only be useful on Apple's K2, then
421 * we should check that here as it has a normal Serverworks ID 462 * we should check that here as it has a normal Serverworks ID
@@ -428,17 +469,23 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
428 * Check if we have resources mapped at all (second function may 469 * Check if we have resources mapped at all (second function may
429 * have been disabled by firmware) 470 * have been disabled by firmware)
430 */ 471 */
431 if (pci_resource_len(pdev, 5) == 0) 472 if (pci_resource_len(pdev, bar_pos) == 0) {
473 /* In IDE mode we need to pin the device to ensure that
474 pcim_release does not clear the busmaster bit in config
475 space, clearing causes busmaster DMA to fail on
476 ports 3 & 4 */
477 pcim_pin_device(pdev);
432 return -ENODEV; 478 return -ENODEV;
479 }
433 480
434 /* Request and iomap PCI regions */ 481 /* Request and iomap PCI regions */
435 rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME); 482 rc = pcim_iomap_regions(pdev, 1 << bar_pos, DRV_NAME);
436 if (rc == -EBUSY) 483 if (rc == -EBUSY)
437 pcim_pin_device(pdev); 484 pcim_pin_device(pdev);
438 if (rc) 485 if (rc)
439 return rc; 486 return rc;
440 host->iomap = pcim_iomap_table(pdev); 487 host->iomap = pcim_iomap_table(pdev);
441 mmio_base = host->iomap[5]; 488 mmio_base = host->iomap[bar_pos];
442 489
443 /* different controllers have different number of ports - currently 4 or 8 */ 490 /* different controllers have different number of ports - currently 4 or 8 */
444 /* All ports are on the same function. Multi-function device is no 491 /* All ports are on the same function. Multi-function device is no
@@ -483,11 +530,13 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en
483 * controller 530 * controller
484 * */ 531 * */
485static const struct pci_device_id k2_sata_pci_tbl[] = { 532static const struct pci_device_id k2_sata_pci_tbl[] = {
486 { PCI_VDEVICE(SERVERWORKS, 0x0240), board_svw4 }, 533 { PCI_VDEVICE(SERVERWORKS, 0x0240), chip_svw4 },
487 { PCI_VDEVICE(SERVERWORKS, 0x0241), board_svw4 }, 534 { PCI_VDEVICE(SERVERWORKS, 0x0241), chip_svw4 },
488 { PCI_VDEVICE(SERVERWORKS, 0x0242), board_svw8 }, 535 { PCI_VDEVICE(SERVERWORKS, 0x0242), chip_svw8 },
489 { PCI_VDEVICE(SERVERWORKS, 0x024a), board_svw4 }, 536 { PCI_VDEVICE(SERVERWORKS, 0x024a), chip_svw4 },
490 { PCI_VDEVICE(SERVERWORKS, 0x024b), board_svw4 }, 537 { PCI_VDEVICE(SERVERWORKS, 0x024b), chip_svw4 },
538 { PCI_VDEVICE(SERVERWORKS, 0x0410), chip_svw42 },
539 { PCI_VDEVICE(SERVERWORKS, 0x0411), chip_svw43 },
491 540
492 { } 541 { }
493}; 542};
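The K2 update above picks the MMIO BAR per chip variant and maps only that one. A compact sketch of the pattern (the helper is illustrative; pcim_iomap_regions() takes a bitmask of BARs and pcim_iomap_table() returns the per-BAR mapping table):

static int ex_map_ctrl_bar(struct pci_dev *pdev, unsigned long port_flags,
			   void __iomem **mmio)
{
	int bar = (port_flags & K2_FLAG_BAR_POS_3) ? 3 : 5;
	int rc;

	rc = pcim_iomap_regions(pdev, 1 << bar, DRV_NAME);
	if (rc)
		return rc;

	*mmio = pcim_iomap_table(pdev)[bar];
	return 0;
}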
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index c662d686154a..47c57a4294b7 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -331,8 +331,8 @@ module_param(fs_keystream, int, 0);
331#define FS_DEBUG_QSIZE 0x00001000 331#define FS_DEBUG_QSIZE 0x00001000
332 332
333 333
334#define func_enter() fs_dprintk (FS_DEBUG_FLOW, "fs: enter %s\n", __FUNCTION__) 334#define func_enter() fs_dprintk(FS_DEBUG_FLOW, "fs: enter %s\n", __func__)
335#define func_exit() fs_dprintk (FS_DEBUG_FLOW, "fs: exit %s\n", __FUNCTION__) 335#define func_exit() fs_dprintk(FS_DEBUG_FLOW, "fs: exit %s\n", __func__)
336 336
337 337
338static struct fs_dev *fs_boards = NULL; 338static struct fs_dev *fs_boards = NULL;
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f97e050338f0..9427a61f62b0 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -95,8 +95,8 @@
95#if 1 95#if 1
96#define ASSERT(expr) if (!(expr)) { \ 96#define ASSERT(expr) if (!(expr)) { \
97 printk(FORE200E "assertion failed! %s[%d]: %s\n", \ 97 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
98 __FUNCTION__, __LINE__, #expr); \ 98 __func__, __LINE__, #expr); \
99 panic(FORE200E "%s", __FUNCTION__); \ 99 panic(FORE200E "%s", __func__); \
100 } 100 }
101#else 101#else
102#define ASSERT(expr) do {} while (0) 102#define ASSERT(expr) do {} while (0)
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index eee54c0cde68..b967919fb7e2 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -555,7 +555,7 @@ idt77252_tx_dump(struct idt77252_dev *card)
555 struct vc_map *vc; 555 struct vc_map *vc;
556 int i; 556 int i;
557 557
558 printk("%s\n", __FUNCTION__); 558 printk("%s\n", __func__);
559 for (i = 0; i < card->tct_size; i++) { 559 for (i = 0; i < card->tct_size; i++) {
560 vc = card->vcs[i]; 560 vc = card->vcs[i];
561 if (!vc) 561 if (!vc)
@@ -1035,7 +1035,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
1035 skb = sb_pool_skb(card, le32_to_cpu(rsqe->word_2)); 1035 skb = sb_pool_skb(card, le32_to_cpu(rsqe->word_2));
1036 if (skb == NULL) { 1036 if (skb == NULL) {
1037 printk("%s: NULL skb in %s, rsqe: %08x %08x %08x %08x\n", 1037 printk("%s: NULL skb in %s, rsqe: %08x %08x %08x %08x\n",
1038 card->name, __FUNCTION__, 1038 card->name, __func__,
1039 le32_to_cpu(rsqe->word_1), le32_to_cpu(rsqe->word_2), 1039 le32_to_cpu(rsqe->word_1), le32_to_cpu(rsqe->word_2),
1040 le32_to_cpu(rsqe->word_3), le32_to_cpu(rsqe->word_4)); 1040 le32_to_cpu(rsqe->word_3), le32_to_cpu(rsqe->word_4));
1041 return; 1041 return;
@@ -1873,7 +1873,7 @@ add_rx_skb(struct idt77252_dev *card, int queue,
1873 return; 1873 return;
1874 1874
1875 if (sb_pool_add(card, skb, queue)) { 1875 if (sb_pool_add(card, skb, queue)) {
1876 printk("%s: SB POOL full\n", __FUNCTION__); 1876 printk("%s: SB POOL full\n", __func__);
1877 goto outfree; 1877 goto outfree;
1878 } 1878 }
1879 1879
@@ -1883,7 +1883,7 @@ add_rx_skb(struct idt77252_dev *card, int queue,
1883 IDT77252_PRV_PADDR(skb) = paddr; 1883 IDT77252_PRV_PADDR(skb) = paddr;
1884 1884
1885 if (push_rx_skb(card, skb, queue)) { 1885 if (push_rx_skb(card, skb, queue)) {
1886 printk("%s: FB QUEUE full\n", __FUNCTION__); 1886 printk("%s: FB QUEUE full\n", __func__);
1887 goto outunmap; 1887 goto outunmap;
1888 } 1888 }
1889 } 1889 }
@@ -3821,12 +3821,12 @@ static int __init idt77252_init(void)
3821{ 3821{
3822 struct sk_buff *skb; 3822 struct sk_buff *skb;
3823 3823
3824 printk("%s: at %p\n", __FUNCTION__, idt77252_init); 3824 printk("%s: at %p\n", __func__, idt77252_init);
3825 3825
3826 if (sizeof(skb->cb) < sizeof(struct atm_skb_data) + 3826 if (sizeof(skb->cb) < sizeof(struct atm_skb_data) +
3827 sizeof(struct idt77252_skb_prv)) { 3827 sizeof(struct idt77252_skb_prv)) {
3828 printk(KERN_ERR "%s: skb->cb is too small (%lu < %lu)\n", 3828 printk(KERN_ERR "%s: skb->cb is too small (%lu < %lu)\n",
3829 __FUNCTION__, (unsigned long) sizeof(skb->cb), 3829 __func__, (unsigned long) sizeof(skb->cb),
3830 (unsigned long) sizeof(struct atm_skb_data) + 3830 (unsigned long) sizeof(struct atm_skb_data) +
3831 sizeof(struct idt77252_skb_prv)); 3831 sizeof(struct idt77252_skb_prv));
3832 return -EIO; 3832 return -EIO;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 9c0070b5bd3e..7de543d1d0b4 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -621,7 +621,8 @@ static struct kobject *get_device_parent(struct device *dev,
621static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) 621static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
622{ 622{
623 /* see if we live in a "glue" directory */ 623 /* see if we live in a "glue" directory */
624 if (!dev->class || glue_dir->kset != &dev->class->class_dirs) 624 if (!glue_dir || !dev->class ||
625 glue_dir->kset != &dev->class->class_dirs)
625 return; 626 return;
626 627
627 kobject_put(glue_dir); 628 kobject_put(glue_dir);
@@ -770,17 +771,10 @@ int device_add(struct device *dev)
770 struct class_interface *class_intf; 771 struct class_interface *class_intf;
771 int error; 772 int error;
772 773
773 error = pm_sleep_lock();
774 if (error) {
775 dev_warn(dev, "Suspicious %s during suspend\n", __FUNCTION__);
776 dump_stack();
777 return error;
778 }
779
780 dev = get_device(dev); 774 dev = get_device(dev);
781 if (!dev || !strlen(dev->bus_id)) { 775 if (!dev || !strlen(dev->bus_id)) {
782 error = -EINVAL; 776 error = -EINVAL;
783 goto Error; 777 goto Done;
784 } 778 }
785 779
786 pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__); 780 pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__);
@@ -843,11 +837,9 @@ int device_add(struct device *dev)
843 } 837 }
844 Done: 838 Done:
845 put_device(dev); 839 put_device(dev);
846 pm_sleep_unlock();
847 return error; 840 return error;
848 BusError: 841 BusError:
849 device_pm_remove(dev); 842 device_pm_remove(dev);
850 dpm_sysfs_remove(dev);
851 PMError: 843 PMError:
852 if (dev->bus) 844 if (dev->bus)
853 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 845 blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index efaf282c438c..911ec600fe71 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -648,7 +648,7 @@ u64 dma_get_required_mask(struct device *dev)
648 high_totalram += high_totalram - 1; 648 high_totalram += high_totalram - 1;
649 mask = (((u64)high_totalram) << 32) + 0xffffffff; 649 mask = (((u64)high_totalram) << 32) + 0xffffffff;
650 } 650 }
651 return mask & *dev->dma_mask; 651 return mask;
652} 652}
653EXPORT_SYMBOL_GPL(dma_get_required_mask); 653EXPORT_SYMBOL_GPL(dma_get_required_mask);
654#endif 654#endif
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index ee9d1c8db0d6..d887d5cb5bef 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -48,7 +48,6 @@
48 */ 48 */
49 49
50LIST_HEAD(dpm_active); 50LIST_HEAD(dpm_active);
51static LIST_HEAD(dpm_locked);
52static LIST_HEAD(dpm_off); 51static LIST_HEAD(dpm_off);
53static LIST_HEAD(dpm_off_irq); 52static LIST_HEAD(dpm_off_irq);
54static LIST_HEAD(dpm_destroy); 53static LIST_HEAD(dpm_destroy);
@@ -81,28 +80,6 @@ void device_pm_add(struct device *dev)
81 */ 80 */
82void device_pm_remove(struct device *dev) 81void device_pm_remove(struct device *dev)
83{ 82{
84 /*
85 * If this function is called during a suspend, it will be blocked,
86 * because we're holding the device's semaphore at that time, which may
87 * lead to a deadlock. In that case we want to print a warning.
88 * However, it may also be called by unregister_dropped_devices() with
89 * the device's semaphore released, in which case the warning should
90 * not be printed.
91 */
92 if (down_trylock(&dev->sem)) {
93 if (down_read_trylock(&pm_sleep_rwsem)) {
94 /* No suspend in progress, wait on dev->sem */
95 down(&dev->sem);
96 up_read(&pm_sleep_rwsem);
97 } else {
98 /* Suspend in progress, we may deadlock */
99 dev_warn(dev, "Suspicious %s during suspend\n",
100 __FUNCTION__);
101 dump_stack();
102 /* The user has been warned ... */
103 down(&dev->sem);
104 }
105 }
106 pr_debug("PM: Removing info for %s:%s\n", 83 pr_debug("PM: Removing info for %s:%s\n",
107 dev->bus ? dev->bus->name : "No Bus", 84 dev->bus ? dev->bus->name : "No Bus",
108 kobject_name(&dev->kobj)); 85 kobject_name(&dev->kobj));
@@ -110,7 +87,6 @@ void device_pm_remove(struct device *dev)
110 dpm_sysfs_remove(dev); 87 dpm_sysfs_remove(dev);
111 list_del_init(&dev->power.entry); 88 list_del_init(&dev->power.entry);
112 mutex_unlock(&dpm_list_mtx); 89 mutex_unlock(&dpm_list_mtx);
113 up(&dev->sem);
114} 90}
115 91
116/** 92/**
@@ -230,6 +206,8 @@ static int resume_device(struct device *dev)
230 TRACE_DEVICE(dev); 206 TRACE_DEVICE(dev);
231 TRACE_RESUME(0); 207 TRACE_RESUME(0);
232 208
209 down(&dev->sem);
210
233 if (dev->bus && dev->bus->resume) { 211 if (dev->bus && dev->bus->resume) {
234 dev_dbg(dev,"resuming\n"); 212 dev_dbg(dev,"resuming\n");
235 error = dev->bus->resume(dev); 213 error = dev->bus->resume(dev);
@@ -245,6 +223,8 @@ static int resume_device(struct device *dev)
245 error = dev->class->resume(dev); 223 error = dev->class->resume(dev);
246 } 224 }
247 225
226 up(&dev->sem);
227
248 TRACE_RESUME(error); 228 TRACE_RESUME(error);
249 return error; 229 return error;
250} 230}
@@ -266,7 +246,7 @@ static void dpm_resume(void)
266 struct list_head *entry = dpm_off.next; 246 struct list_head *entry = dpm_off.next;
267 struct device *dev = to_device(entry); 247 struct device *dev = to_device(entry);
268 248
269 list_move_tail(entry, &dpm_locked); 249 list_move_tail(entry, &dpm_active);
270 mutex_unlock(&dpm_list_mtx); 250 mutex_unlock(&dpm_list_mtx);
271 resume_device(dev); 251 resume_device(dev);
272 mutex_lock(&dpm_list_mtx); 252 mutex_lock(&dpm_list_mtx);
@@ -275,25 +255,6 @@ static void dpm_resume(void)
275} 255}
276 256
277/** 257/**
278 * unlock_all_devices - Release each device's semaphore
279 *
280 * Go through the dpm_off list. Put each device on the dpm_active
281 * list and unlock it.
282 */
283static void unlock_all_devices(void)
284{
285 mutex_lock(&dpm_list_mtx);
286 while (!list_empty(&dpm_locked)) {
287 struct list_head *entry = dpm_locked.prev;
288 struct device *dev = to_device(entry);
289
290 list_move(entry, &dpm_active);
291 up(&dev->sem);
292 }
293 mutex_unlock(&dpm_list_mtx);
294}
295
296/**
297 * unregister_dropped_devices - Unregister devices scheduled for removal 258 * unregister_dropped_devices - Unregister devices scheduled for removal
298 * 259 *
299 * Unregister all devices on the dpm_destroy list. 260 * Unregister all devices on the dpm_destroy list.
@@ -305,7 +266,6 @@ static void unregister_dropped_devices(void)
305 struct list_head *entry = dpm_destroy.next; 266 struct list_head *entry = dpm_destroy.next;
306 struct device *dev = to_device(entry); 267 struct device *dev = to_device(entry);
307 268
308 up(&dev->sem);
309 mutex_unlock(&dpm_list_mtx); 269 mutex_unlock(&dpm_list_mtx);
310 /* This also removes the device from the list */ 270 /* This also removes the device from the list */
311 device_unregister(dev); 271 device_unregister(dev);
@@ -324,7 +284,6 @@ void device_resume(void)
324{ 284{
325 might_sleep(); 285 might_sleep();
326 dpm_resume(); 286 dpm_resume();
327 unlock_all_devices();
328 unregister_dropped_devices(); 287 unregister_dropped_devices();
329 up_write(&pm_sleep_rwsem); 288 up_write(&pm_sleep_rwsem);
330} 289}
@@ -388,18 +347,15 @@ int device_power_down(pm_message_t state)
388 struct list_head *entry = dpm_off.prev; 347 struct list_head *entry = dpm_off.prev;
389 struct device *dev = to_device(entry); 348 struct device *dev = to_device(entry);
390 349
391 list_del_init(&dev->power.entry);
392 error = suspend_device_late(dev, state); 350 error = suspend_device_late(dev, state);
393 if (error) { 351 if (error) {
394 printk(KERN_ERR "Could not power down device %s: " 352 printk(KERN_ERR "Could not power down device %s: "
395 "error %d\n", 353 "error %d\n",
396 kobject_name(&dev->kobj), error); 354 kobject_name(&dev->kobj), error);
397 if (list_empty(&dev->power.entry))
398 list_add(&dev->power.entry, &dpm_off);
399 break; 355 break;
400 } 356 }
401 if (list_empty(&dev->power.entry)) 357 if (!list_empty(&dev->power.entry))
402 list_add(&dev->power.entry, &dpm_off_irq); 358 list_move(&dev->power.entry, &dpm_off_irq);
403 } 359 }
404 360
405 if (!error) 361 if (!error)
@@ -419,6 +375,8 @@ static int suspend_device(struct device *dev, pm_message_t state)
419{ 375{
420 int error = 0; 376 int error = 0;
421 377
378 down(&dev->sem);
379
422 if (dev->power.power_state.event) { 380 if (dev->power.power_state.event) {
423 dev_dbg(dev, "PM: suspend %d-->%d\n", 381 dev_dbg(dev, "PM: suspend %d-->%d\n",
424 dev->power.power_state.event, state.event); 382 dev->power.power_state.event, state.event);
@@ -441,6 +399,9 @@ static int suspend_device(struct device *dev, pm_message_t state)
441 error = dev->bus->suspend(dev, state); 399 error = dev->bus->suspend(dev, state);
442 suspend_report_result(dev->bus->suspend, error); 400 suspend_report_result(dev->bus->suspend, error);
443 } 401 }
402
403 up(&dev->sem);
404
444 return error; 405 return error;
445} 406}
446 407
@@ -461,13 +422,13 @@ static int dpm_suspend(pm_message_t state)
461 int error = 0; 422 int error = 0;
462 423
463 mutex_lock(&dpm_list_mtx); 424 mutex_lock(&dpm_list_mtx);
464 while (!list_empty(&dpm_locked)) { 425 while (!list_empty(&dpm_active)) {
465 struct list_head *entry = dpm_locked.prev; 426 struct list_head *entry = dpm_active.prev;
466 struct device *dev = to_device(entry); 427 struct device *dev = to_device(entry);
467 428
468 list_del_init(&dev->power.entry);
469 mutex_unlock(&dpm_list_mtx); 429 mutex_unlock(&dpm_list_mtx);
470 error = suspend_device(dev, state); 430 error = suspend_device(dev, state);
431 mutex_lock(&dpm_list_mtx);
471 if (error) { 432 if (error) {
472 printk(KERN_ERR "Could not suspend device %s: " 433 printk(KERN_ERR "Could not suspend device %s: "
473 "error %d%s\n", 434 "error %d%s\n",
@@ -476,14 +437,10 @@ static int dpm_suspend(pm_message_t state)
476 (error == -EAGAIN ? 437 (error == -EAGAIN ?
477 " (please convert to suspend_late)" : 438 " (please convert to suspend_late)" :
478 "")); 439 ""));
479 mutex_lock(&dpm_list_mtx);
480 if (list_empty(&dev->power.entry))
481 list_add(&dev->power.entry, &dpm_locked);
482 break; 440 break;
483 } 441 }
484 mutex_lock(&dpm_list_mtx); 442 if (!list_empty(&dev->power.entry))
485 if (list_empty(&dev->power.entry)) 443 list_move(&dev->power.entry, &dpm_off);
486 list_add(&dev->power.entry, &dpm_off);
487 } 444 }
488 mutex_unlock(&dpm_list_mtx); 445 mutex_unlock(&dpm_list_mtx);
489 446
@@ -491,36 +448,6 @@ static int dpm_suspend(pm_message_t state)
491} 448}
492 449
493/** 450/**
494 * lock_all_devices - Acquire every device's semaphore
495 *
496 * Go through the dpm_active list. Carefully lock each device's
497 * semaphore and put it in on the dpm_locked list.
498 */
499static void lock_all_devices(void)
500{
501 mutex_lock(&dpm_list_mtx);
502 while (!list_empty(&dpm_active)) {
503 struct list_head *entry = dpm_active.next;
504 struct device *dev = to_device(entry);
505
506 /* Required locking order is dev->sem first,
507 * then dpm_list_mutex. Hence this awkward code.
508 */
509 get_device(dev);
510 mutex_unlock(&dpm_list_mtx);
511 down(&dev->sem);
512 mutex_lock(&dpm_list_mtx);
513
514 if (list_empty(entry))
515 up(&dev->sem); /* Device was removed */
516 else
517 list_move_tail(entry, &dpm_locked);
518 put_device(dev);
519 }
520 mutex_unlock(&dpm_list_mtx);
521}
522
523/**
524 * device_suspend - Save state and stop all devices in system. 451 * device_suspend - Save state and stop all devices in system.
525 * @state: new power management state 452 * @state: new power management state
526 * 453 *
@@ -533,7 +460,6 @@ int device_suspend(pm_message_t state)
533 460
534 might_sleep(); 461 might_sleep();
535 down_write(&pm_sleep_rwsem); 462 down_write(&pm_sleep_rwsem);
536 lock_all_devices();
537 error = dpm_suspend(state); 463 error = dpm_suspend(state);
538 if (error) 464 if (error)
539 device_resume(); 465 device_resume();
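With the lock_all_devices()/unlock_all_devices() passes gone, each device's semaphore is now held only around its own callback, and entries migrate between the dpm lists with list_move() under dpm_list_mtx. A condensed sketch of the per-device locking that suspend_device()/resume_device() now perform (body trimmed to the bus callback only):

static int ex_suspend_one(struct device *dev, pm_message_t state)
{
	int error = 0;

	down(&dev->sem);			/* lock just this device */
	if (dev->bus && dev->bus->suspend)
		error = dev->bus->suspend(dev, state);
	up(&dev->sem);

	return error;
}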
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 2f79c55acdcc..8e13fd942163 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -133,6 +133,7 @@ int sysdev_class_register(struct sysdev_class * cls)
133 pr_debug("Registering sysdev class '%s'\n", 133 pr_debug("Registering sysdev class '%s'\n",
134 kobject_name(&cls->kset.kobj)); 134 kobject_name(&cls->kset.kobj));
135 INIT_LIST_HEAD(&cls->drivers); 135 INIT_LIST_HEAD(&cls->drivers);
136 memset(&cls->kset.kobj, 0x00, sizeof(struct kobject));
136 cls->kset.kobj.parent = &system_kset->kobj; 137 cls->kset.kobj.parent = &system_kset->kobj;
137 cls->kset.kobj.ktype = &ktype_sysdev_class; 138 cls->kset.kobj.ktype = &ktype_sysdev_class;
138 cls->kset.kobj.kset = system_kset; 139 cls->kset.kobj.kset = system_kset;
@@ -227,6 +228,9 @@ int sysdev_register(struct sys_device * sysdev)
227 228
228 pr_debug("Registering sys device '%s'\n", kobject_name(&sysdev->kobj)); 229 pr_debug("Registering sys device '%s'\n", kobject_name(&sysdev->kobj));
229 230
231 /* initialize the kobject to 0, in case it had previously been used */
232 memset(&sysdev->kobj, 0x00, sizeof(struct kobject));
233
230 /* Make sure the kset is set */ 234 /* Make sure the kset is set */
231 sysdev->kobj.kset = &cls->kset; 235 sysdev->kobj.kset = &cls->kset;
232 236
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
index f25e7c6b2d27..40bca48abc12 100644
--- a/drivers/base/transport_class.c
+++ b/drivers/base/transport_class.c
@@ -126,9 +126,7 @@ static int transport_setup_classdev(struct attribute_container *cont,
126} 126}
127 127
128/** 128/**
129 * transport_setup_device - declare a new dev for transport class association 129 * transport_setup_device - declare a new dev for transport class association but don't make it visible yet.
130 * but don't make it visible yet.
131 *
132 * @dev: the generic device representing the entity being added 130 * @dev: the generic device representing the entity being added
133 * 131 *
134 * Usually, dev represents some component in the HBA system (either 132 * Usually, dev represents some component in the HBA system (either
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9715be3f2487..55bd35c0f082 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -33,6 +33,7 @@
33#include <linux/blkpg.h> 33#include <linux/blkpg.h>
34#include <linux/timer.h> 34#include <linux/timer.h>
35#include <linux/proc_fs.h> 35#include <linux/proc_fs.h>
36#include <linux/seq_file.h>
36#include <linux/init.h> 37#include <linux/init.h>
37#include <linux/hdreg.h> 38#include <linux/hdreg.h>
38#include <linux/spinlock.h> 39#include <linux/spinlock.h>
@@ -131,7 +132,6 @@ static struct board_type products[] = {
131/*define how many times we will try a command because of bus resets */ 132/*define how many times we will try a command because of bus resets */
132#define MAX_CMD_RETRIES 3 133#define MAX_CMD_RETRIES 3
133 134
134#define READ_AHEAD 1024
135#define MAX_CTLR 32 135#define MAX_CTLR 32
136 136
137/* Originally cciss driver only supports 8 major numbers */ 137/* Originally cciss driver only supports 8 major numbers */
@@ -174,8 +174,6 @@ static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
174static void fail_all_cmds(unsigned long ctlr); 174static void fail_all_cmds(unsigned long ctlr);
175 175
176#ifdef CONFIG_PROC_FS 176#ifdef CONFIG_PROC_FS
177static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
178 int length, int *eof, void *data);
179static void cciss_procinit(int i); 177static void cciss_procinit(int i);
180#else 178#else
181static void cciss_procinit(int i) 179static void cciss_procinit(int i)
@@ -240,24 +238,46 @@ static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
240 */ 238 */
241#define ENG_GIG 1000000000 239#define ENG_GIG 1000000000
242#define ENG_GIG_FACTOR (ENG_GIG/512) 240#define ENG_GIG_FACTOR (ENG_GIG/512)
241#define ENGAGE_SCSI "engage scsi"
243static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", 242static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
244 "UNKNOWN" 243 "UNKNOWN"
245}; 244};
246 245
247static struct proc_dir_entry *proc_cciss; 246static struct proc_dir_entry *proc_cciss;
248 247
249static int cciss_proc_get_info(char *buffer, char **start, off_t offset, 248static void cciss_seq_show_header(struct seq_file *seq)
250 int length, int *eof, void *data)
251{ 249{
252 off_t pos = 0; 250 ctlr_info_t *h = seq->private;
253 off_t len = 0; 251
254 int size, i, ctlr; 252 seq_printf(seq, "%s: HP %s Controller\n"
255 ctlr_info_t *h = (ctlr_info_t *) data; 253 "Board ID: 0x%08lx\n"
256 drive_info_struct *drv; 254 "Firmware Version: %c%c%c%c\n"
257 unsigned long flags; 255 "IRQ: %d\n"
258 sector_t vol_sz, vol_sz_frac; 256 "Logical drives: %d\n"
257 "Current Q depth: %d\n"
258 "Current # commands on controller: %d\n"
259 "Max Q depth since init: %d\n"
260 "Max # commands on controller since init: %d\n"
261 "Max SG entries since init: %d\n",
262 h->devname,
263 h->product_name,
264 (unsigned long)h->board_id,
265 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
266 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
267 h->num_luns,
268 h->Qdepth, h->commands_outstanding,
269 h->maxQsinceinit, h->max_outstanding, h->maxSG);
259 270
260 ctlr = h->ctlr; 271#ifdef CONFIG_CISS_SCSI_TAPE
272 cciss_seq_tape_report(seq, h->ctlr);
273#endif /* CONFIG_CISS_SCSI_TAPE */
274}
275
276static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
277{
278 ctlr_info_t *h = seq->private;
279 unsigned ctlr = h->ctlr;
280 unsigned long flags;
261 281
262 /* prevent displaying bogus info during configuration 282 /* prevent displaying bogus info during configuration
263 * or deconfiguration of a logical volume 283 * or deconfiguration of a logical volume
@@ -265,115 +285,155 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
265 spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 285 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
266 if (h->busy_configuring) { 286 if (h->busy_configuring) {
267 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 287 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
268 return -EBUSY; 288 return ERR_PTR(-EBUSY);
269 } 289 }
270 h->busy_configuring = 1; 290 h->busy_configuring = 1;
271 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 291 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
272 292
273 size = sprintf(buffer, "%s: HP %s Controller\n" 293 if (*pos == 0)
274 "Board ID: 0x%08lx\n" 294 cciss_seq_show_header(seq);
275 "Firmware Version: %c%c%c%c\n"
276 "IRQ: %d\n"
277 "Logical drives: %d\n"
278 "Max sectors: %d\n"
279 "Current Q depth: %d\n"
280 "Current # commands on controller: %d\n"
281 "Max Q depth since init: %d\n"
282 "Max # commands on controller since init: %d\n"
283 "Max SG entries since init: %d\n\n",
284 h->devname,
285 h->product_name,
286 (unsigned long)h->board_id,
287 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
288 h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
289 h->num_luns,
290 h->cciss_max_sectors,
291 h->Qdepth, h->commands_outstanding,
292 h->maxQsinceinit, h->max_outstanding, h->maxSG);
293
294 pos += size;
295 len += size;
296 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
297 for (i = 0; i <= h->highest_lun; i++) {
298
299 drv = &h->drv[i];
300 if (drv->heads == 0)
301 continue;
302 295
303 vol_sz = drv->nr_blocks; 296 return pos;
304 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); 297}
305 vol_sz_frac *= 100; 298
306 sector_div(vol_sz_frac, ENG_GIG_FACTOR); 299static int cciss_seq_show(struct seq_file *seq, void *v)
300{
301 sector_t vol_sz, vol_sz_frac;
302 ctlr_info_t *h = seq->private;
303 unsigned ctlr = h->ctlr;
304 loff_t *pos = v;
305 drive_info_struct *drv = &h->drv[*pos];
306
307 if (*pos > h->highest_lun)
308 return 0;
309
310 if (drv->heads == 0)
311 return 0;
312
313 vol_sz = drv->nr_blocks;
314 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
315 vol_sz_frac *= 100;
316 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
317
318 if (drv->raid_level > 5)
319 drv->raid_level = RAID_UNKNOWN;
320 seq_printf(seq, "cciss/c%dd%d:"
321 "\t%4u.%02uGB\tRAID %s\n",
322 ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
323 raid_label[drv->raid_level]);
324 return 0;
325}
326
327static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
328{
329 ctlr_info_t *h = seq->private;
330
331 if (*pos > h->highest_lun)
332 return NULL;
333 *pos += 1;
334
335 return pos;
336}
337
338static void cciss_seq_stop(struct seq_file *seq, void *v)
339{
340 ctlr_info_t *h = seq->private;
341
342 /* Only reset h->busy_configuring if we succeeded in setting
343 * it during cciss_seq_start. */
344 if (v == ERR_PTR(-EBUSY))
345 return;
307 346
308 if (drv->raid_level > 5)
309 drv->raid_level = RAID_UNKNOWN;
310 size = sprintf(buffer + len, "cciss/c%dd%d:"
311 "\t%4u.%02uGB\tRAID %s\n",
312 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
313 raid_label[drv->raid_level]);
314 pos += size;
315 len += size;
316 }
317
318 *eof = 1;
319 *start = buffer + offset;
320 len -= offset;
321 if (len > length)
322 len = length;
323 h->busy_configuring = 0; 347 h->busy_configuring = 0;
324 return len;
325} 348}
326 349
327static int 350static struct seq_operations cciss_seq_ops = {
328cciss_proc_write(struct file *file, const char __user *buffer, 351 .start = cciss_seq_start,
329 unsigned long count, void *data) 352 .show = cciss_seq_show,
353 .next = cciss_seq_next,
354 .stop = cciss_seq_stop,
355};
356
357static int cciss_seq_open(struct inode *inode, struct file *file)
330{ 358{
331 unsigned char cmd[80]; 359 int ret = seq_open(file, &cciss_seq_ops);
332 int len; 360 struct seq_file *seq = file->private_data;
333#ifdef CONFIG_CISS_SCSI_TAPE 361
334 ctlr_info_t *h = (ctlr_info_t *) data; 362 if (!ret)
335 int rc; 363 seq->private = PDE(inode)->data;
364
365 return ret;
366}
367
368static ssize_t
369cciss_proc_write(struct file *file, const char __user *buf,
370 size_t length, loff_t *ppos)
371{
372 int err;
373 char *buffer;
374
375#ifndef CONFIG_CISS_SCSI_TAPE
376 return -EINVAL;
336#endif 377#endif
337 378
338 if (count > sizeof(cmd) - 1) 379 if (!buf || length > PAGE_SIZE - 1)
339 return -EINVAL; 380 return -EINVAL;
340 if (copy_from_user(cmd, buffer, count)) 381
341 return -EFAULT; 382 buffer = (char *)__get_free_page(GFP_KERNEL);
342 cmd[count] = '\0'; 383 if (!buffer)
343 len = strlen(cmd); // above 3 lines ensure safety 384 return -ENOMEM;
344 if (len && cmd[len - 1] == '\n') 385
345 cmd[--len] = '\0'; 386 err = -EFAULT;
346# ifdef CONFIG_CISS_SCSI_TAPE 387 if (copy_from_user(buffer, buf, length))
347 if (strcmp("engage scsi", cmd) == 0) { 388 goto out;
389 buffer[length] = '\0';
390
391#ifdef CONFIG_CISS_SCSI_TAPE
392 if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
393 struct seq_file *seq = file->private_data;
394 ctlr_info_t *h = seq->private;
395 int rc;
396
348 rc = cciss_engage_scsi(h->ctlr); 397 rc = cciss_engage_scsi(h->ctlr);
349 if (rc != 0) 398 if (rc != 0)
350 return -rc; 399 err = -rc;
351 return count; 400 else
352 } 401 err = length;
402 } else
403#endif /* CONFIG_CISS_SCSI_TAPE */
404 err = -EINVAL;
353 /* might be nice to have "disengage" too, but it's not 405 /* might be nice to have "disengage" too, but it's not
354 safely possible. (only 1 module use count, lock issues.) */ 406 safely possible. (only 1 module use count, lock issues.) */
355# endif 407
356 return -EINVAL; 408out:
409 free_page((unsigned long)buffer);
410 return err;
357} 411}
358 412
359/* 413static struct file_operations cciss_proc_fops = {
360 * Get us a file in /proc/cciss that says something about each controller. 414 .owner = THIS_MODULE,
361 * Create /proc/cciss if it doesn't exist yet. 415 .open = cciss_seq_open,
362 */ 416 .read = seq_read,
417 .llseek = seq_lseek,
418 .release = seq_release,
419 .write = cciss_proc_write,
420};
421
363static void __devinit cciss_procinit(int i) 422static void __devinit cciss_procinit(int i)
364{ 423{
365 struct proc_dir_entry *pde; 424 struct proc_dir_entry *pde;
366 425
367 if (proc_cciss == NULL) { 426 if (proc_cciss == NULL)
368 proc_cciss = proc_mkdir("cciss", proc_root_driver); 427 proc_cciss = proc_mkdir("cciss", proc_root_driver);
369 if (!proc_cciss) 428 if (!proc_cciss)
370 return; 429 return;
371 } 430 pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
431 S_IROTH, proc_cciss,
432 &cciss_proc_fops);
433 if (!pde)
434 return;
372 435
373 pde = create_proc_read_entry(hba[i]->devname, 436 pde->data = hba[i];
374 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
375 proc_cciss, cciss_proc_get_info, hba[i]);
376 pde->write_proc = cciss_proc_write;
377} 437}
378#endif /* CONFIG_PROC_FS */ 438#endif /* CONFIG_PROC_FS */
379 439
@@ -1341,7 +1401,6 @@ geo_inq:
1341 disk->private_data = &h->drv[drv_index]; 1401 disk->private_data = &h->drv[drv_index];
1342 1402
1343 /* Set up queue information */ 1403 /* Set up queue information */
1344 disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
1345 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask); 1404 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
1346 1405
1347 /* This is a hardware imposed limit. */ 1406 /* This is a hardware imposed limit. */
@@ -3434,7 +3493,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3434 } 3493 }
3435 drv->queue = q; 3494 drv->queue = q;
3436 3495
3437 q->backing_dev_info.ra_pages = READ_AHEAD;
3438 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); 3496 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
3439 3497
3440 /* This is a hardware imposed limit. */ 3498 /* This is a hardware imposed limit. */
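The procfs conversion above follows the usual seq_file shape: start() acquires state and returns a cursor, show() prints one record, next() advances, stop() releases, and open() threads the controller pointer in through PDE(inode)->data. A generic, stripped-down sketch of that shape (NRECORDS and the ex_ names are illustrative only):

#define NRECORDS 4				/* illustrative */

static void *ex_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos < NRECORDS ? pos : NULL;	/* the cursor is the index */
}

static void *ex_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return *pos < NRECORDS ? pos : NULL;
}

static void ex_seq_stop(struct seq_file *seq, void *v)
{
	/* undo whatever start() acquired (locks, flags, ...) */
}

static int ex_seq_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "record %lld\n", (long long)*(loff_t *)v);
	return 0;
}

static struct seq_operations ex_seq_ops = {
	.start	= ex_seq_start,
	.next	= ex_seq_next,
	.stop	= ex_seq_stop,
	.show	= ex_seq_show,
};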
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 55178e9973a0..45ac09300eb3 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -1404,21 +1404,18 @@ cciss_engage_scsi(int ctlr)
1404} 1404}
1405 1405
1406static void 1406static void
1407cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len) 1407cciss_seq_tape_report(struct seq_file *seq, int ctlr)
1408{ 1408{
1409 unsigned long flags; 1409 unsigned long flags;
1410 int size;
1411
1412 *pos = *pos -1; *len = *len - 1; // cut off the last trailing newline
1413 1410
1414 CPQ_TAPE_LOCK(ctlr, flags); 1411 CPQ_TAPE_LOCK(ctlr, flags);
1415 size = sprintf(buffer + *len, 1412 seq_printf(seq,
1416 "Sequential access devices: %d\n\n", 1413 "Sequential access devices: %d\n\n",
1417 ccissscsi[ctlr].ndevices); 1414 ccissscsi[ctlr].ndevices);
1418 CPQ_TAPE_UNLOCK(ctlr, flags); 1415 CPQ_TAPE_UNLOCK(ctlr, flags);
1419 *pos += size; *len += size;
1420} 1416}
1421 1417
1418
1422/* Need at least one of these error handlers to keep ../scsi/hosts.c from 1419/* Need at least one of these error handlers to keep ../scsi/hosts.c from
1423 * complaining. Doing a host- or bus-reset can't do anything good here. 1420 * complaining. Doing a host- or bus-reset can't do anything good here.
1424 * Despite what it might say in scsi_error.c, there may well be commands 1421 * Despite what it might say in scsi_error.c, there may well be commands
@@ -1498,6 +1495,5 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
1498#define cciss_scsi_setup(cntl_num) 1495#define cciss_scsi_setup(cntl_num)
1499#define cciss_unregister_scsi(ctlr) 1496#define cciss_unregister_scsi(ctlr)
1500#define cciss_register_scsi(ctlr) 1497#define cciss_register_scsi(ctlr)
1501#define cciss_proc_tape_report(ctlr, buffer, pos, len)
1502 1498
1503#endif /* CONFIG_CISS_SCSI_TAPE */ 1499#endif /* CONFIG_CISS_SCSI_TAPE */
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 018753c59b8e..b53fdb0a282c 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -655,6 +655,7 @@ static int __init nbd_init(void)
655 655
656 for (i = 0; i < nbds_max; i++) { 656 for (i = 0; i < nbds_max; i++) {
657 struct gendisk *disk = alloc_disk(1); 657 struct gendisk *disk = alloc_disk(1);
658 elevator_t *old_e;
658 if (!disk) 659 if (!disk)
659 goto out; 660 goto out;
660 nbd_dev[i].disk = disk; 661 nbd_dev[i].disk = disk;
@@ -668,6 +669,11 @@ static int __init nbd_init(void)
668 put_disk(disk); 669 put_disk(disk);
669 goto out; 670 goto out;
670 } 671 }
672 old_e = disk->queue->elevator;
673 if (elevator_init(disk->queue, "deadline") == 0 ||
674 elevator_init(disk->queue, "noop") == 0) {
675 elevator_exit(old_e);
676 }
671 } 677 }
672 678
673 if (register_blkdev(NBD_MAJOR, "nbd")) { 679 if (register_blkdev(NBD_MAJOR, "nbd")) {
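The nbd hunk above replaces each queue's default I/O scheduler with deadline, falling back to noop. elevator_init() attaches a new elevator to the queue, so the previous one is saved beforehand and torn down with elevator_exit() only if one of the replacements actually attached. The same pattern as a standalone helper (name illustrative):

static void ex_prefer_deadline(struct request_queue *q)
{
	elevator_t *old_e = q->elevator;

	if (elevator_init(q, "deadline") == 0 ||
	    elevator_init(q, "noop") == 0)
		elevator_exit(old_e);	/* a replacement took; drop the old one */
}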
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 674cd66dcaba..18feb1c7c33b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -849,7 +849,8 @@ static int pkt_flush_cache(struct pktcdvd_device *pd)
849/* 849/*
850 * speed is given as the normal factor, e.g. 4 for 4x 850 * speed is given as the normal factor, e.g. 4 for 4x
851 */ 851 */
852static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsigned read_speed) 852static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
853 unsigned write_speed, unsigned read_speed)
853{ 854{
854 struct packet_command cgc; 855 struct packet_command cgc;
855 struct request_sense sense; 856 struct request_sense sense;
@@ -1776,7 +1777,8 @@ static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type,
1776 return pkt_generic_packet(pd, &cgc); 1777 return pkt_generic_packet(pd, &cgc);
1777} 1778}
1778 1779
1779static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written) 1780static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
1781 long *last_written)
1780{ 1782{
1781 disc_information di; 1783 disc_information di;
1782 track_information ti; 1784 track_information ti;
@@ -1813,7 +1815,7 @@ static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written)
1813/* 1815/*
1814 * write mode select package based on pd->settings 1816 * write mode select package based on pd->settings
1815 */ 1817 */
1816static int pkt_set_write_settings(struct pktcdvd_device *pd) 1818static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
1817{ 1819{
1818 struct packet_command cgc; 1820 struct packet_command cgc;
1819 struct request_sense sense; 1821 struct request_sense sense;
@@ -1972,7 +1974,7 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
1972 return 1; 1974 return 1;
1973} 1975}
1974 1976
1975static int pkt_probe_settings(struct pktcdvd_device *pd) 1977static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
1976{ 1978{
1977 struct packet_command cgc; 1979 struct packet_command cgc;
1978 unsigned char buf[12]; 1980 unsigned char buf[12];
@@ -2071,7 +2073,8 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
2071/* 2073/*
2072 * enable/disable write caching on drive 2074 * enable/disable write caching on drive
2073 */ 2075 */
2074static int pkt_write_caching(struct pktcdvd_device *pd, int set) 2076static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
2077 int set)
2075{ 2078{
2076 struct packet_command cgc; 2079 struct packet_command cgc;
2077 struct request_sense sense; 2080 struct request_sense sense;
@@ -2116,7 +2119,8 @@ static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
2116/* 2119/*
2117 * Returns drive maximum write speed 2120 * Returns drive maximum write speed
2118 */ 2121 */
2119static int pkt_get_max_speed(struct pktcdvd_device *pd, unsigned *write_speed) 2122static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
2123 unsigned *write_speed)
2120{ 2124{
2121 struct packet_command cgc; 2125 struct packet_command cgc;
2122 struct request_sense sense; 2126 struct request_sense sense;
@@ -2177,7 +2181,8 @@ static char us_clv_to_speed[16] = {
2177/* 2181/*
2178 * reads the maximum media speed from ATIP 2182 * reads the maximum media speed from ATIP
2179 */ 2183 */
2180static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed) 2184static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
2185 unsigned *speed)
2181{ 2186{
2182 struct packet_command cgc; 2187 struct packet_command cgc;
2183 struct request_sense sense; 2188 struct request_sense sense;
@@ -2249,7 +2254,7 @@ static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed)
2249 } 2254 }
2250} 2255}
2251 2256
2252static int pkt_perform_opc(struct pktcdvd_device *pd) 2257static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
2253{ 2258{
2254 struct packet_command cgc; 2259 struct packet_command cgc;
2255 struct request_sense sense; 2260 struct request_sense sense;
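
Every pktcdvd helper above gains noinline_for_stack because each keeps a struct packet_command and a struct request_sense on its stack; preventing inlining keeps those buffers in one short-lived frame rather than letting them pile up in a large inlined caller. A hedged sketch of the idea (the function and its buffers are invented stand-ins):

#include <linux/compiler.h>
#include <linux/string.h>

/* Kept out of line so the large temporaries live only in this frame. */
static noinline_for_stack int example_issue_command(void *dev)
{
        unsigned char cmd[12];          /* stand-in for struct packet_command */
        unsigned char sense[64];        /* stand-in for struct request_sense */

        memset(cmd, 0, sizeof(cmd));
        memset(sense, 0, sizeof(sense));
        /* ... build cmd[] and hand it to the transport here ... */
        return 0;
}
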
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 9e61fca46117..41ca721d2523 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -528,8 +528,7 @@ static int block_event_to_scatterlist(const struct vioblocklpevent *bevent,
528 numsg = VIOMAXBLOCKDMA; 528 numsg = VIOMAXBLOCKDMA;
529 529
530 *total_len = 0; 530 *total_len = 0;
531 memset(sg, 0, sizeof(sg[0]) * VIOMAXBLOCKDMA); 531 sg_init_table(sg, VIOMAXBLOCKDMA);
532
533 for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) { 532 for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) {
534 sg_dma_address(&sg[i]) = rw_data->dma_info[i].token; 533 sg_dma_address(&sg[i]) = rw_data->dma_info[i].token;
535 sg_dma_len(&sg[i]) = rw_data->dma_info[i].len; 534 sg_dma_len(&sg[i]) = rw_data->dma_info[i].len;
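
Replacing the memset() with sg_init_table() above matters because the scatterlist code expects tables to be initialized through the helper, which zeroes the entries and marks the final one as the end of the list. A sketch of the same initialization pattern (EXAMPLE_MAX_SG and the function are illustrative):

#include <linux/scatterlist.h>

#define EXAMPLE_MAX_SG 8        /* hypothetical table size */

static void example_fill_first_entry(struct scatterlist *sg,
                                     dma_addr_t addr, unsigned int len)
{
        sg_init_table(sg, EXAMPLE_MAX_SG);      /* zeroes and marks the end entry */
        sg_dma_address(&sg[0]) = addr;
        sg_dma_len(&sg[0]) = len;
}
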
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 372c7ef633da..8b884f87d8b7 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -116,6 +116,7 @@ static struct usb_device_id blacklist_ids[] = {
116 { USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 }, 116 { USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 },
117 117
118 /* Broadcom BCM2045 */ 118 /* Broadcom BCM2045 */
119 { USB_DEVICE(0x0a5c, 0x2039), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
119 { USB_DEVICE(0x0a5c, 0x2101), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, 120 { USB_DEVICE(0x0a5c, 0x2101), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
120 121
121 /* IBM/Lenovo ThinkPad with Broadcom chip */ 122 /* IBM/Lenovo ThinkPad with Broadcom chip */
@@ -148,6 +149,9 @@ static struct usb_device_id blacklist_ids[] = {
148 { USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC }, 149 { USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC },
149 { USB_DEVICE(0x0400, 0x080a), .driver_info = HCI_BROKEN_ISOC }, 150 { USB_DEVICE(0x0400, 0x080a), .driver_info = HCI_BROKEN_ISOC },
150 151
152 /* CONWISE Technology based adapters with buggy SCO support */
153 { USB_DEVICE(0x0e5e, 0x6622), .driver_info = HCI_BROKEN_ISOC },
154
151 /* Belkin F8T012 and F8T013 devices */ 155 /* Belkin F8T012 and F8T013 devices */
152 { USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, 156 { USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
153 { USB_DEVICE(0x050d, 0x0013), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, 157 { USB_DEVICE(0x050d, 0x0013), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
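
The new hci_usb entries above extend a quirk table: each usb_device_id carries driver_info flags that the probe path consults to work around a specific device. A stripped-down sketch of such a table (the EXAMPLE_QUIRK_* flags are invented; the vendor/product IDs are the ones added above):

#include <linux/usb.h>

#define EXAMPLE_QUIRK_RESET     (1 << 0)        /* hypothetical quirk bits */
#define EXAMPLE_QUIRK_NO_ISOC   (1 << 1)

static struct usb_device_id example_blacklist[] = {
        /* device that needs a reset during setup */
        { USB_DEVICE(0x0a5c, 0x2039), .driver_info = EXAMPLE_QUIRK_RESET },
        /* device whose isochronous (SCO) transfers are broken */
        { USB_DEVICE(0x0e5e, 0x6622), .driver_info = EXAMPLE_QUIRK_NO_ISOC },
        { }     /* terminating entry */
};
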
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index db259e60289b..12f5baea439b 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1152,8 +1152,8 @@ clean_up_and_return:
1152/* This code is similar to that in open_for_data. The routine is called 1152/* This code is similar to that in open_for_data. The routine is called
1153 whenever an audio play operation is requested. 1153 whenever an audio play operation is requested.
1154*/ 1154*/
1155int check_for_audio_disc(struct cdrom_device_info * cdi, 1155static int check_for_audio_disc(struct cdrom_device_info * cdi,
1156 struct cdrom_device_ops * cdo) 1156 struct cdrom_device_ops * cdo)
1157{ 1157{
1158 int ret; 1158 int ret;
1159 tracktype tracks; 1159 tracktype tracks;
diff --git a/drivers/char/defkeymap.c_shipped b/drivers/char/defkeymap.c_shipped
index 0aa419a61767..d2208dfe3f67 100644
--- a/drivers/char/defkeymap.c_shipped
+++ b/drivers/char/defkeymap.c_shipped
@@ -223,40 +223,40 @@ char *func_table[MAX_NR_FUNC] = {
223}; 223};
224 224
225struct kbdiacruc accent_table[MAX_DIACR] = { 225struct kbdiacruc accent_table[MAX_DIACR] = {
226 {'`', 'A', '\300'}, {'`', 'a', '\340'}, 226 {'`', 'A', 0300}, {'`', 'a', 0340},
227 {'\'', 'A', '\301'}, {'\'', 'a', '\341'}, 227 {'\'', 'A', 0301}, {'\'', 'a', 0341},
228 {'^', 'A', '\302'}, {'^', 'a', '\342'}, 228 {'^', 'A', 0302}, {'^', 'a', 0342},
229 {'~', 'A', '\303'}, {'~', 'a', '\343'}, 229 {'~', 'A', 0303}, {'~', 'a', 0343},
230 {'"', 'A', '\304'}, {'"', 'a', '\344'}, 230 {'"', 'A', 0304}, {'"', 'a', 0344},
231 {'O', 'A', '\305'}, {'o', 'a', '\345'}, 231 {'O', 'A', 0305}, {'o', 'a', 0345},
232 {'0', 'A', '\305'}, {'0', 'a', '\345'}, 232 {'0', 'A', 0305}, {'0', 'a', 0345},
233 {'A', 'A', '\305'}, {'a', 'a', '\345'}, 233 {'A', 'A', 0305}, {'a', 'a', 0345},
234 {'A', 'E', '\306'}, {'a', 'e', '\346'}, 234 {'A', 'E', 0306}, {'a', 'e', 0346},
235 {',', 'C', '\307'}, {',', 'c', '\347'}, 235 {',', 'C', 0307}, {',', 'c', 0347},
236 {'`', 'E', '\310'}, {'`', 'e', '\350'}, 236 {'`', 'E', 0310}, {'`', 'e', 0350},
237 {'\'', 'E', '\311'}, {'\'', 'e', '\351'}, 237 {'\'', 'E', 0311}, {'\'', 'e', 0351},
238 {'^', 'E', '\312'}, {'^', 'e', '\352'}, 238 {'^', 'E', 0312}, {'^', 'e', 0352},
239 {'"', 'E', '\313'}, {'"', 'e', '\353'}, 239 {'"', 'E', 0313}, {'"', 'e', 0353},
240 {'`', 'I', '\314'}, {'`', 'i', '\354'}, 240 {'`', 'I', 0314}, {'`', 'i', 0354},
241 {'\'', 'I', '\315'}, {'\'', 'i', '\355'}, 241 {'\'', 'I', 0315}, {'\'', 'i', 0355},
242 {'^', 'I', '\316'}, {'^', 'i', '\356'}, 242 {'^', 'I', 0316}, {'^', 'i', 0356},
243 {'"', 'I', '\317'}, {'"', 'i', '\357'}, 243 {'"', 'I', 0317}, {'"', 'i', 0357},
244 {'-', 'D', '\320'}, {'-', 'd', '\360'}, 244 {'-', 'D', 0320}, {'-', 'd', 0360},
245 {'~', 'N', '\321'}, {'~', 'n', '\361'}, 245 {'~', 'N', 0321}, {'~', 'n', 0361},
246 {'`', 'O', '\322'}, {'`', 'o', '\362'}, 246 {'`', 'O', 0322}, {'`', 'o', 0362},
247 {'\'', 'O', '\323'}, {'\'', 'o', '\363'}, 247 {'\'', 'O', 0323}, {'\'', 'o', 0363},
248 {'^', 'O', '\324'}, {'^', 'o', '\364'}, 248 {'^', 'O', 0324}, {'^', 'o', 0364},
249 {'~', 'O', '\325'}, {'~', 'o', '\365'}, 249 {'~', 'O', 0325}, {'~', 'o', 0365},
250 {'"', 'O', '\326'}, {'"', 'o', '\366'}, 250 {'"', 'O', 0326}, {'"', 'o', 0366},
251 {'/', 'O', '\330'}, {'/', 'o', '\370'}, 251 {'/', 'O', 0330}, {'/', 'o', 0370},
252 {'`', 'U', '\331'}, {'`', 'u', '\371'}, 252 {'`', 'U', 0331}, {'`', 'u', 0371},
253 {'\'', 'U', '\332'}, {'\'', 'u', '\372'}, 253 {'\'', 'U', 0332}, {'\'', 'u', 0372},
254 {'^', 'U', '\333'}, {'^', 'u', '\373'}, 254 {'^', 'U', 0333}, {'^', 'u', 0373},
255 {'"', 'U', '\334'}, {'"', 'u', '\374'}, 255 {'"', 'U', 0334}, {'"', 'u', 0374},
256 {'\'', 'Y', '\335'}, {'\'', 'y', '\375'}, 256 {'\'', 'Y', 0335}, {'\'', 'y', 0375},
257 {'T', 'H', '\336'}, {'t', 'h', '\376'}, 257 {'T', 'H', 0336}, {'t', 'h', 0376},
258 {'s', 's', '\337'}, {'"', 'y', '\377'}, 258 {'s', 's', 0337}, {'"', 'y', 0377},
259 {'s', 'z', '\337'}, {'i', 'j', '\377'}, 259 {'s', 'z', 0337}, {'i', 'j', 0377},
260}; 260};
261 261
262unsigned int accent_table_size = 68; 262unsigned int accent_table_size = 68;
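
The accent table above trades character escapes such as '\300' for bare octal constants like 0300. The result field of struct kbdiacruc is an unsigned int, and on ABIs where plain char is signed the escaped form sign-extends before the assignment. A tiny userspace illustration of the difference (not kernel code):

#include <stdio.h>

int main(void)
{
        printf("%x\n", (unsigned int)'\300');   /* ffffffc0 where char is signed */
        printf("%x\n", (unsigned int)0300);     /* always c0 */
        return 0;
}
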
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
index c01e26d9ee5e..f3fe62067344 100644
--- a/drivers/char/esp.c
+++ b/drivers/char/esp.c
@@ -2484,6 +2484,7 @@ static int __init espserial_init(void)
2484 return 0; 2484 return 0;
2485 } 2485 }
2486 2486
2487 spin_lock_init(&info->lock);
2487 /* rx_trigger, tx_trigger are needed by autoconfig */ 2488 /* rx_trigger, tx_trigger are needed by autoconfig */
2488 info->config.rx_trigger = rx_trigger; 2489 info->config.rx_trigger = rx_trigger;
2489 info->config.tx_trigger = tx_trigger; 2490 info->config.tx_trigger = tx_trigger;
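
The one-liner added to espserial_init() above initializes the per-port spinlock before anything can take it; locking an uninitialized spinlock is unreliable and is flagged by the lock debugging options. A sketch of the same allocate-then-init ordering (struct example_port and the helper are hypothetical):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_port {
        spinlock_t lock;
        /* ... per-port state ... */
};

static struct example_port *example_alloc_port(void)
{
        struct example_port *info = kzalloc(sizeof(*info), GFP_KERNEL);

        if (!info)
                return NULL;
        spin_lock_init(&info->lock);    /* before the first spin_lock() anywhere */
        return info;
}
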
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 85d596a3c18c..eba2883b630e 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -1527,7 +1527,7 @@ static int __devinit reset_card(struct pci_dev *pdev,
1527 msleep(10); 1527 msleep(10);
1528 1528
1529 portcount = inw(base + 0x2); 1529 portcount = inw(base + 0x2);
1530 if (!inw(base + 0xe) & 0x1 || (portcount != 0 && portcount != 4 && 1530 if (!(inw(base + 0xe) & 0x1) || (portcount != 0 && portcount != 4 &&
1531 portcount != 8 && portcount != 16)) { 1531 portcount != 8 && portcount != 16)) {
1532 dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure.\n", 1532 dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure.\n",
1533 card + 1); 1533 card + 1);
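
The isicom fix above is pure operator precedence: '!' binds tighter than '&', so the old test evaluated (!inw(...)) & 0x1 rather than checking bit 0. A self-contained illustration with invented names:

/* Intended meaning: "is bit 0 of the status word clear?" */
static int example_card_absent(unsigned int status)
{
        /* Buggy form, kept only as a comment: evaluates (!status) & 0x1,
         * i.e. "is the whole word zero?" */
        /* return !status & 0x1; */

        return !(status & 0x1);
}
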
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index dfaab2322de3..6d0dc5f9b6bb 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -190,6 +190,14 @@ enum card_type {
190 F32_8 = 8192, /* 3072 bytes downl. + 1024 bytes uplink * 2 -> 8192 */ 190 F32_8 = 8192, /* 3072 bytes downl. + 1024 bytes uplink * 2 -> 8192 */
191}; 191};
192 192
193/* Initialization states a card can be in */
194enum card_state {
195 NOZOMI_STATE_UKNOWN = 0,
196 NOZOMI_STATE_ENABLED = 1, /* pci device enabled */
197 NOZOMI_STATE_ALLOCATED = 2, /* config setup done */
198 NOZOMI_STATE_READY = 3, /* flowcontrols received */
199};
200
193/* Two different toggle channels exist */ 201/* Two different toggle channels exist */
194enum channel_type { 202enum channel_type {
195 CH_A = 0, 203 CH_A = 0,
@@ -385,6 +393,7 @@ struct nozomi {
385 spinlock_t spin_mutex; /* secures access to registers and tty */ 393 spinlock_t spin_mutex; /* secures access to registers and tty */
386 394
387 unsigned int index_start; 395 unsigned int index_start;
396 enum card_state state;
388 u32 open_ttys; 397 u32 open_ttys;
389}; 398};
390 399
@@ -686,6 +695,7 @@ static int nozomi_read_config_table(struct nozomi *dc)
686 dc->last_ier = dc->last_ier | CTRL_DL; 695 dc->last_ier = dc->last_ier | CTRL_DL;
687 writew(dc->last_ier, dc->reg_ier); 696 writew(dc->last_ier, dc->reg_ier);
688 697
698 dc->state = NOZOMI_STATE_ALLOCATED;
689 dev_info(&dc->pdev->dev, "Initialization OK!\n"); 699 dev_info(&dc->pdev->dev, "Initialization OK!\n");
690 return 1; 700 return 1;
691 } 701 }
@@ -944,6 +954,14 @@ static int receive_flow_control(struct nozomi *dc)
944 case CTRL_APP2: 954 case CTRL_APP2:
945 port = PORT_APP2; 955 port = PORT_APP2;
946 enable_ier = APP2_DL; 956 enable_ier = APP2_DL;
957 if (dc->state == NOZOMI_STATE_ALLOCATED) {
958 /*
959 * After card initialization the flow control
960 * received for APP2 is always the last
961 */
962 dc->state = NOZOMI_STATE_READY;
963 dev_info(&dc->pdev->dev, "Device READY!\n");
964 }
947 break; 965 break;
948 default: 966 default:
949 dev_err(&dc->pdev->dev, 967 dev_err(&dc->pdev->dev,
@@ -1366,22 +1384,12 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
1366 1384
1367 dc->pdev = pdev; 1385 dc->pdev = pdev;
1368 1386
1369 /* Find out what card type it is */
1370 nozomi_get_card_type(dc);
1371
1372 ret = pci_enable_device(dc->pdev); 1387 ret = pci_enable_device(dc->pdev);
1373 if (ret) { 1388 if (ret) {
1374 dev_err(&pdev->dev, "Failed to enable PCI Device\n"); 1389 dev_err(&pdev->dev, "Failed to enable PCI Device\n");
1375 goto err_free; 1390 goto err_free;
1376 } 1391 }
1377 1392
1378 start = pci_resource_start(dc->pdev, 0);
1379 if (start == 0) {
1380 dev_err(&pdev->dev, "No I/O address for card detected\n");
1381 ret = -ENODEV;
1382 goto err_disable_device;
1383 }
1384
1385 ret = pci_request_regions(dc->pdev, NOZOMI_NAME); 1393 ret = pci_request_regions(dc->pdev, NOZOMI_NAME);
1386 if (ret) { 1394 if (ret) {
1387 dev_err(&pdev->dev, "I/O address 0x%04x already in use\n", 1395 dev_err(&pdev->dev, "I/O address 0x%04x already in use\n",
@@ -1389,6 +1397,16 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
1389 goto err_disable_device; 1397 goto err_disable_device;
1390 } 1398 }
1391 1399
1400 start = pci_resource_start(dc->pdev, 0);
1401 if (start == 0) {
1402 dev_err(&pdev->dev, "No I/O address for card detected\n");
1403 ret = -ENODEV;
1404 goto err_rel_regs;
1405 }
1406
1407 /* Find out what card type it is */
1408 nozomi_get_card_type(dc);
1409
1392 dc->base_addr = ioremap(start, dc->card_type); 1410 dc->base_addr = ioremap(start, dc->card_type);
1393 if (!dc->base_addr) { 1411 if (!dc->base_addr) {
1394 dev_err(&pdev->dev, "Unable to map card MMIO\n"); 1412 dev_err(&pdev->dev, "Unable to map card MMIO\n");
@@ -1425,6 +1443,14 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
1425 dc->index_start = ndev_idx * MAX_PORT; 1443 dc->index_start = ndev_idx * MAX_PORT;
1426 ndevs[ndev_idx] = dc; 1444 ndevs[ndev_idx] = dc;
1427 1445
1446 pci_set_drvdata(pdev, dc);
1447
1448 /* Enable RESET interrupt */
1449 dc->last_ier = RESET;
1450 iowrite16(dc->last_ier, dc->reg_ier);
1451
1452 dc->state = NOZOMI_STATE_ENABLED;
1453
1428 for (i = 0; i < MAX_PORT; i++) { 1454 for (i = 0; i < MAX_PORT; i++) {
1429 mutex_init(&dc->port[i].tty_sem); 1455 mutex_init(&dc->port[i].tty_sem);
1430 dc->port[i].tty_open_count = 0; 1456 dc->port[i].tty_open_count = 0;
@@ -1433,12 +1459,6 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
1433 &pdev->dev); 1459 &pdev->dev);
1434 } 1460 }
1435 1461
1436 /* Enable RESET interrupt. */
1437 dc->last_ier = RESET;
1438 writew(dc->last_ier, dc->reg_ier);
1439
1440 pci_set_drvdata(pdev, dc);
1441
1442 return 0; 1462 return 0;
1443 1463
1444err_free_sbuf: 1464err_free_sbuf:
@@ -1553,7 +1573,7 @@ static int ntty_open(struct tty_struct *tty, struct file *file)
1553 struct nozomi *dc = get_dc_by_tty(tty); 1573 struct nozomi *dc = get_dc_by_tty(tty);
1554 unsigned long flags; 1574 unsigned long flags;
1555 1575
1556 if (!port || !dc) 1576 if (!port || !dc || dc->state != NOZOMI_STATE_READY)
1557 return -ENODEV; 1577 return -ENODEV;
1558 1578
1559 if (mutex_lock_interruptible(&port->tty_sem)) 1579 if (mutex_lock_interruptible(&port->tty_sem))
@@ -1716,6 +1736,10 @@ static int ntty_tiocmget(struct tty_struct *tty, struct file *file)
1716static int ntty_tiocmset(struct tty_struct *tty, struct file *file, 1736static int ntty_tiocmset(struct tty_struct *tty, struct file *file,
1717 unsigned int set, unsigned int clear) 1737 unsigned int set, unsigned int clear)
1718{ 1738{
1739 struct nozomi *dc = get_dc_by_tty(tty);
1740 unsigned long flags;
1741
1742 spin_lock_irqsave(&dc->spin_mutex, flags);
1719 if (set & TIOCM_RTS) 1743 if (set & TIOCM_RTS)
1720 set_rts(tty, 1); 1744 set_rts(tty, 1);
1721 else if (clear & TIOCM_RTS) 1745 else if (clear & TIOCM_RTS)
@@ -1725,6 +1749,7 @@ static int ntty_tiocmset(struct tty_struct *tty, struct file *file,
1725 set_dtr(tty, 1); 1749 set_dtr(tty, 1);
1726 else if (clear & TIOCM_DTR) 1750 else if (clear & TIOCM_DTR)
1727 set_dtr(tty, 0); 1751 set_dtr(tty, 0);
1752 spin_unlock_irqrestore(&dc->spin_mutex, flags);
1728 1753
1729 return 0; 1754 return 0;
1730} 1755}
@@ -1762,7 +1787,7 @@ static int ntty_ioctl_tiocgicount(struct port *port, void __user *argp)
1762 icount.brk = cnow.brk; 1787 icount.brk = cnow.brk;
1763 icount.buf_overrun = cnow.buf_overrun; 1788 icount.buf_overrun = cnow.buf_overrun;
1764 1789
1765 return copy_to_user(argp, &icount, sizeof(icount)); 1790 return copy_to_user(argp, &icount, sizeof(icount)) ? -EFAULT : 0;
1766} 1791}
1767 1792
1768static int ntty_ioctl(struct tty_struct *tty, struct file *file, 1793static int ntty_ioctl(struct tty_struct *tty, struct file *file,
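
Among the nozomi changes, the tiocgicount fix above is worth calling out: copy_to_user() returns the number of bytes it could not copy, so returning its value directly hands a positive byte count to user space instead of an errno. The usual shape of the fix, sketched with an invented helper:

#include <linux/errno.h>
#include <linux/serial.h>
#include <linux/uaccess.h>

static int example_return_counters(void __user *argp,
                                   const struct serial_icounter_struct *icount)
{
        /* Nonzero means some bytes were left uncopied -> report -EFAULT. */
        if (copy_to_user(argp, icount, sizeof(*icount)))
                return -EFAULT;
        return 0;
}
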
diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c
index ff35230058d3..d793e68b3e0d 100644
--- a/drivers/char/pcmcia/ipwireless/network.c
+++ b/drivers/char/pcmcia/ipwireless/network.c
@@ -377,13 +377,16 @@ void ipwireless_network_packet_received(struct ipw_network *network,
377 for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) { 377 for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) {
378 struct ipw_tty *tty = network->associated_ttys[channel_idx][i]; 378 struct ipw_tty *tty = network->associated_ttys[channel_idx][i];
379 379
380 if (!tty)
381 continue;
382
380 /* 383 /*
381 * If it's associated with a tty (other than the RAS channel 384 * If it's associated with a tty (other than the RAS channel
382 * when we're online), then send the data to that tty. The RAS 385 * when we're online), then send the data to that tty. The RAS
383 * channel's data is handled above - it always goes through 386 * channel's data is handled above - it always goes through
384 * ppp_generic. 387 * ppp_generic.
385 */ 388 */
386 if (tty && channel_idx == IPW_CHANNEL_RAS 389 if (channel_idx == IPW_CHANNEL_RAS
387 && (network->ras_control_lines & 390 && (network->ras_control_lines &
388 IPW_CONTROL_LINE_DCD) != 0 391 IPW_CONTROL_LINE_DCD) != 0
389 && ipwireless_tty_is_modem(tty)) { 392 && ipwireless_tty_is_modem(tty)) {
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 8fc4fe4e38f1..589ac6f65b9a 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -1620,14 +1620,8 @@ static int __init rc_init_drivers(void)
1620 1620
1621static void rc_release_drivers(void) 1621static void rc_release_drivers(void)
1622{ 1622{
1623 unsigned long flags;
1624
1625 spin_lock_irqsave(&riscom_lock, flags);
1626
1627 tty_unregister_driver(riscom_driver); 1623 tty_unregister_driver(riscom_driver);
1628 put_tty_driver(riscom_driver); 1624 put_tty_driver(riscom_driver);
1629
1630 spin_unlock_irqrestore(&riscom_lock, flags);
1631} 1625}
1632 1626
1633#ifndef MODULE 1627#ifndef MODULE
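
The riscom8 hunk above stops wrapping tty_unregister_driver() in spin_lock_irqsave(): the unregister path may sleep, which is not allowed with a spinlock held and interrupts off, and nothing in this teardown needs the lock. A minimal sketch of the corrected shape (the function name is illustrative):

#include <linux/tty.h>
#include <linux/tty_driver.h>

static void example_release_driver(struct tty_driver *drv)
{
        /* May sleep, so it must run in plain process context, no spinlock. */
        tty_unregister_driver(drv);
        put_tty_driver(drv);
}
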
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 78b151c4d20f..5c3142b6f1fc 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -110,8 +110,8 @@ static int rtc_has_irq = 1;
110#define hpet_set_rtc_irq_bit(arg) 0 110#define hpet_set_rtc_irq_bit(arg) 0
111#define hpet_rtc_timer_init() do { } while (0) 111#define hpet_rtc_timer_init() do { } while (0)
112#define hpet_rtc_dropped_irq() 0 112#define hpet_rtc_dropped_irq() 0
113#define hpet_register_irq_handler(h) 0 113#define hpet_register_irq_handler(h) ({ 0; })
114#define hpet_unregister_irq_handler(h) 0 114#define hpet_unregister_irq_handler(h) ({ 0; })
115#ifdef RTC_IRQ 115#ifdef RTC_IRQ
116static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) 116static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
117{ 117{
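
The rtc.c change above turns the disabled-HPET stubs into GCC statement expressions. A bare 0 works when the result is assigned, but a call used as a plain statement then expands to "0;" and draws a "statement with no effect" warning; "({ 0; })" keeps both uses quiet. A sketch with invented names:

#ifdef CONFIG_EXAMPLE_HPET
extern int example_register_handler(void *handler);
#else
/* Stub that is safe both as an expression and as a bare statement. */
#define example_register_handler(h)    ({ 0; })
#endif

/*
 * Both call styles compile cleanly against the stub:
 *     err = example_register_handler(fn);
 *     example_register_handler(fn);
 */
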
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index c0e08c7bca2f..5ff83df67b44 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -2109,7 +2109,6 @@ static void sx_throttle(struct tty_struct * tty)
2109 sx_out(bp, CD186x_CAR, port_No(port)); 2109 sx_out(bp, CD186x_CAR, port_No(port));
2110 spin_unlock_irqrestore(&bp->lock, flags); 2110 spin_unlock_irqrestore(&bp->lock, flags);
2111 if (I_IXOFF(tty)) { 2111 if (I_IXOFF(tty)) {
2112 spin_unlock_irqrestore(&bp->lock, flags);
2113 sx_wait_CCR(bp); 2112 sx_wait_CCR(bp);
2114 spin_lock_irqsave(&bp->lock, flags); 2113 spin_lock_irqsave(&bp->lock, flags);
2115 sx_out(bp, CD186x_CCR, CCR_SSCH2); 2114 sx_out(bp, CD186x_CCR, CCR_SSCH2);
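
The specialix fix above removes a second spin_unlock_irqrestore() on a lock that had already been released a few lines earlier; unlocking twice corrupts the lock state and splats when spinlock debugging is enabled. A sketch of the balanced sequence the function ends up with (all names are illustrative):

#include <linux/spinlock.h>

struct example_chan {
        spinlock_t lock;
};

static void example_throttle(struct example_chan *ch, int need_xoff)
{
        unsigned long flags;

        spin_lock_irqsave(&ch->lock, flags);
        /* select the channel in the controller here */
        spin_unlock_irqrestore(&ch->lock, flags);       /* dropped exactly once */

        if (need_xoff) {
                /* wait for the controller with the lock released ... */
                spin_lock_irqsave(&ch->lock, flags);
                /* ... then issue the stop command under the lock again */
                spin_unlock_irqrestore(&ch->lock, flags);
        }
}
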
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 367be9175061..9b58b894f823 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -702,6 +702,7 @@ void redraw_screen(struct vc_data *vc, int is_switch)
702 if (is_switch) { 702 if (is_switch) {
703 set_leds(); 703 set_leds();
704 compute_shiftstate(); 704 compute_shiftstate();
705 notify_update(vc);
705 } 706 }
706} 707}
707 708
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c
index dfea2bde162b..f577daedb630 100644
--- a/drivers/char/xilinx_hwicap/buffer_icap.c
+++ b/drivers/char/xilinx_hwicap/buffer_icap.c
@@ -73,8 +73,8 @@
73#define XHI_BUFFER_START 0 73#define XHI_BUFFER_START 0
74 74
75/** 75/**
76 * buffer_icap_get_status: Get the contents of the status register. 76 * buffer_icap_get_status - Get the contents of the status register.
77 * @parameter base_address: is the base address of the device 77 * @base_address: is the base address of the device
78 * 78 *
79 * The status register contains the ICAP status and the done bit. 79 * The status register contains the ICAP status and the done bit.
80 * 80 *
@@ -94,9 +94,9 @@ static inline u32 buffer_icap_get_status(void __iomem *base_address)
94} 94}
95 95
96/** 96/**
97 * buffer_icap_get_bram: Reads data from the storage buffer bram. 97 * buffer_icap_get_bram - Reads data from the storage buffer bram.
98 * @parameter base_address: contains the base address of the component. 98 * @base_address: contains the base address of the component.
99 * @parameter offset: The word offset from which the data should be read. 99 * @offset: The word offset from which the data should be read.
100 * 100 *
101 * A bram is used as a configuration memory cache. One frame of data can 101 * A bram is used as a configuration memory cache. One frame of data can
102 * be stored in this "storage buffer". 102 * be stored in this "storage buffer".
@@ -108,8 +108,8 @@ static inline u32 buffer_icap_get_bram(void __iomem *base_address,
108} 108}
109 109
110/** 110/**
111 * buffer_icap_busy: Return true if the icap device is busy 111 * buffer_icap_busy - Return true if the icap device is busy
112 * @parameter base_address: is the base address of the device 112 * @base_address: is the base address of the device
113 * 113 *
 114 * This queries the low order bit of the status register, which 114 * This queries the low order bit of the status register, which
115 * indicates whether the current configuration or readback operation 115 * indicates whether the current configuration or readback operation
@@ -121,8 +121,8 @@ static inline bool buffer_icap_busy(void __iomem *base_address)
121} 121}
122 122
123/** 123/**
124 * buffer_icap_busy: Return true if the icap device is not busy 124 * buffer_icap_busy - Return true if the icap device is not busy
125 * @parameter base_address: is the base address of the device 125 * @base_address: is the base address of the device
126 * 126 *
 127 * This queries the low order bit of the status register, which 127 * This queries the low order bit of the status register, which
128 * indicates whether the current configuration or readback operation 128 * indicates whether the current configuration or readback operation
@@ -134,9 +134,9 @@ static inline bool buffer_icap_done(void __iomem *base_address)
134} 134}
135 135
136/** 136/**
137 * buffer_icap_set_size: Set the size register. 137 * buffer_icap_set_size - Set the size register.
138 * @parameter base_address: is the base address of the device 138 * @base_address: is the base address of the device
139 * @parameter data: The size in bytes. 139 * @data: The size in bytes.
140 * 140 *
141 * The size register holds the number of 8 bit bytes to transfer between 141 * The size register holds the number of 8 bit bytes to transfer between
142 * bram and the icap (or icap to bram). 142 * bram and the icap (or icap to bram).
@@ -148,9 +148,9 @@ static inline void buffer_icap_set_size(void __iomem *base_address,
148} 148}
149 149
150/** 150/**
151 * buffer_icap_mSetoffsetReg: Set the bram offset register. 151 * buffer_icap_set_offset - Set the bram offset register.
152 * @parameter base_address: contains the base address of the device. 152 * @base_address: contains the base address of the device.
153 * @parameter data: is the value to be written to the data register. 153 * @data: is the value to be written to the data register.
154 * 154 *
155 * The bram offset register holds the starting bram address to transfer 155 * The bram offset register holds the starting bram address to transfer
156 * data from during configuration or write data to during readback. 156 * data from during configuration or write data to during readback.
@@ -162,9 +162,9 @@ static inline void buffer_icap_set_offset(void __iomem *base_address,
162} 162}
163 163
164/** 164/**
165 * buffer_icap_set_rnc: Set the RNC (Readback not Configure) register. 165 * buffer_icap_set_rnc - Set the RNC (Readback not Configure) register.
166 * @parameter base_address: contains the base address of the device. 166 * @base_address: contains the base address of the device.
167 * @parameter data: is the value to be written to the data register. 167 * @data: is the value to be written to the data register.
168 * 168 *
169 * The RNC register determines the direction of the data transfer. It 169 * The RNC register determines the direction of the data transfer. It
170 * controls whether a configuration or readback take place. Writing to 170 * controls whether a configuration or readback take place. Writing to
@@ -178,10 +178,10 @@ static inline void buffer_icap_set_rnc(void __iomem *base_address,
178} 178}
179 179
180/** 180/**
181 * buffer_icap_set_bram: Write data to the storage buffer bram. 181 * buffer_icap_set_bram - Write data to the storage buffer bram.
182 * @parameter base_address: contains the base address of the component. 182 * @base_address: contains the base address of the component.
183 * @parameter offset: The word offset at which the data should be written. 183 * @offset: The word offset at which the data should be written.
184 * @parameter data: The value to be written to the bram offset. 184 * @data: The value to be written to the bram offset.
185 * 185 *
186 * A bram is used as a configuration memory cache. One frame of data can 186 * A bram is used as a configuration memory cache. One frame of data can
187 * be stored in this "storage buffer". 187 * be stored in this "storage buffer".
@@ -193,10 +193,10 @@ static inline void buffer_icap_set_bram(void __iomem *base_address,
193} 193}
194 194
195/** 195/**
196 * buffer_icap_device_read: Transfer bytes from ICAP to the storage buffer. 196 * buffer_icap_device_read - Transfer bytes from ICAP to the storage buffer.
197 * @parameter drvdata: a pointer to the drvdata. 197 * @drvdata: a pointer to the drvdata.
198 * @parameter offset: The storage buffer start address. 198 * @offset: The storage buffer start address.
199 * @parameter count: The number of words (32 bit) to read from the 199 * @count: The number of words (32 bit) to read from the
200 * device (ICAP). 200 * device (ICAP).
201 **/ 201 **/
202static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, 202static int buffer_icap_device_read(struct hwicap_drvdata *drvdata,
@@ -227,10 +227,10 @@ static int buffer_icap_device_read(struct hwicap_drvdata *drvdata,
227}; 227};
228 228
229/** 229/**
230 * buffer_icap_device_write: Transfer bytes from ICAP to the storage buffer. 230 * buffer_icap_device_write - Transfer bytes from ICAP to the storage buffer.
231 * @parameter drvdata: a pointer to the drvdata. 231 * @drvdata: a pointer to the drvdata.
232 * @parameter offset: The storage buffer start address. 232 * @offset: The storage buffer start address.
233 * @parameter count: The number of words (32 bit) to read from the 233 * @count: The number of words (32 bit) to read from the
234 * device (ICAP). 234 * device (ICAP).
235 **/ 235 **/
236static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, 236static int buffer_icap_device_write(struct hwicap_drvdata *drvdata,
@@ -261,8 +261,8 @@ static int buffer_icap_device_write(struct hwicap_drvdata *drvdata,
261}; 261};
262 262
263/** 263/**
264 * buffer_icap_reset: Reset the logic of the icap device. 264 * buffer_icap_reset - Reset the logic of the icap device.
265 * @parameter drvdata: a pointer to the drvdata. 265 * @drvdata: a pointer to the drvdata.
266 * 266 *
267 * Writing to the status register resets the ICAP logic in an internal 267 * Writing to the status register resets the ICAP logic in an internal
268 * version of the core. For the version of the core published in EDK, 268 * version of the core. For the version of the core published in EDK,
@@ -274,10 +274,10 @@ void buffer_icap_reset(struct hwicap_drvdata *drvdata)
274} 274}
275 275
276/** 276/**
277 * buffer_icap_set_configuration: Load a partial bitstream from system memory. 277 * buffer_icap_set_configuration - Load a partial bitstream from system memory.
278 * @parameter drvdata: a pointer to the drvdata. 278 * @drvdata: a pointer to the drvdata.
279 * @parameter data: Kernel address of the partial bitstream. 279 * @data: Kernel address of the partial bitstream.
280 * @parameter size: the size of the partial bitstream in 32 bit words. 280 * @size: the size of the partial bitstream in 32 bit words.
281 **/ 281 **/
282int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, 282int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
283 u32 size) 283 u32 size)
@@ -333,10 +333,10 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
333}; 333};
334 334
335/** 335/**
336 * buffer_icap_get_configuration: Read configuration data from the device. 336 * buffer_icap_get_configuration - Read configuration data from the device.
337 * @parameter drvdata: a pointer to the drvdata. 337 * @drvdata: a pointer to the drvdata.
338 * @parameter data: Address of the data representing the partial bitstream 338 * @data: Address of the data representing the partial bitstream
339 * @parameter size: the size of the partial bitstream in 32 bit words. 339 * @size: the size of the partial bitstream in 32 bit words.
340 **/ 340 **/
341int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data, 341int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
342 u32 size) 342 u32 size)
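
The buffer_icap.c comment churn above is purely about kernel-doc syntax: the one-line summary uses "name - description" and each argument gets a plain "@name:" tag, since "@parameter name:" is not something the kernel-doc parser recognizes. A short template in that style (function, register offset and accessor are illustrative):

#include <linux/io.h>
#include <linux/types.h>

/**
 * example_set_size - Set the transfer size register.
 * @base_address: base address of the device
 * @size: transfer size in bytes
 *
 * Longer description goes here, after a blank kernel-doc line.
 */
static inline void example_set_size(void __iomem *base_address, u32 size)
{
        iowrite32(size, base_address + 0x08);   /* hypothetical register offset */
}
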
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c
index 0988314694a6..6f45dbd47125 100644
--- a/drivers/char/xilinx_hwicap/fifo_icap.c
+++ b/drivers/char/xilinx_hwicap/fifo_icap.c
@@ -94,9 +94,9 @@
94 94
95 95
96/** 96/**
97 * fifo_icap_fifo_write: Write data to the write FIFO. 97 * fifo_icap_fifo_write - Write data to the write FIFO.
98 * @parameter drvdata: a pointer to the drvdata. 98 * @drvdata: a pointer to the drvdata.
99 * @parameter data: the 32-bit value to be written to the FIFO. 99 * @data: the 32-bit value to be written to the FIFO.
100 * 100 *
101 * This function will silently fail if the fifo is full. 101 * This function will silently fail if the fifo is full.
102 **/ 102 **/
@@ -108,8 +108,8 @@ static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata,
108} 108}
109 109
110/** 110/**
111 * fifo_icap_fifo_read: Read data from the Read FIFO. 111 * fifo_icap_fifo_read - Read data from the Read FIFO.
112 * @parameter drvdata: a pointer to the drvdata. 112 * @drvdata: a pointer to the drvdata.
113 * 113 *
114 * This function will silently fail if the fifo is empty. 114 * This function will silently fail if the fifo is empty.
115 **/ 115 **/
@@ -121,9 +121,9 @@ static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata)
121} 121}
122 122
123/** 123/**
 124 * fifo_icap_set_read_size: Set the size register. 124 * fifo_icap_set_read_size - Set the size register.
125 * @parameter drvdata: a pointer to the drvdata. 125 * @drvdata: a pointer to the drvdata.
126 * @parameter data: the size of the following read transaction, in words. 126 * @data: the size of the following read transaction, in words.
127 **/ 127 **/
128static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, 128static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata,
129 u32 data) 129 u32 data)
@@ -132,8 +132,8 @@ static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata,
132} 132}
133 133
134/** 134/**
135 * fifo_icap_start_config: Initiate a configuration (write) to the device. 135 * fifo_icap_start_config - Initiate a configuration (write) to the device.
136 * @parameter drvdata: a pointer to the drvdata. 136 * @drvdata: a pointer to the drvdata.
137 **/ 137 **/
138static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) 138static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata)
139{ 139{
@@ -142,8 +142,8 @@ static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata)
142} 142}
143 143
144/** 144/**
145 * fifo_icap_start_readback: Initiate a readback from the device. 145 * fifo_icap_start_readback - Initiate a readback from the device.
146 * @parameter drvdata: a pointer to the drvdata. 146 * @drvdata: a pointer to the drvdata.
147 **/ 147 **/
148static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) 148static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata)
149{ 149{
@@ -152,8 +152,8 @@ static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata)
152} 152}
153 153
154/** 154/**
155 * fifo_icap_busy: Return true if the ICAP is still processing a transaction. 155 * fifo_icap_busy - Return true if the ICAP is still processing a transaction.
156 * @parameter drvdata: a pointer to the drvdata. 156 * @drvdata: a pointer to the drvdata.
157 **/ 157 **/
158static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) 158static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata)
159{ 159{
@@ -163,8 +163,8 @@ static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata)
163} 163}
164 164
165/** 165/**
166 * fifo_icap_write_fifo_vacancy: Query the write fifo available space. 166 * fifo_icap_write_fifo_vacancy - Query the write fifo available space.
167 * @parameter drvdata: a pointer to the drvdata. 167 * @drvdata: a pointer to the drvdata.
168 * 168 *
169 * Return the number of words that can be safely pushed into the write fifo. 169 * Return the number of words that can be safely pushed into the write fifo.
170 **/ 170 **/
@@ -175,8 +175,8 @@ static inline u32 fifo_icap_write_fifo_vacancy(
175} 175}
176 176
177/** 177/**
178 * fifo_icap_read_fifo_occupancy: Query the read fifo available data. 178 * fifo_icap_read_fifo_occupancy - Query the read fifo available data.
179 * @parameter drvdata: a pointer to the drvdata. 179 * @drvdata: a pointer to the drvdata.
180 * 180 *
181 * Return the number of words that can be safely read from the read fifo. 181 * Return the number of words that can be safely read from the read fifo.
182 **/ 182 **/
@@ -187,11 +187,11 @@ static inline u32 fifo_icap_read_fifo_occupancy(
187} 187}
188 188
189/** 189/**
190 * fifo_icap_set_configuration: Send configuration data to the ICAP. 190 * fifo_icap_set_configuration - Send configuration data to the ICAP.
191 * @parameter drvdata: a pointer to the drvdata. 191 * @drvdata: a pointer to the drvdata.
192 * @parameter frame_buffer: a pointer to the data to be written to the 192 * @frame_buffer: a pointer to the data to be written to the
193 * ICAP device. 193 * ICAP device.
194 * @parameter num_words: the number of words (32 bit) to write to the ICAP 194 * @num_words: the number of words (32 bit) to write to the ICAP
195 * device. 195 * device.
196 196
197 * This function writes the given user data to the Write FIFO in 197 * This function writes the given user data to the Write FIFO in
@@ -266,10 +266,10 @@ int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata,
266} 266}
267 267
268/** 268/**
269 * fifo_icap_get_configuration: Read configuration data from the device. 269 * fifo_icap_get_configuration - Read configuration data from the device.
270 * @parameter drvdata: a pointer to the drvdata. 270 * @drvdata: a pointer to the drvdata.
271 * @parameter data: Address of the data representing the partial bitstream 271 * @data: Address of the data representing the partial bitstream
272 * @parameter size: the size of the partial bitstream in 32 bit words. 272 * @size: the size of the partial bitstream in 32 bit words.
273 * 273 *
274 * This function reads the specified number of words from the ICAP device in 274 * This function reads the specified number of words from the ICAP device in
275 * the polled mode. 275 * the polled mode.
@@ -335,8 +335,8 @@ int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata,
335} 335}
336 336
337/** 337/**
338 * buffer_icap_reset: Reset the logic of the icap device. 338 * buffer_icap_reset - Reset the logic of the icap device.
339 * @parameter drvdata: a pointer to the drvdata. 339 * @drvdata: a pointer to the drvdata.
340 * 340 *
341 * This function forces the software reset of the complete HWICAP device. 341 * This function forces the software reset of the complete HWICAP device.
342 * All the registers will return to the default value and the FIFO is also 342 * All the registers will return to the default value and the FIFO is also
@@ -360,8 +360,8 @@ void fifo_icap_reset(struct hwicap_drvdata *drvdata)
360} 360}
361 361
362/** 362/**
363 * fifo_icap_flush_fifo: This function flushes the FIFOs in the device. 363 * fifo_icap_flush_fifo - This function flushes the FIFOs in the device.
364 * @parameter drvdata: a pointer to the drvdata. 364 * @drvdata: a pointer to the drvdata.
365 */ 365 */
366void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata) 366void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata)
367{ 367{
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 24f6aef0fd3c..2284fa2a5a57 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -84,7 +84,7 @@
84#include <linux/init.h> 84#include <linux/init.h>
85#include <linux/poll.h> 85#include <linux/poll.h>
86#include <linux/proc_fs.h> 86#include <linux/proc_fs.h>
87#include <asm/semaphore.h> 87#include <linux/mutex.h>
88#include <linux/sysctl.h> 88#include <linux/sysctl.h>
89#include <linux/version.h> 89#include <linux/version.h>
90#include <linux/fs.h> 90#include <linux/fs.h>
@@ -119,6 +119,7 @@ module_param(xhwicap_minor, int, S_IRUGO);
119 119
120/* An array, which is set to true when the device is registered. */ 120/* An array, which is set to true when the device is registered. */
121static bool probed_devices[HWICAP_DEVICES]; 121static bool probed_devices[HWICAP_DEVICES];
122static struct mutex icap_sem;
122 123
123static struct class *icap_class; 124static struct class *icap_class;
124 125
@@ -199,14 +200,14 @@ static const struct config_registers v5_config_registers = {
199}; 200};
200 201
201/** 202/**
202 * hwicap_command_desync: Send a DESYNC command to the ICAP port. 203 * hwicap_command_desync - Send a DESYNC command to the ICAP port.
203 * @parameter drvdata: a pointer to the drvdata. 204 * @drvdata: a pointer to the drvdata.
204 * 205 *
 205 * This command desynchronizes the ICAP. After this command, a 206 * This command desynchronizes the ICAP. After this command, a
206 * bitstream containing a NULL packet, followed by a SYNCH packet is 207 * bitstream containing a NULL packet, followed by a SYNCH packet is
207 * required before the ICAP will recognize commands. 208 * required before the ICAP will recognize commands.
208 */ 209 */
209int hwicap_command_desync(struct hwicap_drvdata *drvdata) 210static int hwicap_command_desync(struct hwicap_drvdata *drvdata)
210{ 211{
211 u32 buffer[4]; 212 u32 buffer[4];
212 u32 index = 0; 213 u32 index = 0;
@@ -228,51 +229,18 @@ int hwicap_command_desync(struct hwicap_drvdata *drvdata)
228} 229}
229 230
230/** 231/**
231 * hwicap_command_capture: Send a CAPTURE command to the ICAP port. 232 * hwicap_get_configuration_register - Query a configuration register.
232 * @parameter drvdata: a pointer to the drvdata. 233 * @drvdata: a pointer to the drvdata.
233 * 234 * @reg: a constant which represents the configuration
234 * This command captures all of the flip flop states so they will be
235 * available during readback. One can use this command instead of
236 * enabling the CAPTURE block in the design.
237 */
238int hwicap_command_capture(struct hwicap_drvdata *drvdata)
239{
240 u32 buffer[7];
241 u32 index = 0;
242
243 /*
244 * Create the data to be written to the ICAP.
245 */
246 buffer[index++] = XHI_DUMMY_PACKET;
247 buffer[index++] = XHI_SYNC_PACKET;
248 buffer[index++] = XHI_NOOP_PACKET;
249 buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1;
250 buffer[index++] = XHI_CMD_GCAPTURE;
251 buffer[index++] = XHI_DUMMY_PACKET;
252 buffer[index++] = XHI_DUMMY_PACKET;
253
254 /*
255 * Write the data to the FIFO and intiate the transfer of data
256 * present in the FIFO to the ICAP device.
257 */
258 return drvdata->config->set_configuration(drvdata,
259 &buffer[0], index);
260
261}
262
263/**
264 * hwicap_get_configuration_register: Query a configuration register.
265 * @parameter drvdata: a pointer to the drvdata.
266 * @parameter reg: a constant which represents the configuration
267 * register value to be returned. 235 * register value to be returned.
268 * Examples: XHI_IDCODE, XHI_FLR. 236 * Examples: XHI_IDCODE, XHI_FLR.
269 * @parameter RegData: returns the value of the register. 237 * @reg_data: returns the value of the register.
270 * 238 *
271 * Sends a query packet to the ICAP and then receives the response. 239 * Sends a query packet to the ICAP and then receives the response.
272 * The icap is left in Synched state. 240 * The icap is left in Synched state.
273 */ 241 */
274int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, 242static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
275 u32 reg, u32 *RegData) 243 u32 reg, u32 *reg_data)
276{ 244{
277 int status; 245 int status;
278 u32 buffer[6]; 246 u32 buffer[6];
@@ -300,14 +268,14 @@ int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
300 /* 268 /*
301 * Read the configuration register 269 * Read the configuration register
302 */ 270 */
303 status = drvdata->config->get_configuration(drvdata, RegData, 1); 271 status = drvdata->config->get_configuration(drvdata, reg_data, 1);
304 if (status) 272 if (status)
305 return status; 273 return status;
306 274
307 return 0; 275 return 0;
308} 276}
309 277
310int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) 278static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
311{ 279{
312 int status; 280 int status;
313 u32 idcode; 281 u32 idcode;
@@ -344,7 +312,7 @@ int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
344} 312}
345 313
346static ssize_t 314static ssize_t
347hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) 315hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
348{ 316{
349 struct hwicap_drvdata *drvdata = file->private_data; 317 struct hwicap_drvdata *drvdata = file->private_data;
350 ssize_t bytes_to_read = 0; 318 ssize_t bytes_to_read = 0;
@@ -353,8 +321,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos)
353 u32 bytes_remaining; 321 u32 bytes_remaining;
354 int status; 322 int status;
355 323
356 if (down_interruptible(&drvdata->sem)) 324 status = mutex_lock_interruptible(&drvdata->sem);
357 return -ERESTARTSYS; 325 if (status)
326 return status;
358 327
359 if (drvdata->read_buffer_in_use) { 328 if (drvdata->read_buffer_in_use) {
360 /* If there are leftover bytes in the buffer, just */ 329 /* If there are leftover bytes in the buffer, just */
@@ -370,8 +339,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos)
370 goto error; 339 goto error;
371 } 340 }
372 drvdata->read_buffer_in_use -= bytes_to_read; 341 drvdata->read_buffer_in_use -= bytes_to_read;
373 memcpy(drvdata->read_buffer + bytes_to_read, 342 memmove(drvdata->read_buffer,
374 drvdata->read_buffer, 4 - bytes_to_read); 343 drvdata->read_buffer + bytes_to_read,
344 4 - bytes_to_read);
375 } else { 345 } else {
 376 /* Get new data from the ICAP, and return what was requested. */ 346 /* Get new data from the ICAP, and return what was requested. */
377 kbuf = (u32 *) get_zeroed_page(GFP_KERNEL); 347 kbuf = (u32 *) get_zeroed_page(GFP_KERNEL);
@@ -414,18 +384,20 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos)
414 status = -EFAULT; 384 status = -EFAULT;
415 goto error; 385 goto error;
416 } 386 }
417 memcpy(kbuf, drvdata->read_buffer, bytes_remaining); 387 memcpy(drvdata->read_buffer,
388 kbuf,
389 bytes_remaining);
418 drvdata->read_buffer_in_use = bytes_remaining; 390 drvdata->read_buffer_in_use = bytes_remaining;
419 free_page((unsigned long)kbuf); 391 free_page((unsigned long)kbuf);
420 } 392 }
421 status = bytes_to_read; 393 status = bytes_to_read;
422 error: 394 error:
423 up(&drvdata->sem); 395 mutex_unlock(&drvdata->sem);
424 return status; 396 return status;
425} 397}
426 398
427static ssize_t 399static ssize_t
428hwicap_write(struct file *file, const char *buf, 400hwicap_write(struct file *file, const char __user *buf,
429 size_t count, loff_t *ppos) 401 size_t count, loff_t *ppos)
430{ 402{
431 struct hwicap_drvdata *drvdata = file->private_data; 403 struct hwicap_drvdata *drvdata = file->private_data;
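
The read-path hunk above also swaps a memcpy() for memmove(): the old call copied in the wrong direction and did so on overlapping regions, while the intent is to slide the unread tail of the same four-byte read_buffer down to the front, which only memmove() handles safely. A sketch of that operation (the helper name is invented):

#include <linux/string.h>
#include <linux/types.h>

/* Slide the not-yet-consumed tail of a tiny buffer down to index 0. */
static void example_consume(u8 buf[4], u32 *in_use, u32 taken)
{
        *in_use -= taken;
        memmove(buf, buf + taken, 4 - taken);   /* regions may overlap */
}
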
@@ -435,8 +407,9 @@ hwicap_write(struct file *file, const char *buf,
435 ssize_t len; 407 ssize_t len;
436 ssize_t status; 408 ssize_t status;
437 409
438 if (down_interruptible(&drvdata->sem)) 410 status = mutex_lock_interruptible(&drvdata->sem);
439 return -ERESTARTSYS; 411 if (status)
412 return status;
440 413
441 left += drvdata->write_buffer_in_use; 414 left += drvdata->write_buffer_in_use;
442 415
@@ -465,7 +438,7 @@ hwicap_write(struct file *file, const char *buf,
465 memcpy(kbuf, drvdata->write_buffer, 438 memcpy(kbuf, drvdata->write_buffer,
466 drvdata->write_buffer_in_use); 439 drvdata->write_buffer_in_use);
467 if (copy_from_user( 440 if (copy_from_user(
468 (((char *)kbuf) + (drvdata->write_buffer_in_use)), 441 (((char *)kbuf) + drvdata->write_buffer_in_use),
469 buf + written, 442 buf + written,
470 len - (drvdata->write_buffer_in_use))) { 443 len - (drvdata->write_buffer_in_use))) {
471 free_page((unsigned long)kbuf); 444 free_page((unsigned long)kbuf);
@@ -508,7 +481,7 @@ hwicap_write(struct file *file, const char *buf,
508 free_page((unsigned long)kbuf); 481 free_page((unsigned long)kbuf);
509 status = written; 482 status = written;
510 error: 483 error:
511 up(&drvdata->sem); 484 mutex_unlock(&drvdata->sem);
512 return status; 485 return status;
513} 486}
514 487
@@ -519,8 +492,9 @@ static int hwicap_open(struct inode *inode, struct file *file)
519 492
520 drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); 493 drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev);
521 494
522 if (down_interruptible(&drvdata->sem)) 495 status = mutex_lock_interruptible(&drvdata->sem);
523 return -ERESTARTSYS; 496 if (status)
497 return status;
524 498
525 if (drvdata->is_open) { 499 if (drvdata->is_open) {
526 status = -EBUSY; 500 status = -EBUSY;
@@ -539,7 +513,7 @@ static int hwicap_open(struct inode *inode, struct file *file)
539 drvdata->is_open = 1; 513 drvdata->is_open = 1;
540 514
541 error: 515 error:
542 up(&drvdata->sem); 516 mutex_unlock(&drvdata->sem);
543 return status; 517 return status;
544} 518}
545 519
@@ -549,8 +523,7 @@ static int hwicap_release(struct inode *inode, struct file *file)
549 int i; 523 int i;
550 int status = 0; 524 int status = 0;
551 525
552 if (down_interruptible(&drvdata->sem)) 526 mutex_lock(&drvdata->sem);
553 return -ERESTARTSYS;
554 527
555 if (drvdata->write_buffer_in_use) { 528 if (drvdata->write_buffer_in_use) {
556 /* Flush write buffer. */ 529 /* Flush write buffer. */
@@ -569,7 +542,7 @@ static int hwicap_release(struct inode *inode, struct file *file)
569 542
570 error: 543 error:
571 drvdata->is_open = 0; 544 drvdata->is_open = 0;
572 up(&drvdata->sem); 545 mutex_unlock(&drvdata->sem);
573 return status; 546 return status;
574} 547}
575 548
@@ -592,31 +565,36 @@ static int __devinit hwicap_setup(struct device *dev, int id,
592 565
593 dev_info(dev, "Xilinx icap port driver\n"); 566 dev_info(dev, "Xilinx icap port driver\n");
594 567
568 mutex_lock(&icap_sem);
569
595 if (id < 0) { 570 if (id < 0) {
596 for (id = 0; id < HWICAP_DEVICES; id++) 571 for (id = 0; id < HWICAP_DEVICES; id++)
597 if (!probed_devices[id]) 572 if (!probed_devices[id])
598 break; 573 break;
599 } 574 }
600 if (id < 0 || id >= HWICAP_DEVICES) { 575 if (id < 0 || id >= HWICAP_DEVICES) {
576 mutex_unlock(&icap_sem);
601 dev_err(dev, "%s%i too large\n", DRIVER_NAME, id); 577 dev_err(dev, "%s%i too large\n", DRIVER_NAME, id);
602 return -EINVAL; 578 return -EINVAL;
603 } 579 }
604 if (probed_devices[id]) { 580 if (probed_devices[id]) {
581 mutex_unlock(&icap_sem);
605 dev_err(dev, "cannot assign to %s%i; it is already in use\n", 582 dev_err(dev, "cannot assign to %s%i; it is already in use\n",
606 DRIVER_NAME, id); 583 DRIVER_NAME, id);
607 return -EBUSY; 584 return -EBUSY;
608 } 585 }
609 586
610 probed_devices[id] = 1; 587 probed_devices[id] = 1;
588 mutex_unlock(&icap_sem);
611 589
612 devt = MKDEV(xhwicap_major, xhwicap_minor + id); 590 devt = MKDEV(xhwicap_major, xhwicap_minor + id);
613 591
614 drvdata = kmalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); 592 drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
615 if (!drvdata) { 593 if (!drvdata) {
616 dev_err(dev, "Couldn't allocate device private record\n"); 594 dev_err(dev, "Couldn't allocate device private record\n");
617 return -ENOMEM; 595 retval = -ENOMEM;
596 goto failed0;
618 } 597 }
619 memset((void *)drvdata, 0, sizeof(struct hwicap_drvdata));
620 dev_set_drvdata(dev, (void *)drvdata); 598 dev_set_drvdata(dev, (void *)drvdata);
621 599
622 if (!regs_res) { 600 if (!regs_res) {
@@ -648,7 +626,7 @@ static int __devinit hwicap_setup(struct device *dev, int id,
648 drvdata->config = config; 626 drvdata->config = config;
649 drvdata->config_regs = config_regs; 627 drvdata->config_regs = config_regs;
650 628
651 init_MUTEX(&drvdata->sem); 629 mutex_init(&drvdata->sem);
652 drvdata->is_open = 0; 630 drvdata->is_open = 0;
653 631
654 dev_info(dev, "ioremap %lx to %p with size %x\n", 632 dev_info(dev, "ioremap %lx to %p with size %x\n",
@@ -663,7 +641,7 @@ static int __devinit hwicap_setup(struct device *dev, int id,
663 goto failed3; 641 goto failed3;
664 } 642 }
665 /* devfs_mk_cdev(devt, S_IFCHR|S_IRUGO|S_IWUGO, DRIVER_NAME); */ 643 /* devfs_mk_cdev(devt, S_IFCHR|S_IRUGO|S_IWUGO, DRIVER_NAME); */
666 class_device_create(icap_class, NULL, devt, NULL, DRIVER_NAME); 644 device_create(icap_class, dev, devt, "%s%d", DRIVER_NAME, id);
667 return 0; /* success */ 645 return 0; /* success */
668 646
669 failed3: 647 failed3:
@@ -675,6 +653,11 @@ static int __devinit hwicap_setup(struct device *dev, int id,
675 failed1: 653 failed1:
676 kfree(drvdata); 654 kfree(drvdata);
677 655
656 failed0:
657 mutex_lock(&icap_sem);
658 probed_devices[id] = 0;
659 mutex_unlock(&icap_sem);
660
678 return retval; 661 return retval;
679} 662}
680 663
@@ -699,14 +682,16 @@ static int __devexit hwicap_remove(struct device *dev)
699 if (!drvdata) 682 if (!drvdata)
700 return 0; 683 return 0;
701 684
702 class_device_destroy(icap_class, drvdata->devt); 685 device_destroy(icap_class, drvdata->devt);
703 cdev_del(&drvdata->cdev); 686 cdev_del(&drvdata->cdev);
704 iounmap(drvdata->base_address); 687 iounmap(drvdata->base_address);
705 release_mem_region(drvdata->mem_start, drvdata->mem_size); 688 release_mem_region(drvdata->mem_start, drvdata->mem_size);
706 kfree(drvdata); 689 kfree(drvdata);
707 dev_set_drvdata(dev, NULL); 690 dev_set_drvdata(dev, NULL);
708 probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0;
709 691
692 mutex_lock(&icap_sem);
693 probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0;
694 mutex_unlock(&icap_sem);
710 return 0; /* success */ 695 return 0; /* success */
711} 696}
712 697
@@ -821,28 +806,29 @@ static struct of_platform_driver hwicap_of_driver = {
821}; 806};
822 807
823/* Registration helpers to keep the number of #ifdefs to a minimum */ 808/* Registration helpers to keep the number of #ifdefs to a minimum */
824static inline int __devinit hwicap_of_register(void) 809static inline int __init hwicap_of_register(void)
825{ 810{
826 pr_debug("hwicap: calling of_register_platform_driver()\n"); 811 pr_debug("hwicap: calling of_register_platform_driver()\n");
827 return of_register_platform_driver(&hwicap_of_driver); 812 return of_register_platform_driver(&hwicap_of_driver);
828} 813}
829 814
830static inline void __devexit hwicap_of_unregister(void) 815static inline void __exit hwicap_of_unregister(void)
831{ 816{
832 of_unregister_platform_driver(&hwicap_of_driver); 817 of_unregister_platform_driver(&hwicap_of_driver);
833} 818}
834#else /* CONFIG_OF */ 819#else /* CONFIG_OF */
835/* CONFIG_OF not enabled; do nothing helpers */ 820/* CONFIG_OF not enabled; do nothing helpers */
836static inline int __devinit hwicap_of_register(void) { return 0; } 821static inline int __init hwicap_of_register(void) { return 0; }
837static inline void __devexit hwicap_of_unregister(void) { } 822static inline void __exit hwicap_of_unregister(void) { }
838#endif /* CONFIG_OF */ 823#endif /* CONFIG_OF */
839 824
840static int __devinit hwicap_module_init(void) 825static int __init hwicap_module_init(void)
841{ 826{
842 dev_t devt; 827 dev_t devt;
843 int retval; 828 int retval;
844 829
845 icap_class = class_create(THIS_MODULE, "xilinx_config"); 830 icap_class = class_create(THIS_MODULE, "xilinx_config");
831 mutex_init(&icap_sem);
846 832
847 if (xhwicap_major) { 833 if (xhwicap_major) {
848 devt = MKDEV(xhwicap_major, xhwicap_minor); 834 devt = MKDEV(xhwicap_major, xhwicap_minor);
@@ -883,7 +869,7 @@ static int __devinit hwicap_module_init(void)
883 return retval; 869 return retval;
884} 870}
885 871
886static void __devexit hwicap_module_cleanup(void) 872static void __exit hwicap_module_cleanup(void)
887{ 873{
888 dev_t devt = MKDEV(xhwicap_major, xhwicap_minor); 874 dev_t devt = MKDEV(xhwicap_major, xhwicap_minor);
889 875
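
Most of the xilinx_hwicap.c changes above convert the old semaphore to a struct mutex and tighten the error paths. The recurring pattern is mutex_lock_interruptible(), whose nonzero return (-EINTR when a signal interrupts the wait) is now handed straight back to the caller. A sketch of that pattern (the static mutex and the function are invented):

#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(example_sem);

static ssize_t example_read(char __user *buf, size_t count)
{
        ssize_t status;

        status = mutex_lock_interruptible(&example_sem);
        if (status)
                return status;          /* -EINTR if a signal arrived */

        /* ... copy data out to buf under the lock ... */
        status = count;

        mutex_unlock(&example_sem);
        return status;
}
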
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
index ae771cac1629..405fee7e189b 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
@@ -48,9 +48,9 @@ struct hwicap_drvdata {
48 u8 write_buffer[4]; 48 u8 write_buffer[4];
49 u32 read_buffer_in_use; /* Always in [0,3] */ 49 u32 read_buffer_in_use; /* Always in [0,3] */
50 u8 read_buffer[4]; 50 u8 read_buffer[4];
51 u32 mem_start; /* phys. address of the control registers */ 51 resource_size_t mem_start;/* phys. address of the control registers */
52 u32 mem_end; /* phys. address of the control registers */ 52 resource_size_t mem_end; /* phys. address of the control registers */
53 u32 mem_size; 53 resource_size_t mem_size;
54 void __iomem *base_address;/* virt. address of the control registers */ 54 void __iomem *base_address;/* virt. address of the control registers */
55 55
56 struct device *dev; 56 struct device *dev;
@@ -61,7 +61,7 @@ struct hwicap_drvdata {
61 const struct config_registers *config_regs; 61 const struct config_registers *config_regs;
62 void *private_data; 62 void *private_data;
63 bool is_open; 63 bool is_open;
64 struct semaphore sem; 64 struct mutex sem;
65}; 65};
66 66
67struct hwicap_driver_config { 67struct hwicap_driver_config {
@@ -164,29 +164,29 @@ struct config_registers {
164#define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL 164#define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL
165 165
166/** 166/**
167 * hwicap_type_1_read: Generates a Type 1 read packet header. 167 * hwicap_type_1_read - Generates a Type 1 read packet header.
168 * @parameter: Register is the address of the register to be read back. 168 * @reg: is the address of the register to be read back.
169 * 169 *
170 * Generates a Type 1 read packet header, which is used to indirectly 170 * Generates a Type 1 read packet header, which is used to indirectly
171 * read registers in the configuration logic. This packet must then 171 * read registers in the configuration logic. This packet must then
172 * be sent through the icap device, and a return packet received with 172 * be sent through the icap device, and a return packet received with
173 * the information. 173 * the information.
174 **/ 174 **/
175static inline u32 hwicap_type_1_read(u32 Register) 175static inline u32 hwicap_type_1_read(u32 reg)
176{ 176{
177 return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | 177 return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
178 (Register << XHI_REGISTER_SHIFT) | 178 (reg << XHI_REGISTER_SHIFT) |
179 (XHI_OP_READ << XHI_OP_SHIFT); 179 (XHI_OP_READ << XHI_OP_SHIFT);
180} 180}
181 181
182/** 182/**
183 * hwicap_type_1_write: Generates a Type 1 write packet header 183 * hwicap_type_1_write - Generates a Type 1 write packet header
 184 * @parameter: Register is the address of the register to be read back. 184 * @reg: the address of the register to be written.
185 **/ 185 **/
186static inline u32 hwicap_type_1_write(u32 Register) 186static inline u32 hwicap_type_1_write(u32 reg)
187{ 187{
188 return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | 188 return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
189 (Register << XHI_REGISTER_SHIFT) | 189 (reg << XHI_REGISTER_SHIFT) |
190 (XHI_OP_WRITE << XHI_OP_SHIFT); 190 (XHI_OP_WRITE << XHI_OP_SHIFT);
191} 191}
192 192
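
For reference, a stand-alone user-space sketch of the bit packing performed by the renamed hwicap_type_1_read()/hwicap_type_1_write() helpers above. The XHI_* shift and opcode values below are illustrative placeholders, not necessarily the ones defined in xilinx_hwicap.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field positions; the real values live in xilinx_hwicap.h. */
#define XHI_TYPE_1         1u
#define XHI_TYPE_SHIFT     29
#define XHI_REGISTER_SHIFT 13
#define XHI_OP_SHIFT       27
#define XHI_OP_READ        1u

static uint32_t type_1_read(uint32_t reg)
{
	/* Same shape as hwicap_type_1_read(): type | register | opcode. */
	return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
	       (reg << XHI_REGISTER_SHIFT) |
	       (XHI_OP_READ << XHI_OP_SHIFT);
}

int main(void)
{
	/* Header requesting a read-back of (hypothetical) register 7. */
	printf("type-1 read header: 0x%08x\n", type_1_read(7));
	return 0;
}
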
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index fea2d3ed9cbd..85e2ba7fcfba 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -47,7 +47,7 @@ static LIST_HEAD(notify_list);
47 47
48static struct cn_dev cdev; 48static struct cn_dev cdev;
49 49
50int cn_already_initialized = 0; 50static int cn_already_initialized;
51 51
52/* 52/*
53 * msg->seq and msg->ack are used to determine message genealogy. 53 * msg->seq and msg->ack are used to determine message genealogy.
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 89a29cd93783..35a26a3e5f68 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -671,13 +671,13 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
671{ 671{
672 struct cpufreq_policy * policy = to_policy(kobj); 672 struct cpufreq_policy * policy = to_policy(kobj);
673 struct freq_attr * fattr = to_attr(attr); 673 struct freq_attr * fattr = to_attr(attr);
674 ssize_t ret; 674 ssize_t ret = -EINVAL;
675 policy = cpufreq_cpu_get(policy->cpu); 675 policy = cpufreq_cpu_get(policy->cpu);
676 if (!policy) 676 if (!policy)
677 return -EINVAL; 677 goto no_policy;
678 678
679 if (lock_policy_rwsem_read(policy->cpu) < 0) 679 if (lock_policy_rwsem_read(policy->cpu) < 0)
680 return -EINVAL; 680 goto fail;
681 681
682 if (fattr->show) 682 if (fattr->show)
683 ret = fattr->show(policy, buf); 683 ret = fattr->show(policy, buf);
@@ -685,8 +685,9 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
685 ret = -EIO; 685 ret = -EIO;
686 686
687 unlock_policy_rwsem_read(policy->cpu); 687 unlock_policy_rwsem_read(policy->cpu);
688 688fail:
689 cpufreq_cpu_put(policy); 689 cpufreq_cpu_put(policy);
690no_policy:
690 return ret; 691 return ret;
691} 692}
692 693
@@ -695,13 +696,13 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr,
695{ 696{
696 struct cpufreq_policy * policy = to_policy(kobj); 697 struct cpufreq_policy * policy = to_policy(kobj);
697 struct freq_attr * fattr = to_attr(attr); 698 struct freq_attr * fattr = to_attr(attr);
698 ssize_t ret; 699 ssize_t ret = -EINVAL;
699 policy = cpufreq_cpu_get(policy->cpu); 700 policy = cpufreq_cpu_get(policy->cpu);
700 if (!policy) 701 if (!policy)
701 return -EINVAL; 702 goto no_policy;
702 703
703 if (lock_policy_rwsem_write(policy->cpu) < 0) 704 if (lock_policy_rwsem_write(policy->cpu) < 0)
704 return -EINVAL; 705 goto fail;
705 706
706 if (fattr->store) 707 if (fattr->store)
707 ret = fattr->store(policy, buf, count); 708 ret = fattr->store(policy, buf, count);
@@ -709,8 +710,9 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr,
709 ret = -EIO; 710 ret = -EIO;
710 711
711 unlock_policy_rwsem_write(policy->cpu); 712 unlock_policy_rwsem_write(policy->cpu);
712 713fail:
713 cpufreq_cpu_put(policy); 714 cpufreq_cpu_put(policy);
715no_policy:
714 return ret; 716 return ret;
715} 717}
716 718
@@ -1775,7 +1777,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1775 return NOTIFY_OK; 1777 return NOTIFY_OK;
1776} 1778}
1777 1779
1778static struct notifier_block __cpuinitdata cpufreq_cpu_notifier = 1780static struct notifier_block __refdata cpufreq_cpu_notifier =
1779{ 1781{
1780 .notifier_call = cpufreq_cpu_callback, 1782 .notifier_call = cpufreq_cpu_callback,
1781}; 1783};
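
The show()/store() hunks above replace scattered early returns with a single unwind path, so each failure point releases exactly what it has acquired. A minimal user-space sketch of that pattern, with stub stand-ins for cpufreq_cpu_get()/cpufreq_cpu_put() and the rwsem helpers (none of this is kernel code):

#include <stdio.h>

static int get_ref(void)      { return 1; }  /* stands in for cpufreq_cpu_get() */
static void put_ref(void)     { }            /* stands in for cpufreq_cpu_put() */
static int lock_read(void)    { return 0; }  /* stands in for lock_policy_rwsem_read() */
static void unlock_read(void) { }

static int show_like(void)
{
	int ret = -22;                /* default to -EINVAL, as in the patch */

	if (!get_ref())
		goto no_policy;       /* nothing acquired yet */

	if (lock_read() < 0)
		goto fail;            /* drop only the reference */

	ret = 0;                      /* the actual ->show() work would run here */
	unlock_read();
fail:
	put_ref();
no_policy:
	return ret;
}

int main(void)
{
	printf("show_like() = %d\n", show_like());
	return 0;
}
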
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1b8312b02006..070421a5480e 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -323,7 +323,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
323 return NOTIFY_OK; 323 return NOTIFY_OK;
324} 324}
325 325
326static struct notifier_block cpufreq_stat_cpu_notifier __cpuinitdata = 326static struct notifier_block cpufreq_stat_cpu_notifier __refdata =
327{ 327{
328 .notifier_call = cpufreq_stat_cpu_callback, 328 .notifier_call = cpufreq_stat_cpu_callback,
329}; 329};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a703deffb795..27340a7b19dd 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -4,7 +4,7 @@
4 4
5menuconfig DMADEVICES 5menuconfig DMADEVICES
6 bool "DMA Engine support" 6 bool "DMA Engine support"
7 depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX 7 depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC
8 depends on !HIGHMEM64G 8 depends on !HIGHMEM64G
9 help 9 help
10 DMA engines can do asynchronous data transfers without 10 DMA engines can do asynchronous data transfers without
@@ -37,6 +37,23 @@ config INTEL_IOP_ADMA
37 help 37 help
38 Enable support for the Intel(R) IOP Series RAID engines. 38 Enable support for the Intel(R) IOP Series RAID engines.
39 39
40config FSL_DMA
41 bool "Freescale MPC85xx/MPC83xx DMA support"
42 depends on PPC
43 select DMA_ENGINE
44 ---help---
 45	  Enable support for the Freescale DMA engine. It currently supports
 46	  the MPC8560/40, MPC8555, MPC8548 and MPC8641 processors.
 47	  The MPC8349 and MPC8360 are also supported.
48
49config FSL_DMA_SELFTEST
50 bool "Enable the self test for each DMA channel"
51 depends on FSL_DMA
52 default y
53 ---help---
54 Enable the self test for each DMA channel. A self test will be
 55	  performed after the channel is probed to ensure the DMA works correctly.
56
40config DMA_ENGINE 57config DMA_ENGINE
41 bool 58 bool
42 59
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index b152cd84e123..c8036d945902 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_NET_DMA) += iovlock.o
3obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o 3obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
4ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o 4ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
5obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o 5obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
6obj-$(CONFIG_FSL_DMA) += fsldma.o
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
new file mode 100644
index 000000000000..cc9a68158d99
--- /dev/null
+++ b/drivers/dma/fsldma.c
@@ -0,0 +1,1067 @@
1/*
2 * Freescale MPC85xx, MPC83xx DMA Engine support
3 *
4 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
5 *
6 * Author:
7 * Zhang Wei <wei.zhang@freescale.com>, Jul 2007
8 * Ebony Zhu <ebony.zhu@freescale.com>, May 2007
9 *
10 * Description:
 11 * DMA engine driver for the Freescale MPC8540 DMA controller, which
 12 * also applies to the MPC8560, MPC8555, MPC8548, MPC8641, and others.
 13 * Support for the MPC8349 DMA controller is also included.
14 *
15 * This is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 */
21
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/pci.h>
25#include <linux/interrupt.h>
26#include <linux/dmaengine.h>
27#include <linux/delay.h>
28#include <linux/dma-mapping.h>
29#include <linux/dmapool.h>
30#include <linux/of_platform.h>
31
32#include "fsldma.h"
33
34static void dma_init(struct fsl_dma_chan *fsl_chan)
35{
36 /* Reset the channel */
37 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);
38
39 switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
40 case FSL_DMA_IP_85XX:
 41 /* Set the channel to the following modes:
42 * EIE - Error interrupt enable
43 * EOSIE - End of segments interrupt enable (basic mode)
44 * EOLNIE - End of links interrupt enable
45 */
46 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
47 | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
48 break;
49 case FSL_DMA_IP_83XX:
 50 /* Set the channel to the following modes:
51 * EOTIE - End-of-transfer interrupt enable
52 */
53 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE,
54 32);
55 break;
56 }
57
58}
59
60static void set_sr(struct fsl_dma_chan *fsl_chan, dma_addr_t val)
61{
62 DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
63}
64
65static dma_addr_t get_sr(struct fsl_dma_chan *fsl_chan)
66{
67 return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
68}
69
70static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
71 struct fsl_dma_ld_hw *hw, u32 count)
72{
73 hw->count = CPU_TO_DMA(fsl_chan, count, 32);
74}
75
76static void set_desc_src(struct fsl_dma_chan *fsl_chan,
77 struct fsl_dma_ld_hw *hw, dma_addr_t src)
78{
79 u64 snoop_bits;
80
81 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
82 ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
83 hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
84}
85
86static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
87 struct fsl_dma_ld_hw *hw, dma_addr_t dest)
88{
89 u64 snoop_bits;
90
91 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
92 ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
93 hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
94}
95
96static void set_desc_next(struct fsl_dma_chan *fsl_chan,
97 struct fsl_dma_ld_hw *hw, dma_addr_t next)
98{
99 u64 snoop_bits;
100
101 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
102 ? FSL_DMA_SNEN : 0;
103 hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
104}
105
106static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
107{
108 DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
109}
110
111static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
112{
113 return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
114}
115
116static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
117{
118 DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
119}
120
121static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
122{
123 return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
124}
125
126static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
127{
128 u32 sr = get_sr(fsl_chan);
129 return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
130}
131
132static void dma_start(struct fsl_dma_chan *fsl_chan)
133{
 134 u32 mr_set = 0;
135
136 if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
137 DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
138 mr_set |= FSL_DMA_MR_EMP_EN;
139 } else
140 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
141 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
142 & ~FSL_DMA_MR_EMP_EN, 32);
143
144 if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
145 mr_set |= FSL_DMA_MR_EMS_EN;
146 else
147 mr_set |= FSL_DMA_MR_CS;
148
149 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
150 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
151 | mr_set, 32);
152}
153
154static void dma_halt(struct fsl_dma_chan *fsl_chan)
155{
156 int i = 0;
157 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
158 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
159 32);
160 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
161 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
162 | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);
163
164 while (!dma_is_idle(fsl_chan) && (i++ < 100))
165 udelay(10);
166 if (i >= 100 && !dma_is_idle(fsl_chan))
167 dev_err(fsl_chan->dev, "DMA halt timeout!\n");
168}
169
170static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
171 struct fsl_desc_sw *desc)
172{
173 desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
174 DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
175 64);
176}
177
178static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
179 struct fsl_desc_sw *new_desc)
180{
181 struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
182
183 if (list_empty(&fsl_chan->ld_queue))
184 return;
185
 186 /* Link the tail of the queue to the new descriptor's physical
 187 * address and enable the End-of-segment interrupt on that
 188 * last link descriptor
 189 * (the previous node's next link descriptor).
 190 *
 191 * For FSL_DMA_IP_83XX, the snoop enable bit also needs to be set.
 192 */
193 queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
194 new_desc->async_tx.phys | FSL_DMA_EOSIE |
195 (((fsl_chan->feature & FSL_DMA_IP_MASK)
196 == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
197}
198
199/**
200 * fsl_chan_set_src_loop_size - Set source address hold transfer size
201 * @fsl_chan : Freescale DMA channel
 202 * @size : Address loop size, 0 to disable the loop
 203 *
 204 * Sets the source address hold (loop) transfer size. When the DMA
 205 * engine transfers data from the source address (SA) with a loop
 206 * size of 4, it reads data from SA, SA + 1, SA + 2, SA + 3, then
 207 * loops back to SA, SA + 1, and so on, until the transfer
 208 * completes.
209 */
210static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
211{
212 switch (size) {
213 case 0:
214 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
215 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
216 (~FSL_DMA_MR_SAHE), 32);
217 break;
218 case 1:
219 case 2:
220 case 4:
221 case 8:
222 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
223 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
224 FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
225 32);
226 break;
227 }
228}
229
230/**
231 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
232 * @fsl_chan : Freescale DMA channel
 233 * @size : Address loop size, 0 to disable the loop
 234 *
 235 * Sets the destination address hold (loop) transfer size. When the
 236 * DMA engine transfers data to the destination address (TA) with a
 237 * loop size of 4, it writes data to TA, TA + 1, TA + 2, TA + 3,
 238 * then loops back to TA, TA + 1, and so on, until the transfer
 239 * completes.
240 */
241static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
242{
243 switch (size) {
244 case 0:
245 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
246 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
247 (~FSL_DMA_MR_DAHE), 32);
248 break;
249 case 1:
250 case 2:
251 case 4:
252 case 8:
253 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
254 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
255 FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
256 32);
257 break;
258 }
259}
260
261/**
262 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
263 * @fsl_chan : Freescale DMA channel
264 * @size : Pause control size, 0 for disable external pause control.
265 * The maximum is 1024.
266 *
267 * The Freescale DMA channel can be controlled by the external
268 * signal DREQ#. The pause control size is how many bytes are allowed
269 * to transfer before pausing the channel, after which a new assertion
270 * of DREQ# resumes channel operation.
271 */
272static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
273{
274 if (size > 1024)
275 return;
276
277 if (size) {
278 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
279 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
280 | ((__ilog2(size) << 24) & 0x0f000000),
281 32);
282 fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
283 } else
284 fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
285}
286
287/**
288 * fsl_chan_toggle_ext_start - Toggle channel external start status
289 * @fsl_chan : Freescale DMA channel
290 * @enable : 0 is disabled, 1 is enabled.
291 *
 292 * If external start is enabled, the channel can be started by an
 293 * external DMA start pin, so dma_start() does not begin the
 294 * transfer immediately; the DMA channel waits until the
 295 * control pin is asserted.
296 */
297static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
298{
299 if (enable)
300 fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
301 else
302 fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
303}
304
305static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
306{
307 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
308 struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
309 unsigned long flags;
310 dma_cookie_t cookie;
311
312 /* cookie increment and adding to ld_queue must be atomic */
313 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
314
315 cookie = fsl_chan->common.cookie;
316 cookie++;
317 if (cookie < 0)
318 cookie = 1;
319 desc->async_tx.cookie = cookie;
320 fsl_chan->common.cookie = desc->async_tx.cookie;
321
322 append_ld_queue(fsl_chan, desc);
323 list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);
324
325 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
326
327 return cookie;
328}
329
330/**
331 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
332 * @fsl_chan : Freescale DMA channel
333 *
 334 * Return - The allocated descriptor, or NULL on failure.
335 */
336static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
337 struct fsl_dma_chan *fsl_chan)
338{
339 dma_addr_t pdesc;
340 struct fsl_desc_sw *desc_sw;
341
342 desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
343 if (desc_sw) {
344 memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
345 dma_async_tx_descriptor_init(&desc_sw->async_tx,
346 &fsl_chan->common);
347 desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
348 INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
349 desc_sw->async_tx.phys = pdesc;
350 }
351
352 return desc_sw;
353}
354
355
356/**
357 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
358 * @fsl_chan : Freescale DMA channel
359 *
360 * This function will create a dma pool for descriptor allocation.
361 *
362 * Return - The number of descriptors allocated.
363 */
364static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
365{
366 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
367 LIST_HEAD(tmp_list);
368
 369 /* We need the descriptor to be aligned to 32 bytes
 370 * to meet the FSL DMA specification requirement.
371 */
372 fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
373 fsl_chan->dev, sizeof(struct fsl_desc_sw),
374 32, 0);
375 if (!fsl_chan->desc_pool) {
376 dev_err(fsl_chan->dev, "No memory for channel %d "
377 "descriptor dma pool.\n", fsl_chan->id);
378 return 0;
379 }
380
381 return 1;
382}
383
384/**
385 * fsl_dma_free_chan_resources - Free all resources of the channel.
386 * @fsl_chan : Freescale DMA channel
387 */
388static void fsl_dma_free_chan_resources(struct dma_chan *chan)
389{
390 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
391 struct fsl_desc_sw *desc, *_desc;
392 unsigned long flags;
393
394 dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
395 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
396 list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
397#ifdef FSL_DMA_LD_DEBUG
398 dev_dbg(fsl_chan->dev,
399 "LD %p will be released.\n", desc);
400#endif
401 list_del(&desc->node);
402 /* free link descriptor */
403 dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
404 }
405 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
406 dma_pool_destroy(fsl_chan->desc_pool);
407}
408
409static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
410 struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
411 size_t len, unsigned long flags)
412{
413 struct fsl_dma_chan *fsl_chan;
414 struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
415 size_t copy;
416 LIST_HEAD(link_chain);
417
418 if (!chan)
419 return NULL;
420
421 if (!len)
422 return NULL;
423
424 fsl_chan = to_fsl_chan(chan);
425
426 do {
427
428 /* Allocate the link descriptor from DMA pool */
429 new = fsl_dma_alloc_descriptor(fsl_chan);
430 if (!new) {
431 dev_err(fsl_chan->dev,
432 "No free memory for link descriptor\n");
433 return NULL;
434 }
435#ifdef FSL_DMA_LD_DEBUG
436 dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
437#endif
438
439 copy = min(len, FSL_DMA_BCR_MAX_CNT);
440
441 set_desc_cnt(fsl_chan, &new->hw, copy);
442 set_desc_src(fsl_chan, &new->hw, dma_src);
443 set_desc_dest(fsl_chan, &new->hw, dma_dest);
444
445 if (!first)
446 first = new;
447 else
448 set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);
449
450 new->async_tx.cookie = 0;
451 new->async_tx.ack = 1;
452
453 prev = new;
454 len -= copy;
455 dma_src += copy;
456 dma_dest += copy;
457
458 /* Insert the link descriptor to the LD ring */
459 list_add_tail(&new->node, &first->async_tx.tx_list);
460 } while (len);
461
462 new->async_tx.ack = 0; /* client is in control of this ack */
463 new->async_tx.cookie = -EBUSY;
464
 465 /* Set End-of-link to the last link descriptor of new list */
466 set_ld_eol(fsl_chan, new);
467
468 return first ? &first->async_tx : NULL;
469}
470
471/**
472 * fsl_dma_update_completed_cookie - Update the completed cookie.
473 * @fsl_chan : Freescale DMA channel
474 */
475static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
476{
477 struct fsl_desc_sw *cur_desc, *desc;
478 dma_addr_t ld_phy;
479
480 ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;
481
482 if (ld_phy) {
483 cur_desc = NULL;
484 list_for_each_entry(desc, &fsl_chan->ld_queue, node)
485 if (desc->async_tx.phys == ld_phy) {
486 cur_desc = desc;
487 break;
488 }
489
490 if (cur_desc && cur_desc->async_tx.cookie) {
491 if (dma_is_idle(fsl_chan))
492 fsl_chan->completed_cookie =
493 cur_desc->async_tx.cookie;
494 else
495 fsl_chan->completed_cookie =
496 cur_desc->async_tx.cookie - 1;
497 }
498 }
499}
500
501/**
502 * fsl_chan_ld_cleanup - Clean up link descriptors
503 * @fsl_chan : Freescale DMA channel
504 *
 505 * This function cleans up the ld_queue of the DMA channel: completed
 506 * link descriptors are removed from the queue, returned to the
 507 * descriptor pool, and their callbacks are run.
508 */
509static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
510{
511 struct fsl_desc_sw *desc, *_desc;
512 unsigned long flags;
513
514 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
515
516 fsl_dma_update_completed_cookie(fsl_chan);
517 dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
518 fsl_chan->completed_cookie);
519 list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
520 dma_async_tx_callback callback;
521 void *callback_param;
522
523 if (dma_async_is_complete(desc->async_tx.cookie,
524 fsl_chan->completed_cookie, fsl_chan->common.cookie)
525 == DMA_IN_PROGRESS)
526 break;
527
528 callback = desc->async_tx.callback;
529 callback_param = desc->async_tx.callback_param;
530
531 /* Remove from ld_queue list */
532 list_del(&desc->node);
533
534 dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n",
535 desc);
536 dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
537
538 /* Run the link descriptor callback function */
539 if (callback) {
540 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
541 dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
542 desc);
543 callback(callback_param);
544 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
545 }
546 }
547 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
548}
549
550/**
551 * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
552 * @fsl_chan : Freescale DMA channel
553 */
554static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
555{
556 struct list_head *ld_node;
557 dma_addr_t next_dest_addr;
558 unsigned long flags;
559
560 if (!dma_is_idle(fsl_chan))
561 return;
562
563 dma_halt(fsl_chan);
564
 565 /* If there are link descriptors in the queue that have not been
 566 * transferred yet, we need to start them.
567 */
568 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
569
 570 /* Find the first untransferred descriptor */
571 for (ld_node = fsl_chan->ld_queue.next;
572 (ld_node != &fsl_chan->ld_queue)
573 && (dma_async_is_complete(
574 to_fsl_desc(ld_node)->async_tx.cookie,
575 fsl_chan->completed_cookie,
576 fsl_chan->common.cookie) == DMA_SUCCESS);
577 ld_node = ld_node->next);
578
579 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
580
581 if (ld_node != &fsl_chan->ld_queue) {
582 /* Get the ld start address from ld_queue */
583 next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
584 dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%016llx\n",
585 (u64)next_dest_addr);
586 set_cdar(fsl_chan, next_dest_addr);
587 dma_start(fsl_chan);
588 } else {
589 set_cdar(fsl_chan, 0);
590 set_ndar(fsl_chan, 0);
591 }
592}
593
594/**
595 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
596 * @fsl_chan : Freescale DMA channel
597 */
598static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
599{
600 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
601
602#ifdef FSL_DMA_LD_DEBUG
603 struct fsl_desc_sw *ld;
604 unsigned long flags;
605
606 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
607 if (list_empty(&fsl_chan->ld_queue)) {
608 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
609 return;
610 }
611
612 dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
613 list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
614 int i;
615 dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
616 fsl_chan->id, ld->async_tx.phys);
617 for (i = 0; i < 8; i++)
618 dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
619 i, *(((u32 *)&ld->hw) + i));
620 }
621 dev_dbg(fsl_chan->dev, "----------------\n");
622 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
623#endif
624
625 fsl_chan_xfer_ld_queue(fsl_chan);
626}
627
628static void fsl_dma_dependency_added(struct dma_chan *chan)
629{
630 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
631
632 fsl_chan_ld_cleanup(fsl_chan);
633}
634
635/**
636 * fsl_dma_is_complete - Determine the DMA status
637 * @fsl_chan : Freescale DMA channel
638 */
639static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
640 dma_cookie_t cookie,
641 dma_cookie_t *done,
642 dma_cookie_t *used)
643{
644 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
645 dma_cookie_t last_used;
646 dma_cookie_t last_complete;
647
648 fsl_chan_ld_cleanup(fsl_chan);
649
650 last_used = chan->cookie;
651 last_complete = fsl_chan->completed_cookie;
652
653 if (done)
654 *done = last_complete;
655
656 if (used)
657 *used = last_used;
658
659 return dma_async_is_complete(cookie, last_complete, last_used);
660}
661
662static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
663{
664 struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
665 dma_addr_t stat;
666
667 stat = get_sr(fsl_chan);
668 dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
669 fsl_chan->id, stat);
670 set_sr(fsl_chan, stat); /* Clear the event register */
671
672 stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
673 if (!stat)
674 return IRQ_NONE;
675
676 if (stat & FSL_DMA_SR_TE)
677 dev_err(fsl_chan->dev, "Transfer Error!\n");
678
679 /* If the link descriptor segment transfer finishes,
680 * we will recycle the used descriptor.
681 */
682 if (stat & FSL_DMA_SR_EOSI) {
683 dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
684 dev_dbg(fsl_chan->dev, "event: clndar 0x%016llx, "
685 "nlndar 0x%016llx\n", (u64)get_cdar(fsl_chan),
686 (u64)get_ndar(fsl_chan));
687 stat &= ~FSL_DMA_SR_EOSI;
688 }
689
 690 /* If the current transfer is the end of the transfer,
 691 * we should clear the Channel Start bit to
 692 * prepare for the next transfer.
693 */
694 if (stat & (FSL_DMA_SR_EOLNI | FSL_DMA_SR_EOCDI)) {
695 dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
696 stat &= ~FSL_DMA_SR_EOLNI;
697 fsl_chan_xfer_ld_queue(fsl_chan);
698 }
699
700 if (stat)
701 dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
702 stat);
703
704 dev_dbg(fsl_chan->dev, "event: Exit\n");
705 tasklet_schedule(&fsl_chan->tasklet);
706 return IRQ_HANDLED;
707}
708
709static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
710{
711 struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
712 u32 gsr;
713 int ch_nr;
714
715 gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
716 : in_le32(fdev->reg_base);
717 ch_nr = (32 - ffs(gsr)) / 8;
718
719 return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
720 fdev->chan[ch_nr]) : IRQ_NONE;
721}
722
723static void dma_do_tasklet(unsigned long data)
724{
725 struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
726 fsl_chan_ld_cleanup(fsl_chan);
727}
728
729static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan)
730{
731 if (fsl_chan)
732 dev_info(fsl_chan->dev, "selftest: callback is ok!\n");
733}
734
735static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
736{
737 struct dma_chan *chan;
738 int err = 0;
739 dma_addr_t dma_dest, dma_src;
740 dma_cookie_t cookie;
741 u8 *src, *dest;
742 int i;
743 size_t test_size;
744 struct dma_async_tx_descriptor *tx1, *tx2, *tx3;
745
746 test_size = 4096;
747
748 src = kmalloc(test_size * 2, GFP_KERNEL);
749 if (!src) {
750 dev_err(fsl_chan->dev,
751 "selftest: Cannot alloc memory for test!\n");
752 err = -ENOMEM;
753 goto out;
754 }
755
756 dest = src + test_size;
757
758 for (i = 0; i < test_size; i++)
759 src[i] = (u8) i;
760
761 chan = &fsl_chan->common;
762
763 if (fsl_dma_alloc_chan_resources(chan) < 1) {
764 dev_err(fsl_chan->dev,
765 "selftest: Cannot alloc resources for DMA\n");
766 err = -ENODEV;
767 goto out;
768 }
769
770 /* TX 1 */
771 dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2,
772 DMA_TO_DEVICE);
773 dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2,
774 DMA_FROM_DEVICE);
775 tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0);
776 async_tx_ack(tx1);
777
778 cookie = fsl_dma_tx_submit(tx1);
779 fsl_dma_memcpy_issue_pending(chan);
780 msleep(2);
781
782 if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
783 dev_err(fsl_chan->dev, "selftest: Time out!\n");
784 err = -ENODEV;
785 goto out;
786 }
787
788 /* Test free and re-alloc channel resources */
789 fsl_dma_free_chan_resources(chan);
790
791 if (fsl_dma_alloc_chan_resources(chan) < 1) {
792 dev_err(fsl_chan->dev,
793 "selftest: Cannot alloc resources for DMA\n");
794 err = -ENODEV;
795 goto free_resources;
796 }
797
798 /* Continue to test
799 * TX 2
800 */
801 dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2,
802 test_size / 4, DMA_TO_DEVICE);
803 dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2,
804 test_size / 4, DMA_FROM_DEVICE);
805 tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
806 async_tx_ack(tx2);
807
808 /* TX 3 */
809 dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4,
810 test_size / 4, DMA_TO_DEVICE);
811 dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4,
812 test_size / 4, DMA_FROM_DEVICE);
813 tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
814 async_tx_ack(tx3);
815
 816 /* Test submitting the prepared descriptors out of order */
817 cookie = fsl_dma_tx_submit(tx3);
818 cookie = fsl_dma_tx_submit(tx2);
819
820#ifdef FSL_DMA_CALLBACKTEST
821 if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *)
822 dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) {
823 tx3->callback = fsl_dma_callback_test;
824 tx3->callback_param = fsl_chan;
825 }
826#endif
827 fsl_dma_memcpy_issue_pending(chan);
828 msleep(2);
829
830 if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
831 dev_err(fsl_chan->dev, "selftest: Time out!\n");
832 err = -ENODEV;
833 goto free_resources;
834 }
835
836 err = memcmp(src, dest, test_size);
837 if (err) {
838 for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size);
839 i++);
840 dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%d is "
841 "error! src 0x%x, dest 0x%x\n",
842 i, test_size, *(src + i), *(dest + i));
843 }
844
845free_resources:
846 fsl_dma_free_chan_resources(chan);
847out:
848 kfree(src);
849 return err;
850}
851
852static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
853 const struct of_device_id *match)
854{
855 struct fsl_dma_device *fdev;
856 struct fsl_dma_chan *new_fsl_chan;
857 int err;
858
859 fdev = dev_get_drvdata(dev->dev.parent);
860 BUG_ON(!fdev);
861
862 /* alloc channel */
863 new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
864 if (!new_fsl_chan) {
865 dev_err(&dev->dev, "No free memory for allocating "
866 "dma channels!\n");
867 err = -ENOMEM;
868 goto err;
869 }
870
871 /* get dma channel register base */
872 err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg);
873 if (err) {
874 dev_err(&dev->dev, "Can't get %s property 'reg'\n",
875 dev->node->full_name);
876 goto err;
877 }
878
879 new_fsl_chan->feature = *(u32 *)match->data;
880
881 if (!fdev->feature)
882 fdev->feature = new_fsl_chan->feature;
883
884 /* If the DMA device's feature is different than its channels',
885 * report the bug.
886 */
887 WARN_ON(fdev->feature != new_fsl_chan->feature);
888
889 new_fsl_chan->dev = &dev->dev;
890 new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
891 new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
892
893 new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
894 if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) {
895 dev_err(&dev->dev, "There is no %d channel!\n",
896 new_fsl_chan->id);
897 err = -EINVAL;
898 goto err;
899 }
900 fdev->chan[new_fsl_chan->id] = new_fsl_chan;
901 tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
902 (unsigned long)new_fsl_chan);
903
904 /* Init the channel */
905 dma_init(new_fsl_chan);
906
907 /* Clear cdar registers */
908 set_cdar(new_fsl_chan, 0);
909
910 switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
911 case FSL_DMA_IP_85XX:
912 new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
913 new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
914 case FSL_DMA_IP_83XX:
915 new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
916 new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
917 }
918
919 spin_lock_init(&new_fsl_chan->desc_lock);
920 INIT_LIST_HEAD(&new_fsl_chan->ld_queue);
921
922 new_fsl_chan->common.device = &fdev->common;
923
924 /* Add the channel to DMA device channel list */
925 list_add_tail(&new_fsl_chan->common.device_node,
926 &fdev->common.channels);
927 fdev->common.chancnt++;
928
929 new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0);
930 if (new_fsl_chan->irq != NO_IRQ) {
931 err = request_irq(new_fsl_chan->irq,
932 &fsl_dma_chan_do_interrupt, IRQF_SHARED,
933 "fsldma-channel", new_fsl_chan);
934 if (err) {
935 dev_err(&dev->dev, "DMA channel %s request_irq error "
936 "with return %d\n", dev->node->full_name, err);
937 goto err;
938 }
939 }
940
941#ifdef CONFIG_FSL_DMA_SELFTEST
942 err = fsl_dma_self_test(new_fsl_chan);
943 if (err)
944 goto err;
945#endif
946
947 dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
948 match->compatible, new_fsl_chan->irq);
949
950 return 0;
951err:
952 dma_halt(new_fsl_chan);
953 iounmap(new_fsl_chan->reg_base);
954 free_irq(new_fsl_chan->irq, new_fsl_chan);
955 list_del(&new_fsl_chan->common.device_node);
956 kfree(new_fsl_chan);
957 return err;
958}
959
960const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN;
961const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN;
962
963static struct of_device_id of_fsl_dma_chan_ids[] = {
964 {
965 .compatible = "fsl,mpc8540-dma-channel",
966 .data = (void *)&mpc8540_dma_ip_feature,
967 },
968 {
969 .compatible = "fsl,mpc8349-dma-channel",
970 .data = (void *)&mpc8349_dma_ip_feature,
971 },
972 {}
973};
974
975static struct of_platform_driver of_fsl_dma_chan_driver = {
976 .name = "of-fsl-dma-channel",
977 .match_table = of_fsl_dma_chan_ids,
978 .probe = of_fsl_dma_chan_probe,
979};
980
981static __init int of_fsl_dma_chan_init(void)
982{
983 return of_register_platform_driver(&of_fsl_dma_chan_driver);
984}
985
986static int __devinit of_fsl_dma_probe(struct of_device *dev,
987 const struct of_device_id *match)
988{
989 int err;
990 unsigned int irq;
991 struct fsl_dma_device *fdev;
992
993 fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
994 if (!fdev) {
995 dev_err(&dev->dev, "No enough memory for 'priv'\n");
996 err = -ENOMEM;
997 goto err;
998 }
999 fdev->dev = &dev->dev;
1000 INIT_LIST_HEAD(&fdev->common.channels);
1001
1002 /* get DMA controller register base */
1003 err = of_address_to_resource(dev->node, 0, &fdev->reg);
1004 if (err) {
1005 dev_err(&dev->dev, "Can't get %s property 'reg'\n",
1006 dev->node->full_name);
1007 goto err;
1008 }
1009
1010 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
1011 "controller at 0x%08x...\n",
1012 match->compatible, fdev->reg.start);
1013 fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
1014 - fdev->reg.start + 1);
1015
1016 dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
1017 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
1018 fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
1019 fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
1020 fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
1021 fdev->common.device_is_tx_complete = fsl_dma_is_complete;
1022 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
1023 fdev->common.device_dependency_added = fsl_dma_dependency_added;
1024 fdev->common.dev = &dev->dev;
1025
1026 irq = irq_of_parse_and_map(dev->node, 0);
1027 if (irq != NO_IRQ) {
1028 err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED,
1029 "fsldma-device", fdev);
1030 if (err) {
1031 dev_err(&dev->dev, "DMA device request_irq error "
1032 "with return %d\n", err);
1033 goto err;
1034 }
1035 }
1036
1037 dev_set_drvdata(&(dev->dev), fdev);
1038 of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev);
1039
1040 dma_async_device_register(&fdev->common);
1041 return 0;
1042
1043err:
1044 iounmap(fdev->reg_base);
1045 kfree(fdev);
1046 return err;
1047}
1048
1049static struct of_device_id of_fsl_dma_ids[] = {
1050 { .compatible = "fsl,mpc8540-dma", },
1051 { .compatible = "fsl,mpc8349-dma", },
1052 {}
1053};
1054
1055static struct of_platform_driver of_fsl_dma_driver = {
1056 .name = "of-fsl-dma",
1057 .match_table = of_fsl_dma_ids,
1058 .probe = of_fsl_dma_probe,
1059};
1060
1061static __init int of_fsl_dma_init(void)
1062{
1063 return of_register_platform_driver(&of_fsl_dma_driver);
1064}
1065
1066subsys_initcall(of_fsl_dma_chan_init);
1067subsys_initcall(of_fsl_dma_init);
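
A user-space sketch (illustrative only, not kernel code) of the segmentation loop in fsl_dma_prep_memcpy() above: a request longer than the byte count register limit is split into a chain of link descriptors of at most FSL_DMA_BCR_MAX_CNT bytes each, and the last one receives the end-of-link marker:

#include <stdio.h>
#include <stddef.h>

#define FSL_DMA_BCR_MAX_CNT 0x03ffffffu   /* same limit as in fsldma.h */

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	size_t len = (size_t)FSL_DMA_BCR_MAX_CNT * 2 + 4096; /* example length */
	unsigned long long src = 0x10000000ULL;               /* fake DMA addresses */
	unsigned long long dst = 0x80000000ULL;
	int segments = 0;

	do {
		size_t copy = min_sz(len, FSL_DMA_BCR_MAX_CNT);

		/* A real descriptor would also record src/dst and be chained to
		 * the previous one via its next-link-descriptor address. */
		printf("LD %d: src=0x%llx dst=0x%llx count=%zu\n",
		       segments, src, dst, copy);

		len -= copy;
		src += copy;
		dst += copy;
		segments++;
	} while (len);

	printf("%d link descriptors; the last one gets the EOL bit\n", segments);
	return 0;
}
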
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
new file mode 100644
index 000000000000..ba78c42121ba
--- /dev/null
+++ b/drivers/dma/fsldma.h
@@ -0,0 +1,189 @@
1/*
2 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
3 *
4 * Author:
5 * Zhang Wei <wei.zhang@freescale.com>, Jul 2007
6 * Ebony Zhu <ebony.zhu@freescale.com>, May 2007
7 *
8 * This is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 */
14#ifndef __DMA_FSLDMA_H
15#define __DMA_FSLDMA_H
16
17#include <linux/device.h>
18#include <linux/dmapool.h>
19#include <linux/dmaengine.h>
20
21/* Define data structures needed by Freescale
22 * MPC8540 and MPC8349 DMA controller.
23 */
24#define FSL_DMA_MR_CS 0x00000001
25#define FSL_DMA_MR_CC 0x00000002
26#define FSL_DMA_MR_CA 0x00000008
27#define FSL_DMA_MR_EIE 0x00000040
28#define FSL_DMA_MR_XFE 0x00000020
29#define FSL_DMA_MR_EOLNIE 0x00000100
30#define FSL_DMA_MR_EOLSIE 0x00000080
31#define FSL_DMA_MR_EOSIE 0x00000200
32#define FSL_DMA_MR_CDSM 0x00000010
33#define FSL_DMA_MR_CTM 0x00000004
34#define FSL_DMA_MR_EMP_EN 0x00200000
35#define FSL_DMA_MR_EMS_EN 0x00040000
36#define FSL_DMA_MR_DAHE 0x00002000
37#define FSL_DMA_MR_SAHE 0x00001000
38
39/* Special MR definition for MPC8349 */
40#define FSL_DMA_MR_EOTIE 0x00000080
41
42#define FSL_DMA_SR_CH 0x00000020
43#define FSL_DMA_SR_CB 0x00000004
44#define FSL_DMA_SR_TE 0x00000080
45#define FSL_DMA_SR_EOSI 0x00000002
46#define FSL_DMA_SR_EOLSI 0x00000001
47#define FSL_DMA_SR_EOCDI 0x00000001
48#define FSL_DMA_SR_EOLNI 0x00000008
49
50#define FSL_DMA_SATR_SBPATMU 0x20000000
51#define FSL_DMA_SATR_STRANSINT_RIO 0x00c00000
52#define FSL_DMA_SATR_SREADTYPE_SNOOP_READ 0x00050000
53#define FSL_DMA_SATR_SREADTYPE_BP_IORH 0x00020000
54#define FSL_DMA_SATR_SREADTYPE_BP_NREAD 0x00040000
55#define FSL_DMA_SATR_SREADTYPE_BP_MREAD 0x00070000
56
57#define FSL_DMA_DATR_DBPATMU 0x20000000
58#define FSL_DMA_DATR_DTRANSINT_RIO 0x00c00000
59#define FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE 0x00050000
60#define FSL_DMA_DATR_DWRITETYPE_BP_FLUSH 0x00010000
61
62#define FSL_DMA_EOL ((u64)0x1)
63#define FSL_DMA_SNEN ((u64)0x10)
64#define FSL_DMA_EOSIE 0x8
65#define FSL_DMA_NLDA_MASK (~(u64)0x1f)
66
67#define FSL_DMA_BCR_MAX_CNT 0x03ffffffu
68
69#define FSL_DMA_DGSR_TE 0x80
70#define FSL_DMA_DGSR_CH 0x20
71#define FSL_DMA_DGSR_PE 0x10
72#define FSL_DMA_DGSR_EOLNI 0x08
73#define FSL_DMA_DGSR_CB 0x04
74#define FSL_DMA_DGSR_EOSI 0x02
75#define FSL_DMA_DGSR_EOLSI 0x01
76
77struct fsl_dma_ld_hw {
78 u64 __bitwise src_addr;
79 u64 __bitwise dst_addr;
80 u64 __bitwise next_ln_addr;
81 u32 __bitwise count;
82 u32 __bitwise reserve;
83} __attribute__((aligned(32)));
84
85struct fsl_desc_sw {
86 struct fsl_dma_ld_hw hw;
87 struct list_head node;
88 struct dma_async_tx_descriptor async_tx;
89 struct list_head *ld;
90 void *priv;
91} __attribute__((aligned(32)));
92
93struct fsl_dma_chan_regs {
94 u32 __bitwise mr; /* 0x00 - Mode Register */
95 u32 __bitwise sr; /* 0x04 - Status Register */
96 u64 __bitwise cdar; /* 0x08 - Current descriptor address register */
97 u64 __bitwise sar; /* 0x10 - Source Address Register */
98 u64 __bitwise dar; /* 0x18 - Destination Address Register */
99 u32 __bitwise bcr; /* 0x20 - Byte Count Register */
100 u64 __bitwise ndar; /* 0x24 - Next Descriptor Address Register */
101};
102
103struct fsl_dma_chan;
104#define FSL_DMA_MAX_CHANS_PER_DEVICE 4
105
106struct fsl_dma_device {
107 void __iomem *reg_base; /* DGSR register base */
108 struct resource reg; /* Resource for register */
109 struct device *dev;
110 struct dma_device common;
111 struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
112 u32 feature; /* The same as DMA channels */
113};
114
115/* Define macros for fsl_dma_chan->feature property */
116#define FSL_DMA_LITTLE_ENDIAN 0x00000000
117#define FSL_DMA_BIG_ENDIAN 0x00000001
118
119#define FSL_DMA_IP_MASK 0x00000ff0
120#define FSL_DMA_IP_85XX 0x00000010
121#define FSL_DMA_IP_83XX 0x00000020
122
123#define FSL_DMA_CHAN_PAUSE_EXT 0x00001000
124#define FSL_DMA_CHAN_START_EXT 0x00002000
125
126struct fsl_dma_chan {
127 struct fsl_dma_chan_regs __iomem *reg_base;
128 dma_cookie_t completed_cookie; /* The maximum cookie completed */
129 spinlock_t desc_lock; /* Descriptor operation lock */
130 struct list_head ld_queue; /* Link descriptors queue */
131 struct dma_chan common; /* DMA common channel */
132 struct dma_pool *desc_pool; /* Descriptors pool */
133 struct device *dev; /* Channel device */
134 struct resource reg; /* Resource for register */
135 int irq; /* Channel IRQ */
136 int id; /* Raw id of this channel */
137 struct tasklet_struct tasklet;
138 u32 feature;
139
140 void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int size);
141 void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable);
142 void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
143 void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
144};
145
146#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common)
147#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
148#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
149
150#ifndef __powerpc64__
151static u64 in_be64(const u64 __iomem *addr)
152{
153 return ((u64)in_be32((u32 *)addr) << 32) | (in_be32((u32 *)addr + 1));
154}
155
156static void out_be64(u64 __iomem *addr, u64 val)
157{
158 out_be32((u32 *)addr, val >> 32);
159 out_be32((u32 *)addr + 1, (u32)val);
160}
161
 162/* There are no asm instructions for 64-bit byte-reversed loads and stores */
163static u64 in_le64(const u64 __iomem *addr)
164{
165 return ((u64)in_le32((u32 *)addr + 1) << 32) | (in_le32((u32 *)addr));
166}
167
168static void out_le64(u64 __iomem *addr, u64 val)
169{
170 out_le32((u32 *)addr + 1, val >> 32);
171 out_le32((u32 *)addr, (u32)val);
172}
173#endif
174
175#define DMA_IN(fsl_chan, addr, width) \
176 (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
177 in_be##width(addr) : in_le##width(addr))
178#define DMA_OUT(fsl_chan, addr, val, width) \
179 (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
180 out_be##width(addr, val) : out_le##width(addr, val))
181
182#define DMA_TO_CPU(fsl_chan, d, width) \
183 (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
184 be##width##_to_cpu(d) : le##width##_to_cpu(d))
185#define CPU_TO_DMA(fsl_chan, c, width) \
186 (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
187 cpu_to_be##width(c) : cpu_to_le##width(c))
188
189#endif /* __DMA_FSLDMA_H */
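
A small stand-alone sketch of the in_be64() fallback defined above, which assembles a 64-bit register value from two 32-bit accesses when no 64-bit MMIO accessor is available; read32() here is a stub standing in for in_be32(), and the "register" is an ordinary array:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a pair of 32-bit device registers. */
static const uint32_t fake_reg[2] = { 0x01234567, 0x89abcdef };

static uint32_t read32(const uint32_t *addr) { return *addr; }

static uint64_t read64_be(const uint32_t *addr)
{
	/* As in in_be64(): the lower-addressed word is the high half. */
	return ((uint64_t)read32(addr) << 32) | read32(addr + 1);
}

int main(void)
{
	/* Prints 0x0123456789abcdef. */
	printf("64-bit value: 0x%016llx\n",
	       (unsigned long long)read64_be(fake_reg));
	return 0;
}
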
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index dff38accc5c1..4017d9e7acd2 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -714,6 +714,7 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
714 new->len = len; 714 new->len = len;
715 new->dst = dma_dest; 715 new->dst = dma_dest;
716 new->src = dma_src; 716 new->src = dma_src;
717 new->async_tx.ack = 0;
717 return &new->async_tx; 718 return &new->async_tx;
718 } else 719 } else
719 return NULL; 720 return NULL;
@@ -741,6 +742,7 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
741 new->len = len; 742 new->len = len;
742 new->dst = dma_dest; 743 new->dst = dma_dest;
743 new->src = dma_src; 744 new->src = dma_src;
745 new->async_tx.ack = 0;
744 return &new->async_tx; 746 return &new->async_tx;
745 } else 747 } else
746 return NULL; 748 return NULL;
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index 3e9719948a8e..a03462750b95 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -18,6 +18,7 @@
18 18
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/errno.h> 20#include <linux/errno.h>
21#include <linux/delay.h>
21#include <linux/device.h> 22#include <linux/device.h>
22#include <linux/mutex.h> 23#include <linux/mutex.h>
23#include <linux/crc-itu-t.h> 24#include <linux/crc-itu-t.h>
@@ -214,17 +215,29 @@ static void
214fw_card_bm_work(struct work_struct *work) 215fw_card_bm_work(struct work_struct *work)
215{ 216{
216 struct fw_card *card = container_of(work, struct fw_card, work.work); 217 struct fw_card *card = container_of(work, struct fw_card, work.work);
217 struct fw_device *root; 218 struct fw_device *root_device;
219 struct fw_node *root_node, *local_node;
218 struct bm_data bmd; 220 struct bm_data bmd;
219 unsigned long flags; 221 unsigned long flags;
220 int root_id, new_root_id, irm_id, gap_count, generation, grace; 222 int root_id, new_root_id, irm_id, gap_count, generation, grace;
221 int do_reset = 0; 223 int do_reset = 0;
222 224
223 spin_lock_irqsave(&card->lock, flags); 225 spin_lock_irqsave(&card->lock, flags);
226 local_node = card->local_node;
227 root_node = card->root_node;
228
229 if (local_node == NULL) {
230 spin_unlock_irqrestore(&card->lock, flags);
231 return;
232 }
233 fw_node_get(local_node);
234 fw_node_get(root_node);
224 235
225 generation = card->generation; 236 generation = card->generation;
226 root = card->root_node->data; 237 root_device = root_node->data;
227 root_id = card->root_node->node_id; 238 if (root_device)
239 fw_device_get(root_device);
240 root_id = root_node->node_id;
228 grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); 241 grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
229 242
230 if (card->bm_generation + 1 == generation || 243 if (card->bm_generation + 1 == generation ||
@@ -243,14 +256,14 @@ fw_card_bm_work(struct work_struct *work)
243 256
244 irm_id = card->irm_node->node_id; 257 irm_id = card->irm_node->node_id;
245 if (!card->irm_node->link_on) { 258 if (!card->irm_node->link_on) {
246 new_root_id = card->local_node->node_id; 259 new_root_id = local_node->node_id;
247 fw_notify("IRM has link off, making local node (%02x) root.\n", 260 fw_notify("IRM has link off, making local node (%02x) root.\n",
248 new_root_id); 261 new_root_id);
249 goto pick_me; 262 goto pick_me;
250 } 263 }
251 264
252 bmd.lock.arg = cpu_to_be32(0x3f); 265 bmd.lock.arg = cpu_to_be32(0x3f);
253 bmd.lock.data = cpu_to_be32(card->local_node->node_id); 266 bmd.lock.data = cpu_to_be32(local_node->node_id);
254 267
255 spin_unlock_irqrestore(&card->lock, flags); 268 spin_unlock_irqrestore(&card->lock, flags);
256 269
@@ -267,12 +280,12 @@ fw_card_bm_work(struct work_struct *work)
267 * Another bus reset happened. Just return, 280 * Another bus reset happened. Just return,
268 * the BM work has been rescheduled. 281 * the BM work has been rescheduled.
269 */ 282 */
270 return; 283 goto out;
271 } 284 }
272 285
273 if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f) 286 if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f)
274 /* Somebody else is BM, let them do the work. */ 287 /* Somebody else is BM, let them do the work. */
275 return; 288 goto out;
276 289
277 spin_lock_irqsave(&card->lock, flags); 290 spin_lock_irqsave(&card->lock, flags);
278 if (bmd.rcode != RCODE_COMPLETE) { 291 if (bmd.rcode != RCODE_COMPLETE) {
@@ -282,7 +295,7 @@ fw_card_bm_work(struct work_struct *work)
282 * do a bus reset and pick the local node as 295 * do a bus reset and pick the local node as
283 * root, and thus, IRM. 296 * root, and thus, IRM.
284 */ 297 */
285 new_root_id = card->local_node->node_id; 298 new_root_id = local_node->node_id;
286 fw_notify("BM lock failed, making local node (%02x) root.\n", 299 fw_notify("BM lock failed, making local node (%02x) root.\n",
287 new_root_id); 300 new_root_id);
288 goto pick_me; 301 goto pick_me;
@@ -295,7 +308,7 @@ fw_card_bm_work(struct work_struct *work)
295 */ 308 */
296 spin_unlock_irqrestore(&card->lock, flags); 309 spin_unlock_irqrestore(&card->lock, flags);
297 schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10)); 310 schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10));
298 return; 311 goto out;
299 } 312 }
300 313
301 /* 314 /*
@@ -305,20 +318,20 @@ fw_card_bm_work(struct work_struct *work)
305 */ 318 */
306 card->bm_generation = generation; 319 card->bm_generation = generation;
307 320
308 if (root == NULL) { 321 if (root_device == NULL) {
309 /* 322 /*
310 * Either link_on is false, or we failed to read the 323 * Either link_on is false, or we failed to read the
311 * config rom. In either case, pick another root. 324 * config rom. In either case, pick another root.
312 */ 325 */
313 new_root_id = card->local_node->node_id; 326 new_root_id = local_node->node_id;
314 } else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) { 327 } else if (atomic_read(&root_device->state) != FW_DEVICE_RUNNING) {
315 /* 328 /*
316 * If we haven't probed this device yet, bail out now 329 * If we haven't probed this device yet, bail out now
317 * and let's try again once that's done. 330 * and let's try again once that's done.
318 */ 331 */
319 spin_unlock_irqrestore(&card->lock, flags); 332 spin_unlock_irqrestore(&card->lock, flags);
320 return; 333 goto out;
321 } else if (root->config_rom[2] & BIB_CMC) { 334 } else if (root_device->config_rom[2] & BIB_CMC) {
322 /* 335 /*
323 * FIXME: I suppose we should set the cmstr bit in the 336 * FIXME: I suppose we should set the cmstr bit in the
324 * STATE_CLEAR register of this node, as described in 337 * STATE_CLEAR register of this node, as described in
@@ -332,7 +345,7 @@ fw_card_bm_work(struct work_struct *work)
332 * successfully read the config rom, but it's not 345 * successfully read the config rom, but it's not
333 * cycle master capable. 346 * cycle master capable.
334 */ 347 */
335 new_root_id = card->local_node->node_id; 348 new_root_id = local_node->node_id;
336 } 349 }
337 350
338 pick_me: 351 pick_me:
@@ -341,8 +354,8 @@ fw_card_bm_work(struct work_struct *work)
341 * the typically much larger 1394b beta repeater delays though. 354 * the typically much larger 1394b beta repeater delays though.
342 */ 355 */
343 if (!card->beta_repeaters_present && 356 if (!card->beta_repeaters_present &&
344 card->root_node->max_hops < ARRAY_SIZE(gap_count_table)) 357 root_node->max_hops < ARRAY_SIZE(gap_count_table))
345 gap_count = gap_count_table[card->root_node->max_hops]; 358 gap_count = gap_count_table[root_node->max_hops];
346 else 359 else
347 gap_count = 63; 360 gap_count = 63;
348 361
@@ -364,6 +377,11 @@ fw_card_bm_work(struct work_struct *work)
364 fw_send_phy_config(card, new_root_id, generation, gap_count); 377 fw_send_phy_config(card, new_root_id, generation, gap_count);
365 fw_core_initiate_bus_reset(card, 1); 378 fw_core_initiate_bus_reset(card, 1);
366 } 379 }
380 out:
381 if (root_device)
382 fw_device_put(root_device);
383 fw_node_put(root_node);
384 fw_node_put(local_node);
367} 385}
368 386
369static void 387static void
@@ -381,6 +399,7 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
381 static atomic_t index = ATOMIC_INIT(-1); 399 static atomic_t index = ATOMIC_INIT(-1);
382 400
383 kref_init(&card->kref); 401 kref_init(&card->kref);
402 atomic_set(&card->device_count, 0);
384 card->index = atomic_inc_return(&index); 403 card->index = atomic_inc_return(&index);
385 card->driver = driver; 404 card->driver = driver;
386 card->device = device; 405 card->device = device;
@@ -511,8 +530,14 @@ fw_core_remove_card(struct fw_card *card)
511 card->driver = &dummy_driver; 530 card->driver = &dummy_driver;
512 531
513 fw_destroy_nodes(card); 532 fw_destroy_nodes(card);
514 flush_scheduled_work(); 533 /*
534 * Wait for all device workqueue jobs to finish. Otherwise the
535 * firewire-core module could be unloaded before the jobs ran.
536 */
537 while (atomic_read(&card->device_count) > 0)
538 msleep(100);
515 539
540 cancel_delayed_work_sync(&card->work);
516 fw_flush_transactions(card); 541 fw_flush_transactions(card);
517 del_timer_sync(&card->flush_timer); 542 del_timer_sync(&card->flush_timer);
518 543
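
A toy sketch (not kernel code) of the shutdown idiom adopted by the fw-card.c hunk above: the card keeps a count of outstanding device jobs, and fw_core_remove_card() polls until that count drops to zero instead of flushing the shared workqueue:

#include <stdio.h>

/* 'device_count' plays the role of card->device_count; finish_one_job()
 * stands in for a device work item completing and dropping the count. */
static int device_count = 3;

static void finish_one_job(void)
{
	device_count--;   /* fw_device_release() decrements the real counter */
}

int main(void)
{
	while (device_count > 0) {
		finish_one_job();  /* in the kernel this happens asynchronously */
		/* msleep(100) would go here to avoid busy-waiting */
	}
	printf("no device jobs left; card teardown can proceed\n");
	return 0;
}
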
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index 7e73cbaa4121..46bc197a047f 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -109,15 +109,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
109 struct client *client; 109 struct client *client;
110 unsigned long flags; 110 unsigned long flags;
111 111
112 device = fw_device_from_devt(inode->i_rdev); 112 device = fw_device_get_by_devt(inode->i_rdev);
113 if (device == NULL) 113 if (device == NULL)
114 return -ENODEV; 114 return -ENODEV;
115 115
116 client = kzalloc(sizeof(*client), GFP_KERNEL); 116 client = kzalloc(sizeof(*client), GFP_KERNEL);
117 if (client == NULL) 117 if (client == NULL) {
118 fw_device_put(device);
118 return -ENOMEM; 119 return -ENOMEM;
120 }
119 121
120 client->device = fw_device_get(device); 122 client->device = device;
121 INIT_LIST_HEAD(&client->event_list); 123 INIT_LIST_HEAD(&client->event_list);
122 INIT_LIST_HEAD(&client->resource_list); 124 INIT_LIST_HEAD(&client->resource_list);
123 spin_lock_init(&client->lock); 125 spin_lock_init(&client->lock);
@@ -644,6 +646,10 @@ static int ioctl_create_iso_context(struct client *client, void *buffer)
644 struct fw_cdev_create_iso_context *request = buffer; 646 struct fw_cdev_create_iso_context *request = buffer;
645 struct fw_iso_context *context; 647 struct fw_iso_context *context;
646 648
649 /* We only support one context at this time. */
650 if (client->iso_context != NULL)
651 return -EBUSY;
652
647 if (request->channel > 63) 653 if (request->channel > 63)
648 return -EINVAL; 654 return -EINVAL;
649 655
@@ -790,8 +796,9 @@ static int ioctl_start_iso(struct client *client, void *buffer)
790{ 796{
791 struct fw_cdev_start_iso *request = buffer; 797 struct fw_cdev_start_iso *request = buffer;
792 798
793 if (request->handle != 0) 799 if (client->iso_context == NULL || request->handle != 0)
794 return -EINVAL; 800 return -EINVAL;
801
795 if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { 802 if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
796 if (request->tags == 0 || request->tags > 15) 803 if (request->tags == 0 || request->tags > 15)
797 return -EINVAL; 804 return -EINVAL;
@@ -808,7 +815,7 @@ static int ioctl_stop_iso(struct client *client, void *buffer)
808{ 815{
809 struct fw_cdev_stop_iso *request = buffer; 816 struct fw_cdev_stop_iso *request = buffer;
810 817
811 if (request->handle != 0) 818 if (client->iso_context == NULL || request->handle != 0)
812 return -EINVAL; 819 return -EINVAL;
813 820
814 return fw_iso_context_stop(client->iso_context); 821 return fw_iso_context_stop(client->iso_context);
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
index de9066e69adf..870125a3638e 100644
--- a/drivers/firewire/fw-device.c
+++ b/drivers/firewire/fw-device.c
@@ -150,21 +150,10 @@ struct bus_type fw_bus_type = {
150}; 150};
151EXPORT_SYMBOL(fw_bus_type); 151EXPORT_SYMBOL(fw_bus_type);
152 152
153struct fw_device *fw_device_get(struct fw_device *device)
154{
155 get_device(&device->device);
156
157 return device;
158}
159
160void fw_device_put(struct fw_device *device)
161{
162 put_device(&device->device);
163}
164
165static void fw_device_release(struct device *dev) 153static void fw_device_release(struct device *dev)
166{ 154{
167 struct fw_device *device = fw_device(dev); 155 struct fw_device *device = fw_device(dev);
156 struct fw_card *card = device->card;
168 unsigned long flags; 157 unsigned long flags;
169 158
170 /* 159 /*
@@ -176,9 +165,9 @@ static void fw_device_release(struct device *dev)
176 spin_unlock_irqrestore(&device->card->lock, flags); 165 spin_unlock_irqrestore(&device->card->lock, flags);
177 166
178 fw_node_put(device->node); 167 fw_node_put(device->node);
179 fw_card_put(device->card);
180 kfree(device->config_rom); 168 kfree(device->config_rom);
181 kfree(device); 169 kfree(device);
170 atomic_dec(&card->device_count);
182} 171}
183 172
184int fw_device_enable_phys_dma(struct fw_device *device) 173int fw_device_enable_phys_dma(struct fw_device *device)
@@ -358,12 +347,9 @@ static ssize_t
358guid_show(struct device *dev, struct device_attribute *attr, char *buf) 347guid_show(struct device *dev, struct device_attribute *attr, char *buf)
359{ 348{
360 struct fw_device *device = fw_device(dev); 349 struct fw_device *device = fw_device(dev);
361 u64 guid;
362
363 guid = ((u64)device->config_rom[3] << 32) | device->config_rom[4];
364 350
365 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", 351 return snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
366 (unsigned long long)guid); 352 device->config_rom[3], device->config_rom[4]);
367} 353}
368 354
369static struct device_attribute fw_device_attributes[] = { 355static struct device_attribute fw_device_attributes[] = {
@@ -610,12 +596,14 @@ static DECLARE_RWSEM(idr_rwsem);
610static DEFINE_IDR(fw_device_idr); 596static DEFINE_IDR(fw_device_idr);
611int fw_cdev_major; 597int fw_cdev_major;
612 598
613struct fw_device *fw_device_from_devt(dev_t devt) 599struct fw_device *fw_device_get_by_devt(dev_t devt)
614{ 600{
615 struct fw_device *device; 601 struct fw_device *device;
616 602
617 down_read(&idr_rwsem); 603 down_read(&idr_rwsem);
618 device = idr_find(&fw_device_idr, MINOR(devt)); 604 device = idr_find(&fw_device_idr, MINOR(devt));
605 if (device)
606 fw_device_get(device);
619 up_read(&idr_rwsem); 607 up_read(&idr_rwsem);
620 608
621 return device; 609 return device;
@@ -627,13 +615,14 @@ static void fw_device_shutdown(struct work_struct *work)
627 container_of(work, struct fw_device, work.work); 615 container_of(work, struct fw_device, work.work);
628 int minor = MINOR(device->device.devt); 616 int minor = MINOR(device->device.devt);
629 617
630 down_write(&idr_rwsem);
631 idr_remove(&fw_device_idr, minor);
632 up_write(&idr_rwsem);
633
634 fw_device_cdev_remove(device); 618 fw_device_cdev_remove(device);
635 device_for_each_child(&device->device, NULL, shutdown_unit); 619 device_for_each_child(&device->device, NULL, shutdown_unit);
636 device_unregister(&device->device); 620 device_unregister(&device->device);
621
622 down_write(&idr_rwsem);
623 idr_remove(&fw_device_idr, minor);
624 up_write(&idr_rwsem);
625 fw_device_put(device);
637} 626}
638 627
639static struct device_type fw_device_type = { 628static struct device_type fw_device_type = {
@@ -668,7 +657,8 @@ static void fw_device_init(struct work_struct *work)
668 */ 657 */
669 658
670 if (read_bus_info_block(device, device->generation) < 0) { 659 if (read_bus_info_block(device, device->generation) < 0) {
671 if (device->config_rom_retries < MAX_RETRIES) { 660 if (device->config_rom_retries < MAX_RETRIES &&
661 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
672 device->config_rom_retries++; 662 device->config_rom_retries++;
673 schedule_delayed_work(&device->work, RETRY_DELAY); 663 schedule_delayed_work(&device->work, RETRY_DELAY);
674 } else { 664 } else {
@@ -682,10 +672,13 @@ static void fw_device_init(struct work_struct *work)
682 } 672 }
683 673
684 err = -ENOMEM; 674 err = -ENOMEM;
675
676 fw_device_get(device);
685 down_write(&idr_rwsem); 677 down_write(&idr_rwsem);
686 if (idr_pre_get(&fw_device_idr, GFP_KERNEL)) 678 if (idr_pre_get(&fw_device_idr, GFP_KERNEL))
687 err = idr_get_new(&fw_device_idr, device, &minor); 679 err = idr_get_new(&fw_device_idr, device, &minor);
688 up_write(&idr_rwsem); 680 up_write(&idr_rwsem);
681
689 if (err < 0) 682 if (err < 0)
690 goto error; 683 goto error;
691 684
@@ -717,13 +710,22 @@ static void fw_device_init(struct work_struct *work)
717 */ 710 */
718 if (atomic_cmpxchg(&device->state, 711 if (atomic_cmpxchg(&device->state,
719 FW_DEVICE_INITIALIZING, 712 FW_DEVICE_INITIALIZING,
720 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) 713 FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) {
721 fw_device_shutdown(&device->work.work); 714 fw_device_shutdown(&device->work.work);
722 else 715 } else {
723 fw_notify("created new fw device %s " 716 if (device->config_rom_retries)
724 "(%d config rom retries, S%d00)\n", 717 fw_notify("created device %s: GUID %08x%08x, S%d00, "
725 device->device.bus_id, device->config_rom_retries, 718 "%d config ROM retries\n",
726 1 << device->max_speed); 719 device->device.bus_id,
720 device->config_rom[3], device->config_rom[4],
721 1 << device->max_speed,
722 device->config_rom_retries);
723 else
724 fw_notify("created device %s: GUID %08x%08x, S%d00\n",
725 device->device.bus_id,
726 device->config_rom[3], device->config_rom[4],
727 1 << device->max_speed);
728 }
727 729
728 /* 730 /*
729 * Reschedule the IRM work if we just finished reading the 731 * Reschedule the IRM work if we just finished reading the
@@ -741,7 +743,9 @@ static void fw_device_init(struct work_struct *work)
741 idr_remove(&fw_device_idr, minor); 743 idr_remove(&fw_device_idr, minor);
742 up_write(&idr_rwsem); 744 up_write(&idr_rwsem);
743 error: 745 error:
744 put_device(&device->device); 746 fw_device_put(device); /* fw_device_idr's reference */
747
748 put_device(&device->device); /* our reference */
745} 749}
746 750
747static int update_unit(struct device *dev, void *data) 751static int update_unit(struct device *dev, void *data)
@@ -791,7 +795,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
791 */ 795 */
792 device_initialize(&device->device); 796 device_initialize(&device->device);
793 atomic_set(&device->state, FW_DEVICE_INITIALIZING); 797 atomic_set(&device->state, FW_DEVICE_INITIALIZING);
794 device->card = fw_card_get(card); 798 atomic_inc(&card->device_count);
799 device->card = card;
795 device->node = fw_node_get(node); 800 device->node = fw_node_get(node);
796 device->node_id = node->node_id; 801 device->node_id = node->node_id;
797 device->generation = card->generation; 802 device->generation = card->generation;
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
index 0854fe2bc110..78ecd3991b7f 100644
--- a/drivers/firewire/fw-device.h
+++ b/drivers/firewire/fw-device.h
@@ -76,14 +76,26 @@ fw_device_is_shutdown(struct fw_device *device)
76 return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; 76 return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
77} 77}
78 78
79struct fw_device *fw_device_get(struct fw_device *device); 79static inline struct fw_device *
80void fw_device_put(struct fw_device *device); 80fw_device_get(struct fw_device *device)
81{
82 get_device(&device->device);
83
84 return device;
85}
86
87static inline void
88fw_device_put(struct fw_device *device)
89{
90 put_device(&device->device);
91}
92
93struct fw_device *fw_device_get_by_devt(dev_t devt);
81int fw_device_enable_phys_dma(struct fw_device *device); 94int fw_device_enable_phys_dma(struct fw_device *device);
82 95
83void fw_device_cdev_update(struct fw_device *device); 96void fw_device_cdev_update(struct fw_device *device);
84void fw_device_cdev_remove(struct fw_device *device); 97void fw_device_cdev_remove(struct fw_device *device);
85 98
86struct fw_device *fw_device_from_devt(dev_t devt);
87extern int fw_cdev_major; 99extern int fw_cdev_major;
88 100
89struct fw_unit { 101struct fw_unit {
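With fw_device_get()/fw_device_put() now inline and fw_device_get_by_devt() returning a referenced device, a caller such as a character-device open path pairs them roughly as below. This is only a sketch mirroring the fw-cdev.c change; my_client, my_open and my_release are hypothetical names.

#include <linux/fs.h>
#include <linux/slab.h>

struct my_client {
	struct fw_device *device;
};

static int my_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct my_client *client;

	device = fw_device_get_by_devt(inode->i_rdev);	/* takes a reference */
	if (device == NULL)
		return -ENODEV;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);			/* drop it on failure */
		return -ENOMEM;
	}

	client->device = device;	/* the reference now belongs to client */
	file->private_data = client;
	return 0;
}

static int my_release(struct inode *inode, struct file *file)
{
	struct my_client *client = file->private_data;

	fw_device_put(client->device);			/* balances my_open() */
	kfree(client);
	return 0;
}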
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 19ece9b6d742..03069a454c07 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -28,14 +28,15 @@
28 * and many others. 28 * and many others.
29 */ 29 */
30 30
31#include <linux/blkdev.h>
32#include <linux/delay.h>
33#include <linux/device.h>
34#include <linux/dma-mapping.h>
31#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/mod_devicetable.h>
32#include <linux/module.h> 37#include <linux/module.h>
33#include <linux/moduleparam.h> 38#include <linux/moduleparam.h>
34#include <linux/mod_devicetable.h>
35#include <linux/device.h>
36#include <linux/scatterlist.h> 39#include <linux/scatterlist.h>
37#include <linux/dma-mapping.h>
38#include <linux/blkdev.h>
39#include <linux/string.h> 40#include <linux/string.h>
40#include <linux/stringify.h> 41#include <linux/stringify.h>
41#include <linux/timer.h> 42#include <linux/timer.h>
@@ -47,9 +48,9 @@
47#include <scsi/scsi_device.h> 48#include <scsi/scsi_device.h>
48#include <scsi/scsi_host.h> 49#include <scsi/scsi_host.h>
49 50
50#include "fw-transaction.h"
51#include "fw-topology.h"
52#include "fw-device.h" 51#include "fw-device.h"
52#include "fw-topology.h"
53#include "fw-transaction.h"
53 54
54/* 55/*
55 * So far only bridges from Oxford Semiconductor are known to support 56 * So far only bridges from Oxford Semiconductor are known to support
@@ -82,6 +83,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
82 * Avoids access beyond actual disk limits on devices with an off-by-one bug. 83 * Avoids access beyond actual disk limits on devices with an off-by-one bug.
83 * Don't use this with devices which don't have this bug. 84 * Don't use this with devices which don't have this bug.
84 * 85 *
86 * - delay inquiry
87 * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry.
88 *
85 * - override internal blacklist 89 * - override internal blacklist
86 * Instead of adding to the built-in blacklist, use only the workarounds 90 * Instead of adding to the built-in blacklist, use only the workarounds
87 * specified in the module load parameter. 91 * specified in the module load parameter.
@@ -91,6 +95,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
91#define SBP2_WORKAROUND_INQUIRY_36 0x2 95#define SBP2_WORKAROUND_INQUIRY_36 0x2
92#define SBP2_WORKAROUND_MODE_SENSE_8 0x4 96#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
93#define SBP2_WORKAROUND_FIX_CAPACITY 0x8 97#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
98#define SBP2_WORKAROUND_DELAY_INQUIRY 0x10
99#define SBP2_INQUIRY_DELAY 12
94#define SBP2_WORKAROUND_OVERRIDE 0x100 100#define SBP2_WORKAROUND_OVERRIDE 0x100
95 101
96static int sbp2_param_workarounds; 102static int sbp2_param_workarounds;
@@ -100,6 +106,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
100 ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) 106 ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
101 ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) 107 ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
102 ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) 108 ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
109 ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
103 ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) 110 ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
104 ", or a combination)"); 111 ", or a combination)");
105 112
@@ -115,7 +122,6 @@ static const char sbp2_driver_name[] = "sbp2";
115struct sbp2_logical_unit { 122struct sbp2_logical_unit {
116 struct sbp2_target *tgt; 123 struct sbp2_target *tgt;
117 struct list_head link; 124 struct list_head link;
118 struct scsi_device *sdev;
119 struct fw_address_handler address_handler; 125 struct fw_address_handler address_handler;
120 struct list_head orb_list; 126 struct list_head orb_list;
121 127
@@ -132,6 +138,8 @@ struct sbp2_logical_unit {
132 int generation; 138 int generation;
133 int retries; 139 int retries;
134 struct delayed_work work; 140 struct delayed_work work;
141 bool has_sdev;
142 bool blocked;
135}; 143};
136 144
137/* 145/*
@@ -141,16 +149,18 @@ struct sbp2_logical_unit {
141struct sbp2_target { 149struct sbp2_target {
142 struct kref kref; 150 struct kref kref;
143 struct fw_unit *unit; 151 struct fw_unit *unit;
152 const char *bus_id;
153 struct list_head lu_list;
144 154
145 u64 management_agent_address; 155 u64 management_agent_address;
146 int directory_id; 156 int directory_id;
147 int node_id; 157 int node_id;
148 int address_high; 158 int address_high;
149 159 unsigned int workarounds;
150 unsigned workarounds;
151 struct list_head lu_list;
152
153 unsigned int mgt_orb_timeout; 160 unsigned int mgt_orb_timeout;
161
162 int dont_block; /* counter for each logical unit */
163 int blocked; /* ditto */
154}; 164};
155 165
156/* 166/*
@@ -160,7 +170,7 @@ struct sbp2_target {
160 */ 170 */
161#define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ 171#define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */
162#define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ 172#define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */
163#define SBP2_ORB_TIMEOUT 2000 /* Timeout in ms */ 173#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */
164#define SBP2_ORB_NULL 0x80000000 174#define SBP2_ORB_NULL 0x80000000
165#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 175#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
166 176
@@ -297,7 +307,7 @@ struct sbp2_command_orb {
297static const struct { 307static const struct {
298 u32 firmware_revision; 308 u32 firmware_revision;
299 u32 model; 309 u32 model;
300 unsigned workarounds; 310 unsigned int workarounds;
301} sbp2_workarounds_table[] = { 311} sbp2_workarounds_table[] = {
302 /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { 312 /* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
303 .firmware_revision = 0x002800, 313 .firmware_revision = 0x002800,
@@ -305,6 +315,11 @@ static const struct {
305 .workarounds = SBP2_WORKAROUND_INQUIRY_36 | 315 .workarounds = SBP2_WORKAROUND_INQUIRY_36 |
306 SBP2_WORKAROUND_MODE_SENSE_8, 316 SBP2_WORKAROUND_MODE_SENSE_8,
307 }, 317 },
318 /* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
319 .firmware_revision = 0x002800,
320 .model = 0x000000,
321 .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY,
322 },
308 /* Initio bridges, actually only needed for some older ones */ { 323 /* Initio bridges, actually only needed for some older ones */ {
309 .firmware_revision = 0x000200, 324 .firmware_revision = 0x000200,
310 .model = ~0, 325 .model = ~0,
@@ -501,6 +516,9 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
501 unsigned int timeout; 516 unsigned int timeout;
502 int retval = -ENOMEM; 517 int retval = -ENOMEM;
503 518
519 if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device))
520 return 0;
521
504 orb = kzalloc(sizeof(*orb), GFP_ATOMIC); 522 orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
505 if (orb == NULL) 523 if (orb == NULL)
506 return -ENOMEM; 524 return -ENOMEM;
@@ -553,20 +571,20 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
553 571
554 retval = -EIO; 572 retval = -EIO;
555 if (sbp2_cancel_orbs(lu) == 0) { 573 if (sbp2_cancel_orbs(lu) == 0) {
556 fw_error("orb reply timed out, rcode=0x%02x\n", 574 fw_error("%s: orb reply timed out, rcode=0x%02x\n",
557 orb->base.rcode); 575 lu->tgt->bus_id, orb->base.rcode);
558 goto out; 576 goto out;
559 } 577 }
560 578
561 if (orb->base.rcode != RCODE_COMPLETE) { 579 if (orb->base.rcode != RCODE_COMPLETE) {
562 fw_error("management write failed, rcode 0x%02x\n", 580 fw_error("%s: management write failed, rcode 0x%02x\n",
563 orb->base.rcode); 581 lu->tgt->bus_id, orb->base.rcode);
564 goto out; 582 goto out;
565 } 583 }
566 584
567 if (STATUS_GET_RESPONSE(orb->status) != 0 || 585 if (STATUS_GET_RESPONSE(orb->status) != 0 ||
568 STATUS_GET_SBP_STATUS(orb->status) != 0) { 586 STATUS_GET_SBP_STATUS(orb->status) != 0) {
569 fw_error("error status: %d:%d\n", 587 fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id,
570 STATUS_GET_RESPONSE(orb->status), 588 STATUS_GET_RESPONSE(orb->status),
571 STATUS_GET_SBP_STATUS(orb->status)); 589 STATUS_GET_SBP_STATUS(orb->status));
572 goto out; 590 goto out;
@@ -590,29 +608,158 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
590 608
591static void 609static void
592complete_agent_reset_write(struct fw_card *card, int rcode, 610complete_agent_reset_write(struct fw_card *card, int rcode,
593 void *payload, size_t length, void *data) 611 void *payload, size_t length, void *done)
594{ 612{
595 struct fw_transaction *t = data; 613 complete(done);
614}
596 615
597 kfree(t); 616static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
617{
618 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
619 DECLARE_COMPLETION_ONSTACK(done);
620 struct fw_transaction t;
621 static u32 z;
622
623 fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST,
624 lu->tgt->node_id, lu->generation, device->max_speed,
625 lu->command_block_agent_address + SBP2_AGENT_RESET,
626 &z, sizeof(z), complete_agent_reset_write, &done);
627 wait_for_completion(&done);
628}
629
630static void
631complete_agent_reset_write_no_wait(struct fw_card *card, int rcode,
632 void *payload, size_t length, void *data)
633{
634 kfree(data);
598} 635}
599 636
600static int sbp2_agent_reset(struct sbp2_logical_unit *lu) 637static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
601{ 638{
602 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 639 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
603 struct fw_transaction *t; 640 struct fw_transaction *t;
604 static u32 zero; 641 static u32 z;
605 642
606 t = kzalloc(sizeof(*t), GFP_ATOMIC); 643 t = kmalloc(sizeof(*t), GFP_ATOMIC);
607 if (t == NULL) 644 if (t == NULL)
608 return -ENOMEM; 645 return;
609 646
610 fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, 647 fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
611 lu->tgt->node_id, lu->generation, device->max_speed, 648 lu->tgt->node_id, lu->generation, device->max_speed,
612 lu->command_block_agent_address + SBP2_AGENT_RESET, 649 lu->command_block_agent_address + SBP2_AGENT_RESET,
613 &zero, sizeof(zero), complete_agent_reset_write, t); 650 &z, sizeof(z), complete_agent_reset_write_no_wait, t);
651}
614 652
615 return 0; 653static void sbp2_set_generation(struct sbp2_logical_unit *lu, int generation)
654{
655 struct fw_card *card = fw_device(lu->tgt->unit->device.parent)->card;
656 unsigned long flags;
657
658 /* serialize with comparisons of lu->generation and card->generation */
659 spin_lock_irqsave(&card->lock, flags);
660 lu->generation = generation;
661 spin_unlock_irqrestore(&card->lock, flags);
662}
663
664static inline void sbp2_allow_block(struct sbp2_logical_unit *lu)
665{
666 /*
667 * We may access dont_block without taking card->lock here:
668 * All callers of sbp2_allow_block() and all callers of sbp2_unblock()
669 * are currently serialized against each other.
670 * And a wrong result in sbp2_conditionally_block()'s access of
671 * dont_block is rather harmless; it simply misses its first chance.
672 */
673 --lu->tgt->dont_block;
674}
675
676/*
677 * Blocks lu->tgt if all of the following conditions are met:
678 * - Login, INQUIRY, and high-level SCSI setup of all of the target's
679 * logical units have been finished (indicated by dont_block == 0).
680 * - lu->generation is stale.
681 *
682 * Note, scsi_block_requests() must be called while holding card->lock,
683 * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
684 * unblock the target.
685 */
686static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
687{
688 struct sbp2_target *tgt = lu->tgt;
689 struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
690 struct Scsi_Host *shost =
691 container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
692 unsigned long flags;
693
694 spin_lock_irqsave(&card->lock, flags);
695 if (!tgt->dont_block && !lu->blocked &&
696 lu->generation != card->generation) {
697 lu->blocked = true;
698 if (++tgt->blocked == 1) {
699 scsi_block_requests(shost);
700 fw_notify("blocked %s\n", lu->tgt->bus_id);
701 }
702 }
703 spin_unlock_irqrestore(&card->lock, flags);
704}
705
706/*
707 * Unblocks lu->tgt as soon as all its logical units can be unblocked.
708 * Note, it is harmless to run scsi_unblock_requests() outside the
709 * card->lock protected section. On the other hand, running it inside
710 * the section might clash with shost->host_lock.
711 */
712static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
713{
714 struct sbp2_target *tgt = lu->tgt;
715 struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
716 struct Scsi_Host *shost =
717 container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
718 unsigned long flags;
719 bool unblock = false;
720
721 spin_lock_irqsave(&card->lock, flags);
722 if (lu->blocked && lu->generation == card->generation) {
723 lu->blocked = false;
724 unblock = --tgt->blocked == 0;
725 }
726 spin_unlock_irqrestore(&card->lock, flags);
727
728 if (unblock) {
729 scsi_unblock_requests(shost);
730 fw_notify("unblocked %s\n", lu->tgt->bus_id);
731 }
732}
733
734/*
735 * Prevents future blocking of tgt and unblocks it.
736 * Note, it is harmless to run scsi_unblock_requests() outside the
737 * card->lock protected section. On the other hand, running it inside
738 * the section might clash with shost->host_lock.
739 */
740static void sbp2_unblock(struct sbp2_target *tgt)
741{
742 struct fw_card *card = fw_device(tgt->unit->device.parent)->card;
743 struct Scsi_Host *shost =
744 container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
745 unsigned long flags;
746
747 spin_lock_irqsave(&card->lock, flags);
748 ++tgt->dont_block;
749 spin_unlock_irqrestore(&card->lock, flags);
750
751 scsi_unblock_requests(shost);
752}
753
754static int sbp2_lun2int(u16 lun)
755{
756 struct scsi_lun eight_bytes_lun;
757
758 memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
759 eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff;
760 eight_bytes_lun.scsi_lun[1] = lun & 0xff;
761
762 return scsilun_to_int(&eight_bytes_lun);
616} 763}
617 764
618static void sbp2_release_target(struct kref *kref) 765static void sbp2_release_target(struct kref *kref)
@@ -621,26 +768,31 @@ static void sbp2_release_target(struct kref *kref)
621 struct sbp2_logical_unit *lu, *next; 768 struct sbp2_logical_unit *lu, *next;
622 struct Scsi_Host *shost = 769 struct Scsi_Host *shost =
623 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); 770 container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
771 struct scsi_device *sdev;
624 struct fw_device *device = fw_device(tgt->unit->device.parent); 772 struct fw_device *device = fw_device(tgt->unit->device.parent);
625 773
626 list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { 774 /* prevent deadlocks */
627 if (lu->sdev) 775 sbp2_unblock(tgt);
628 scsi_remove_device(lu->sdev);
629 776
630 if (!fw_device_is_shutdown(device)) 777 list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
631 sbp2_send_management_orb(lu, tgt->node_id, 778 sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
632 lu->generation, SBP2_LOGOUT_REQUEST, 779 if (sdev) {
633 lu->login_id, NULL); 780 scsi_remove_device(sdev);
781 scsi_device_put(sdev);
782 }
783 sbp2_send_management_orb(lu, tgt->node_id, lu->generation,
784 SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
634 785
635 fw_core_remove_address_handler(&lu->address_handler); 786 fw_core_remove_address_handler(&lu->address_handler);
636 list_del(&lu->link); 787 list_del(&lu->link);
637 kfree(lu); 788 kfree(lu);
638 } 789 }
639 scsi_remove_host(shost); 790 scsi_remove_host(shost);
640 fw_notify("released %s\n", tgt->unit->device.bus_id); 791 fw_notify("released %s\n", tgt->bus_id);
641 792
642 put_device(&tgt->unit->device); 793 put_device(&tgt->unit->device);
643 scsi_host_put(shost); 794 scsi_host_put(shost);
795 fw_device_put(device);
644} 796}
645 797
646static struct workqueue_struct *sbp2_wq; 798static struct workqueue_struct *sbp2_wq;
@@ -666,33 +818,42 @@ static void sbp2_login(struct work_struct *work)
666{ 818{
667 struct sbp2_logical_unit *lu = 819 struct sbp2_logical_unit *lu =
668 container_of(work, struct sbp2_logical_unit, work.work); 820 container_of(work, struct sbp2_logical_unit, work.work);
669 struct Scsi_Host *shost = 821 struct sbp2_target *tgt = lu->tgt;
670 container_of((void *)lu->tgt, struct Scsi_Host, hostdata[0]); 822 struct fw_device *device = fw_device(tgt->unit->device.parent);
823 struct Scsi_Host *shost;
671 struct scsi_device *sdev; 824 struct scsi_device *sdev;
672 struct scsi_lun eight_bytes_lun;
673 struct fw_unit *unit = lu->tgt->unit;
674 struct fw_device *device = fw_device(unit->device.parent);
675 struct sbp2_login_response response; 825 struct sbp2_login_response response;
676 int generation, node_id, local_node_id; 826 int generation, node_id, local_node_id;
677 827
828 if (fw_device_is_shutdown(device))
829 goto out;
830
678 generation = device->generation; 831 generation = device->generation;
679 smp_rmb(); /* node_id must not be older than generation */ 832 smp_rmb(); /* node_id must not be older than generation */
680 node_id = device->node_id; 833 node_id = device->node_id;
681 local_node_id = device->card->node_id; 834 local_node_id = device->card->node_id;
682 835
836 /* If this is a re-login attempt, log out, or we might be rejected. */
837 if (lu->has_sdev)
838 sbp2_send_management_orb(lu, device->node_id, generation,
839 SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
840
683 if (sbp2_send_management_orb(lu, node_id, generation, 841 if (sbp2_send_management_orb(lu, node_id, generation,
684 SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { 842 SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
685 if (lu->retries++ < 5) 843 if (lu->retries++ < 5) {
686 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); 844 sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
687 else 845 } else {
688 fw_error("failed to login to %s LUN %04x\n", 846 fw_error("%s: failed to login to LUN %04x\n",
689 unit->device.bus_id, lu->lun); 847 tgt->bus_id, lu->lun);
848 /* Let any waiting I/O fail from now on. */
849 sbp2_unblock(lu->tgt);
850 }
690 goto out; 851 goto out;
691 } 852 }
692 853
693 lu->generation = generation; 854 tgt->node_id = node_id;
694 lu->tgt->node_id = node_id; 855 tgt->address_high = local_node_id << 16;
695 lu->tgt->address_high = local_node_id << 16; 856 sbp2_set_generation(lu, generation);
696 857
697 /* Get command block agent offset and login id. */ 858 /* Get command block agent offset and login id. */
698 lu->command_block_agent_address = 859 lu->command_block_agent_address =
@@ -700,8 +861,8 @@ static void sbp2_login(struct work_struct *work)
700 response.command_block_agent.low; 861 response.command_block_agent.low;
701 lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); 862 lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);
702 863
703 fw_notify("logged in to %s LUN %04x (%d retries)\n", 864 fw_notify("%s: logged in to LUN %04x (%d retries)\n",
704 unit->device.bus_id, lu->lun, lu->retries); 865 tgt->bus_id, lu->lun, lu->retries);
705 866
706#if 0 867#if 0
707 /* FIXME: The linux1394 sbp2 does this last step. */ 868 /* FIXME: The linux1394 sbp2 does this last step. */
@@ -711,26 +872,58 @@ static void sbp2_login(struct work_struct *work)
711 PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); 872 PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
712 sbp2_agent_reset(lu); 873 sbp2_agent_reset(lu);
713 874
714 memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); 875 /* This was a re-login. */
715 eight_bytes_lun.scsi_lun[0] = (lu->lun >> 8) & 0xff; 876 if (lu->has_sdev) {
716 eight_bytes_lun.scsi_lun[1] = lu->lun & 0xff; 877 sbp2_cancel_orbs(lu);
878 sbp2_conditionally_unblock(lu);
879 goto out;
880 }
717 881
718 sdev = __scsi_add_device(shost, 0, 0, 882 if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
719 scsilun_to_int(&eight_bytes_lun), lu); 883 ssleep(SBP2_INQUIRY_DELAY);
720 if (IS_ERR(sdev)) { 884
721 sbp2_send_management_orb(lu, node_id, generation, 885 shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
722 SBP2_LOGOUT_REQUEST, lu->login_id, NULL); 886 sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu);
723 /* 887 /*
724 * Set this back to sbp2_login so we fall back and 888 * FIXME: We are unable to perform reconnects while in sbp2_login().
725 * retry login on bus reset. 889 * Therefore __scsi_add_device() will get into trouble if a bus reset
726 */ 890 * happens in parallel. It will either fail or leave us with an
727 PREPARE_DELAYED_WORK(&lu->work, sbp2_login); 891 * unusable sdev. As a workaround we check for this and retry the
728 } else { 892 * whole login and SCSI probing.
729 lu->sdev = sdev; 893 */
894
895 /* Reported error during __scsi_add_device() */
896 if (IS_ERR(sdev))
897 goto out_logout_login;
898
899 /* Unreported error during __scsi_add_device() */
900 smp_rmb(); /* get current card generation */
901 if (generation != device->card->generation) {
902 scsi_remove_device(sdev);
730 scsi_device_put(sdev); 903 scsi_device_put(sdev);
904 goto out_logout_login;
731 } 905 }
906
907 /* No error during __scsi_add_device() */
908 lu->has_sdev = true;
909 scsi_device_put(sdev);
910 sbp2_allow_block(lu);
911 goto out;
912
913 out_logout_login:
914 smp_rmb(); /* generation may have changed */
915 generation = device->generation;
916 smp_rmb(); /* node_id must not be older than generation */
917
918 sbp2_send_management_orb(lu, device->node_id, generation,
919 SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
920 /*
921 * If a bus reset happened, sbp2_update will have requeued
922 * lu->work already. Reset the work from reconnect to login.
923 */
924 PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
732 out: 925 out:
733 sbp2_target_put(lu->tgt); 926 sbp2_target_put(tgt);
734} 927}
735 928
736static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) 929static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
@@ -751,10 +944,12 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
751 return -ENOMEM; 944 return -ENOMEM;
752 } 945 }
753 946
754 lu->tgt = tgt; 947 lu->tgt = tgt;
755 lu->sdev = NULL; 948 lu->lun = lun_entry & 0xffff;
756 lu->lun = lun_entry & 0xffff; 949 lu->retries = 0;
757 lu->retries = 0; 950 lu->has_sdev = false;
951 lu->blocked = false;
952 ++tgt->dont_block;
758 INIT_LIST_HEAD(&lu->orb_list); 953 INIT_LIST_HEAD(&lu->orb_list);
759 INIT_DELAYED_WORK(&lu->work, sbp2_login); 954 INIT_DELAYED_WORK(&lu->work, sbp2_login);
760 955
@@ -813,7 +1008,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
813 if (timeout > tgt->mgt_orb_timeout) 1008 if (timeout > tgt->mgt_orb_timeout)
814 fw_notify("%s: config rom contains %ds " 1009 fw_notify("%s: config rom contains %ds "
815 "management ORB timeout, limiting " 1010 "management ORB timeout, limiting "
816 "to %ds\n", tgt->unit->device.bus_id, 1011 "to %ds\n", tgt->bus_id,
817 timeout / 1000, 1012 timeout / 1000,
818 tgt->mgt_orb_timeout / 1000); 1013 tgt->mgt_orb_timeout / 1000);
819 break; 1014 break;
@@ -836,12 +1031,12 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
836 u32 firmware_revision) 1031 u32 firmware_revision)
837{ 1032{
838 int i; 1033 int i;
839 unsigned w = sbp2_param_workarounds; 1034 unsigned int w = sbp2_param_workarounds;
840 1035
841 if (w) 1036 if (w)
842 fw_notify("Please notify linux1394-devel@lists.sourceforge.net " 1037 fw_notify("Please notify linux1394-devel@lists.sourceforge.net "
843 "if you need the workarounds parameter for %s\n", 1038 "if you need the workarounds parameter for %s\n",
844 tgt->unit->device.bus_id); 1039 tgt->bus_id);
845 1040
846 if (w & SBP2_WORKAROUND_OVERRIDE) 1041 if (w & SBP2_WORKAROUND_OVERRIDE)
847 goto out; 1042 goto out;
@@ -863,8 +1058,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
863 if (w) 1058 if (w)
864 fw_notify("Workarounds for %s: 0x%x " 1059 fw_notify("Workarounds for %s: 0x%x "
865 "(firmware_revision 0x%06x, model_id 0x%06x)\n", 1060 "(firmware_revision 0x%06x, model_id 0x%06x)\n",
866 tgt->unit->device.bus_id, 1061 tgt->bus_id, w, firmware_revision, model);
867 w, firmware_revision, model);
868 tgt->workarounds = w; 1062 tgt->workarounds = w;
869} 1063}
870 1064
@@ -888,6 +1082,7 @@ static int sbp2_probe(struct device *dev)
888 tgt->unit = unit; 1082 tgt->unit = unit;
889 kref_init(&tgt->kref); 1083 kref_init(&tgt->kref);
890 INIT_LIST_HEAD(&tgt->lu_list); 1084 INIT_LIST_HEAD(&tgt->lu_list);
1085 tgt->bus_id = unit->device.bus_id;
891 1086
892 if (fw_device_enable_phys_dma(device) < 0) 1087 if (fw_device_enable_phys_dma(device) < 0)
893 goto fail_shost_put; 1088 goto fail_shost_put;
@@ -895,6 +1090,8 @@ static int sbp2_probe(struct device *dev)
895 if (scsi_add_host(shost, &unit->device) < 0) 1090 if (scsi_add_host(shost, &unit->device) < 0)
896 goto fail_shost_put; 1091 goto fail_shost_put;
897 1092
1093 fw_device_get(device);
1094
898 /* Initialize to values that won't match anything in our table. */ 1095 /* Initialize to values that won't match anything in our table. */
899 firmware_revision = 0xff000000; 1096 firmware_revision = 0xff000000;
900 model = 0xff000000; 1097 model = 0xff000000;
@@ -938,10 +1135,13 @@ static void sbp2_reconnect(struct work_struct *work)
938{ 1135{
939 struct sbp2_logical_unit *lu = 1136 struct sbp2_logical_unit *lu =
940 container_of(work, struct sbp2_logical_unit, work.work); 1137 container_of(work, struct sbp2_logical_unit, work.work);
941 struct fw_unit *unit = lu->tgt->unit; 1138 struct sbp2_target *tgt = lu->tgt;
942 struct fw_device *device = fw_device(unit->device.parent); 1139 struct fw_device *device = fw_device(tgt->unit->device.parent);
943 int generation, node_id, local_node_id; 1140 int generation, node_id, local_node_id;
944 1141
1142 if (fw_device_is_shutdown(device))
1143 goto out;
1144
945 generation = device->generation; 1145 generation = device->generation;
946 smp_rmb(); /* node_id must not be older than generation */ 1146 smp_rmb(); /* node_id must not be older than generation */
947 node_id = device->node_id; 1147 node_id = device->node_id;
@@ -950,10 +1150,17 @@ static void sbp2_reconnect(struct work_struct *work)
950 if (sbp2_send_management_orb(lu, node_id, generation, 1150 if (sbp2_send_management_orb(lu, node_id, generation,
951 SBP2_RECONNECT_REQUEST, 1151 SBP2_RECONNECT_REQUEST,
952 lu->login_id, NULL) < 0) { 1152 lu->login_id, NULL) < 0) {
953 if (lu->retries++ >= 5) { 1153 /*
954 fw_error("failed to reconnect to %s\n", 1154 * If reconnect was impossible even though we are in the
955 unit->device.bus_id); 1155 * current generation, fall back and try to log in again.
956 /* Fall back and try to log in again. */ 1156 *
1157 * We could check for "Function rejected" status, but
1158 * looking at the bus generation is simpler and more general.
1159 */
1160 smp_rmb(); /* get current card generation */
1161 if (generation == device->card->generation ||
1162 lu->retries++ >= 5) {
1163 fw_error("%s: failed to reconnect\n", tgt->bus_id);
957 lu->retries = 0; 1164 lu->retries = 0;
958 PREPARE_DELAYED_WORK(&lu->work, sbp2_login); 1165 PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
959 } 1166 }
@@ -961,17 +1168,18 @@ static void sbp2_reconnect(struct work_struct *work)
961 goto out; 1168 goto out;
962 } 1169 }
963 1170
964 lu->generation = generation; 1171 tgt->node_id = node_id;
965 lu->tgt->node_id = node_id; 1172 tgt->address_high = local_node_id << 16;
966 lu->tgt->address_high = local_node_id << 16; 1173 sbp2_set_generation(lu, generation);
967 1174
968 fw_notify("reconnected to %s LUN %04x (%d retries)\n", 1175 fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
969 unit->device.bus_id, lu->lun, lu->retries); 1176 tgt->bus_id, lu->lun, lu->retries);
970 1177
971 sbp2_agent_reset(lu); 1178 sbp2_agent_reset(lu);
972 sbp2_cancel_orbs(lu); 1179 sbp2_cancel_orbs(lu);
1180 sbp2_conditionally_unblock(lu);
973 out: 1181 out:
974 sbp2_target_put(lu->tgt); 1182 sbp2_target_put(tgt);
975} 1183}
976 1184
977static void sbp2_update(struct fw_unit *unit) 1185static void sbp2_update(struct fw_unit *unit)
@@ -986,6 +1194,7 @@ static void sbp2_update(struct fw_unit *unit)
986 * Iteration over tgt->lu_list is therefore safe here. 1194 * Iteration over tgt->lu_list is therefore safe here.
987 */ 1195 */
988 list_for_each_entry(lu, &tgt->lu_list, link) { 1196 list_for_each_entry(lu, &tgt->lu_list, link) {
1197 sbp2_conditionally_block(lu);
989 lu->retries = 0; 1198 lu->retries = 0;
990 sbp2_queue_work(lu, 0); 1199 sbp2_queue_work(lu, 0);
991 } 1200 }
@@ -1063,7 +1272,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
1063 1272
1064 if (status != NULL) { 1273 if (status != NULL) {
1065 if (STATUS_GET_DEAD(*status)) 1274 if (STATUS_GET_DEAD(*status))
1066 sbp2_agent_reset(orb->lu); 1275 sbp2_agent_reset_no_wait(orb->lu);
1067 1276
1068 switch (STATUS_GET_RESPONSE(*status)) { 1277 switch (STATUS_GET_RESPONSE(*status)) {
1069 case SBP2_STATUS_REQUEST_COMPLETE: 1278 case SBP2_STATUS_REQUEST_COMPLETE:
@@ -1089,6 +1298,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
1089 * or when sending the write (less likely). 1298 * or when sending the write (less likely).
1090 */ 1299 */
1091 result = DID_BUS_BUSY << 16; 1300 result = DID_BUS_BUSY << 16;
1301 sbp2_conditionally_block(orb->lu);
1092 } 1302 }
1093 1303
1094 dma_unmap_single(device->card->device, orb->base.request_bus, 1304 dma_unmap_single(device->card->device, orb->base.request_bus,
@@ -1197,7 +1407,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
1197 struct sbp2_logical_unit *lu = cmd->device->hostdata; 1407 struct sbp2_logical_unit *lu = cmd->device->hostdata;
1198 struct fw_device *device = fw_device(lu->tgt->unit->device.parent); 1408 struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
1199 struct sbp2_command_orb *orb; 1409 struct sbp2_command_orb *orb;
1200 unsigned max_payload; 1410 unsigned int max_payload;
1201 int retval = SCSI_MLQUEUE_HOST_BUSY; 1411 int retval = SCSI_MLQUEUE_HOST_BUSY;
1202 1412
1203 /* 1413 /*
@@ -1275,6 +1485,10 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
1275{ 1485{
1276 struct sbp2_logical_unit *lu = sdev->hostdata; 1486 struct sbp2_logical_unit *lu = sdev->hostdata;
1277 1487
1488 /* (Re-)Adding logical units via the SCSI stack is not supported. */
1489 if (!lu)
1490 return -ENOSYS;
1491
1278 sdev->allow_restart = 1; 1492 sdev->allow_restart = 1;
1279 1493
1280 /* 1494 /*
@@ -1319,7 +1533,7 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
1319{ 1533{
1320 struct sbp2_logical_unit *lu = cmd->device->hostdata; 1534 struct sbp2_logical_unit *lu = cmd->device->hostdata;
1321 1535
1322 fw_notify("sbp2_scsi_abort\n"); 1536 fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id);
1323 sbp2_agent_reset(lu); 1537 sbp2_agent_reset(lu);
1324 sbp2_cancel_orbs(lu); 1538 sbp2_cancel_orbs(lu);
1325 1539
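The blocking scheme introduced above reduces to a pair of counters: each logical unit blocks the SCSI host at most once, tgt->blocked counts how many currently do, and the host is released when that count returns to zero. A simplified sketch with the card->lock and dont_block handling left out:

static void block_lu(struct sbp2_target *tgt, struct sbp2_logical_unit *lu,
		     struct Scsi_Host *shost)
{
	if (!lu->blocked) {
		lu->blocked = true;
		if (++tgt->blocked == 1)	/* first blocker stops the host */
			scsi_block_requests(shost);
	}
}

static void unblock_lu(struct sbp2_target *tgt, struct sbp2_logical_unit *lu,
		       struct Scsi_Host *shost)
{
	if (lu->blocked) {
		lu->blocked = false;
		if (--tgt->blocked == 0)	/* last blocker releases it */
			scsi_unblock_requests(shost);
	}
}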
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 172c1867e9aa..e47bb040197a 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -383,6 +383,7 @@ void fw_destroy_nodes(struct fw_card *card)
383 card->color++; 383 card->color++;
384 if (card->local_node != NULL) 384 if (card->local_node != NULL)
385 for_each_fw_node(card, card->local_node, report_lost_node); 385 for_each_fw_node(card, card->local_node, report_lost_node);
386 card->local_node = NULL;
386 spin_unlock_irqrestore(&card->lock, flags); 387 spin_unlock_irqrestore(&card->lock, flags);
387} 388}
388 389
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
index fa7967b57408..09cb72870454 100644
--- a/drivers/firewire/fw-transaction.h
+++ b/drivers/firewire/fw-transaction.h
@@ -26,6 +26,7 @@
26#include <linux/fs.h> 26#include <linux/fs.h>
27#include <linux/dma-mapping.h> 27#include <linux/dma-mapping.h>
28#include <linux/firewire-constants.h> 28#include <linux/firewire-constants.h>
29#include <asm/atomic.h>
29 30
30#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) 31#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
31#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) 32#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
@@ -219,6 +220,7 @@ extern struct bus_type fw_bus_type;
219struct fw_card { 220struct fw_card {
220 const struct fw_card_driver *driver; 221 const struct fw_card_driver *driver;
221 struct device *device; 222 struct device *device;
223 atomic_t device_count;
222 struct kref kref; 224 struct kref kref;
223 225
224 int node_id; 226 int node_id;
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 653265a40b7f..4072449ad1cd 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -10,10 +10,9 @@
10 10
11static char dmi_empty_string[] = " "; 11static char dmi_empty_string[] = " ";
12 12
13static char * __init dmi_string(const struct dmi_header *dm, u8 s) 13static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
14{ 14{
15 const u8 *bp = ((u8 *) dm) + dm->length; 15 const u8 *bp = ((u8 *) dm) + dm->length;
16 char *str = "";
17 16
18 if (s) { 17 if (s) {
19 s--; 18 s--;
@@ -28,14 +27,29 @@ static char * __init dmi_string(const struct dmi_header *dm, u8 s)
28 27
29 if (!memcmp(bp, dmi_empty_string, cmp_len)) 28 if (!memcmp(bp, dmi_empty_string, cmp_len))
30 return dmi_empty_string; 29 return dmi_empty_string;
31 str = dmi_alloc(len); 30 return bp;
32 if (str != NULL)
33 strcpy(str, bp);
34 else
35 printk(KERN_ERR "dmi_string: cannot allocate %Zu bytes.\n", len);
36 } 31 }
37 } 32 }
38 33
34 return "";
35}
36
37static char * __init dmi_string(const struct dmi_header *dm, u8 s)
38{
39 const char *bp = dmi_string_nosave(dm, s);
40 char *str;
41 size_t len;
42
43 if (bp == dmi_empty_string)
44 return dmi_empty_string;
45
46 len = strlen(bp) + 1;
47 str = dmi_alloc(len);
48 if (str != NULL)
49 strcpy(str, bp);
50 else
51 printk(KERN_ERR "dmi_string: cannot allocate %Zu bytes.\n", len);
52
39 return str; 53 return str;
40} 54}
41 55
@@ -167,10 +181,30 @@ static void __init dmi_save_type(const struct dmi_header *dm, int slot, int inde
167 dmi_ident[slot] = s; 181 dmi_ident[slot] = s;
168} 182}
169 183
184static void __init dmi_save_one_device(int type, const char *name)
185{
186 struct dmi_device *dev;
187
188 /* No duplicate device */
189 if (dmi_find_device(type, name, NULL))
190 return;
191
192 dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
193 if (!dev) {
194 printk(KERN_ERR "dmi_save_one_device: out of memory.\n");
195 return;
196 }
197
198 dev->type = type;
199 strcpy((char *)(dev + 1), name);
200 dev->name = (char *)(dev + 1);
201 dev->device_data = NULL;
202 list_add(&dev->list, &dmi_devices);
203}
204
170static void __init dmi_save_devices(const struct dmi_header *dm) 205static void __init dmi_save_devices(const struct dmi_header *dm)
171{ 206{
172 int i, count = (dm->length - sizeof(struct dmi_header)) / 2; 207 int i, count = (dm->length - sizeof(struct dmi_header)) / 2;
173 struct dmi_device *dev;
174 208
175 for (i = 0; i < count; i++) { 209 for (i = 0; i < count; i++) {
176 const char *d = (char *)(dm + 1) + (i * 2); 210 const char *d = (char *)(dm + 1) + (i * 2);
@@ -179,23 +213,10 @@ static void __init dmi_save_devices(const struct dmi_header *dm)
179 if ((*d & 0x80) == 0) 213 if ((*d & 0x80) == 0)
180 continue; 214 continue;
181 215
182 dev = dmi_alloc(sizeof(*dev)); 216 dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d + 1)));
183 if (!dev) {
184 printk(KERN_ERR "dmi_save_devices: out of memory.\n");
185 break;
186 }
187
188 dev->type = *d++ & 0x7f;
189 dev->name = dmi_string(dm, *d);
190 dev->device_data = NULL;
191 list_add(&dev->list, &dmi_devices);
192 } 217 }
193} 218}
194 219
195static struct dmi_device empty_oem_string_dev = {
196 .name = dmi_empty_string,
197};
198
199static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) 220static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
200{ 221{
201 int i, count = *(u8 *)(dm + 1); 222 int i, count = *(u8 *)(dm + 1);
@@ -204,10 +225,8 @@ static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
204 for (i = 1; i <= count; i++) { 225 for (i = 1; i <= count; i++) {
205 char *devname = dmi_string(dm, i); 226 char *devname = dmi_string(dm, i);
206 227
207 if (!strcmp(devname, dmi_empty_string)) { 228 if (devname == dmi_empty_string)
208 list_add(&empty_oem_string_dev.list, &dmi_devices);
209 continue; 229 continue;
210 }
211 230
212 dev = dmi_alloc(sizeof(*dev)); 231 dev = dmi_alloc(sizeof(*dev));
213 if (!dev) { 232 if (!dev) {
@@ -253,23 +272,12 @@ static void __init dmi_save_ipmi_device(const struct dmi_header *dm)
253static void __init dmi_save_extended_devices(const struct dmi_header *dm) 272static void __init dmi_save_extended_devices(const struct dmi_header *dm)
254{ 273{
255 const u8 *d = (u8*) dm + 5; 274 const u8 *d = (u8*) dm + 5;
256 struct dmi_device *dev;
257 275
258 /* Skip disabled device */ 276 /* Skip disabled device */
259 if ((*d & 0x80) == 0) 277 if ((*d & 0x80) == 0)
260 return; 278 return;
261 279
262 dev = dmi_alloc(sizeof(*dev)); 280 dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d - 1)));
263 if (!dev) {
264 printk(KERN_ERR "dmi_save_extended_devices: out of memory.\n");
265 return;
266 }
267
268 dev->type = *d-- & 0x7f;
269 dev->name = dmi_string(dm, *d);
270 dev->device_data = NULL;
271
272 list_add(&dev->list, &dmi_devices);
273} 281}
274 282
275/* 283/*
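Since dmi_save_one_device() now rejects duplicates via dmi_find_device(), consumers can keep walking the device list in the usual way. A hedged example of such a walk (the printk wording is illustrative only):

#include <linux/dmi.h>

static void list_ipmi_devices(void)
{
	const struct dmi_device *dev = NULL;

	/* iterate over every IPMI entry the firmware declared */
	while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev)))
		printk(KERN_INFO "DMI IPMI device: %s\n", dev->name);
}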
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 92583cd4bffd..6e72fd31184d 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -184,6 +184,7 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
184 gc->direction_output = pca953x_gpio_direction_output; 184 gc->direction_output = pca953x_gpio_direction_output;
185 gc->get = pca953x_gpio_get_value; 185 gc->get = pca953x_gpio_get_value;
186 gc->set = pca953x_gpio_set_value; 186 gc->set = pca953x_gpio_set_value;
187 gc->can_sleep = 1;
187 188
188 gc->base = chip->gpio_start; 189 gc->base = chip->gpio_start;
189 gc->ngpio = gpios; 190 gc->ngpio = gpios;
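Marking the pca953x chip can_sleep tells gpiolib the pins sit behind a sleeping (I2C) bus, so consumers must use the *_cansleep accessors from non-atomic context. A usage sketch, assuming GPIO 200 has already been requested and mapped to an expander pin on some board:

#include <linux/gpio.h>

static int read_expander_pin(void)
{
	/* may sleep: the read is carried out by an I2C transfer */
	return gpio_get_value_cansleep(200);
}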
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index b61f56b6f311..476b0bb72d6c 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -177,6 +177,8 @@ config I2C_I801
177 ESB2 177 ESB2
178 ICH8 178 ICH8
179 ICH9 179 ICH9
180 Tolapai
181 ICH10
180 182
181 This driver can also be built as a module. If so, the module 183 This driver can also be built as a module. If so, the module
182 will be called i2c-i801. 184 will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 573abe440842..2fa43183d375 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -335,7 +335,7 @@ static int __devinit amd756_probe(struct pci_dev *pdev,
335 u8 temp; 335 u8 temp;
336 336
337 /* driver_data might come from user-space, so check it */ 337 /* driver_data might come from user-space, so check it */
338 if (id->driver_data > ARRAY_SIZE(chipname)) 338 if (id->driver_data >= ARRAY_SIZE(chipname))
339 return -EINVAL; 339 return -EINVAL;
340 340
341 if (amd756_ioport) { 341 if (amd756_ioport) {
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index aa9157913b9a..b0f771fe4326 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -40,7 +40,9 @@
40 82801G (ICH7) 0x27da 32 hard yes yes yes 40 82801G (ICH7) 0x27da 32 hard yes yes yes
41 82801H (ICH8) 0x283e 32 hard yes yes yes 41 82801H (ICH8) 0x283e 32 hard yes yes yes
42 82801I (ICH9) 0x2930 32 hard yes yes yes 42 82801I (ICH9) 0x2930 32 hard yes yes yes
43 Tolapai 0x5032 32 hard yes ? ? 43 Tolapai 0x5032 32 hard yes yes yes
44 ICH10 0x3a30 32 hard yes yes yes
45 ICH10 0x3a60 32 hard yes yes yes
44 46
45 Features supported by this driver: 47 Features supported by this driver:
46 Software PEC no 48 Software PEC no
@@ -588,6 +590,8 @@ static struct pci_device_id i801_ids[] = {
588 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) }, 590 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) },
589 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) }, 591 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) },
590 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) }, 592 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) },
593 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
594 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
591 { 0, } 595 { 0, }
592}; 596};
593 597
@@ -608,10 +612,12 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
608 case PCI_DEVICE_ID_INTEL_ESB2_17: 612 case PCI_DEVICE_ID_INTEL_ESB2_17:
609 case PCI_DEVICE_ID_INTEL_ICH8_5: 613 case PCI_DEVICE_ID_INTEL_ICH8_5:
610 case PCI_DEVICE_ID_INTEL_ICH9_6: 614 case PCI_DEVICE_ID_INTEL_ICH9_6:
615 case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
616 case PCI_DEVICE_ID_INTEL_ICH10_4:
617 case PCI_DEVICE_ID_INTEL_ICH10_5:
611 i801_features |= FEATURE_I2C_BLOCK_READ; 618 i801_features |= FEATURE_I2C_BLOCK_READ;
612 /* fall through */ 619 /* fall through */
613 case PCI_DEVICE_ID_INTEL_82801DB_3: 620 case PCI_DEVICE_ID_INTEL_82801DB_3:
614 case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
615 i801_features |= FEATURE_SMBUS_PEC; 621 i801_features |= FEATURE_SMBUS_PEC;
616 i801_features |= FEATURE_BLOCK_BUFFER; 622 i801_features |= FEATURE_BLOCK_BUFFER;
617 break; 623 break;
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 5161aaf9341b..496ee875eb4f 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -125,6 +125,13 @@ static int __devinit pca_isa_probe(struct device *dev, unsigned int id)
125 125
126 dev_info(dev, "i/o base %#08lx. irq %d\n", base, irq); 126 dev_info(dev, "i/o base %#08lx. irq %d\n", base, irq);
127 127
128#ifdef CONFIG_PPC_MERGE
129 if (check_legacy_ioport(base)) {
130 dev_err(dev, "I/O address %#08lx is not available\n", base);
131 goto out;
132 }
133#endif
134
128 if (!request_region(base, IO_SIZE, "i2c-pca-isa")) { 135 if (!request_region(base, IO_SIZE, "i2c-pca-isa")) {
129 dev_err(dev, "I/O address %#08lx is in use\n", base); 136 dev_err(dev, "I/O address %#08lx is in use\n", base);
130 goto out; 137 goto out;
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index be99c02ecac5..b03af5653c65 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -122,7 +122,7 @@ struct pmcmsptwi_data {
122}; 122};
123 123
124/* The default settings */ 124/* The default settings */
125const static struct pmcmsptwi_clockcfg pmcmsptwi_defclockcfg = { 125static const struct pmcmsptwi_clockcfg pmcmsptwi_defclockcfg = {
126 .standard = { 126 .standard = {
127 .filter = 0x3, 127 .filter = 0x3,
128 .clock = 0x1f, 128 .clock = 0x1f,
@@ -133,7 +133,7 @@ const static struct pmcmsptwi_clockcfg pmcmsptwi_defclockcfg = {
133 }, 133 },
134}; 134};
135 135
136const static struct pmcmsptwi_cfg pmcmsptwi_defcfg = { 136static const struct pmcmsptwi_cfg pmcmsptwi_defcfg = {
137 .arbf = 0x03, 137 .arbf = 0x03,
138 .nak = 0x03, 138 .nak = 0x03,
139 .add10 = 0x00, 139 .add10 = 0x00,
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 2b557bfd7f70..2d2087ad708f 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -999,7 +999,14 @@ static int i2c_pxa_probe(struct platform_device *dev)
999 spin_lock_init(&i2c->lock); 999 spin_lock_init(&i2c->lock);
1000 init_waitqueue_head(&i2c->wait); 1000 init_waitqueue_head(&i2c->wait);
1001 1001
1002 sprintf(i2c->adap.name, "pxa_i2c-i2c.%u", dev->id); 1002 /*
1003 * If "dev->id" is negative we consider it as zero.
1004 * The reason to do so is to avoid sysfs names that only make
1005 * sense when there are multiple adapters.
1006 */
1007 i2c->adap.nr = dev->id != -1 ? dev->id : 0;
1008 snprintf(i2c->adap.name, sizeof(i2c->adap.name), "pxa_i2c-i2c.%u",
1009 i2c->adap.nr);
1003 1010
1004 i2c->clk = clk_get(&dev->dev, "I2CCLK"); 1011 i2c->clk = clk_get(&dev->dev, "I2CCLK");
1005 if (IS_ERR(i2c->clk)) { 1012 if (IS_ERR(i2c->clk)) {
@@ -1050,13 +1057,6 @@ static int i2c_pxa_probe(struct platform_device *dev)
1050 i2c->adap.algo_data = i2c; 1057 i2c->adap.algo_data = i2c;
1051 i2c->adap.dev.parent = &dev->dev; 1058 i2c->adap.dev.parent = &dev->dev;
1052 1059
1053 /*
1054 * If "dev->id" is negative we consider it as zero.
1055 * The reason to do so is to avoid sysfs names that only make
1056 * sense when there are multiple adapters.
1057 */
1058 i2c->adap.nr = dev->id != -1 ? dev->id : 0;
1059
1060 ret = i2c_add_numbered_adapter(&i2c->adap); 1060 ret = i2c_add_numbered_adapter(&i2c->adap);
1061 if (ret < 0) { 1061 if (ret < 0) {
1062 printk(KERN_INFO "I2C: Failed to add bus\n"); 1062 printk(KERN_INFO "I2C: Failed to add bus\n");
@@ -1080,6 +1080,7 @@ eadapt:
1080ereqirq: 1080ereqirq:
1081 clk_disable(i2c->clk); 1081 clk_disable(i2c->clk);
1082 i2c_pxa_disable(dev); 1082 i2c_pxa_disable(dev);
1083 iounmap(i2c->reg_base);
1083eremap: 1084eremap:
1084 clk_put(i2c->clk); 1085 clk_put(i2c->clk);
1085eclk: 1086eclk:
@@ -1089,7 +1090,7 @@ emalloc:
1089 return ret; 1090 return ret;
1090} 1091}
1091 1092
1092static int i2c_pxa_remove(struct platform_device *dev) 1093static int __exit i2c_pxa_remove(struct platform_device *dev)
1093{ 1094{
1094 struct pxa_i2c *i2c = platform_get_drvdata(dev); 1095 struct pxa_i2c *i2c = platform_get_drvdata(dev);
1095 1096
@@ -1103,6 +1104,7 @@ static int i2c_pxa_remove(struct platform_device *dev)
1103 clk_put(i2c->clk); 1104 clk_put(i2c->clk);
1104 i2c_pxa_disable(dev); 1105 i2c_pxa_disable(dev);
1105 1106
1107 iounmap(i2c->reg_base);
1106 release_mem_region(i2c->iobase, i2c->iosize); 1108 release_mem_region(i2c->iobase, i2c->iosize);
1107 kfree(i2c); 1109 kfree(i2c);
1108 1110
@@ -1111,9 +1113,10 @@ static int i2c_pxa_remove(struct platform_device *dev)
1111 1113
1112static struct platform_driver i2c_pxa_driver = { 1114static struct platform_driver i2c_pxa_driver = {
1113 .probe = i2c_pxa_probe, 1115 .probe = i2c_pxa_probe,
1114 .remove = i2c_pxa_remove, 1116 .remove = __exit_p(i2c_pxa_remove),
1115 .driver = { 1117 .driver = {
1116 .name = "pxa2xx-i2c", 1118 .name = "pxa2xx-i2c",
1119 .owner = THIS_MODULE,
1117 }, 1120 },
1118}; 1121};
1119 1122
@@ -1122,7 +1125,7 @@ static int __init i2c_adap_pxa_init(void)
1122 return platform_driver_register(&i2c_pxa_driver); 1125 return platform_driver_register(&i2c_pxa_driver);
1123} 1126}
1124 1127
1125static void i2c_adap_pxa_exit(void) 1128static void __exit i2c_adap_pxa_exit(void)
1126{ 1129{
1127 platform_driver_unregister(&i2c_pxa_driver); 1130 platform_driver_unregister(&i2c_pxa_driver);
1128} 1131}
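A minimal userspace sketch of the acquire/unwind ordering the pxa probe fix restores: each error label releases only what was already set up, in reverse order of acquisition, which is where the added iounmap() belongs (between the irq and clock steps). The helpers and resource names below are invented for illustration; this is not kernel code.

    #include <stdio.h>
    #include <stdlib.h>

    static void *acquire(const char *what, int fail)
    {
        if (fail) {
            fprintf(stderr, "failed to acquire %s\n", what);
            return NULL;
        }
        printf("acquired %s\n", what);
        return malloc(1);
    }

    static void release(const char *what, void *res)
    {
        printf("released %s\n", what);
        free(res);
    }

    /* Mirrors the probe shape: clock -> register mapping -> irq. */
    static int fake_probe(int fail_at)
    {
        void *clk, *regs, *irq;

        clk = acquire("clock", fail_at == 1);
        if (!clk)
            goto out;

        regs = acquire("register mapping", fail_at == 2);
        if (!regs)
            goto err_clk;

        irq = acquire("irq line", fail_at == 3);
        if (!irq)
            goto err_unmap;   /* the step the patch adds: unmap before clk_put */

        /* success: a real driver keeps these until remove() */
        release("irq line", irq);
        release("register mapping", regs);
        release("clock", clk);
        return 0;

    err_unmap:
        release("register mapping", regs);
    err_clk:
        release("clock", clk);
    out:
        return -1;
    }

    int main(void)
    {
        fake_probe(3);   /* fail at the irq step and watch the unwind order */
        return 0;
    }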
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index 501f00cea782..e47aca0ca5ae 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -1,6 +1,13 @@
1# 1#
2# Makefile for miscellaneous I2C chip drivers. 2# Makefile for miscellaneous I2C chip drivers.
3# 3#
4# Think twice before you add a new driver to this directory.
5# Device drivers are better grouped according to the functionality they
6# implement rather than to the bus they are connected to. In particular:
7# * Hardware monitoring chip drivers go to drivers/hwmon
8# * RTC chip drivers go to drivers/rtc
9# * I/O expander drivers go to drivers/gpio
10#
4 11
5obj-$(CONFIG_DS1682) += ds1682.o 12obj-$(CONFIG_DS1682) += ds1682.o
6obj-$(CONFIG_SENSORS_EEPROM) += eeprom.o 13obj-$(CONFIG_SENSORS_EEPROM) += eeprom.o
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 96da22e9a5a4..fd84b2a36338 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -90,12 +90,16 @@ static int i2c_device_probe(struct device *dev)
90{ 90{
91 struct i2c_client *client = to_i2c_client(dev); 91 struct i2c_client *client = to_i2c_client(dev);
92 struct i2c_driver *driver = to_i2c_driver(dev->driver); 92 struct i2c_driver *driver = to_i2c_driver(dev->driver);
93 int status;
93 94
94 if (!driver->probe) 95 if (!driver->probe)
95 return -ENODEV; 96 return -ENODEV;
96 client->driver = driver; 97 client->driver = driver;
97 dev_dbg(dev, "probe\n"); 98 dev_dbg(dev, "probe\n");
98 return driver->probe(client); 99 status = driver->probe(client);
100 if (status)
101 client->driver = NULL;
102 return status;
99} 103}
100 104
101static int i2c_device_remove(struct device *dev) 105static int i2c_device_remove(struct device *dev)
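The i2c-core hunk above records the driver binding before calling probe and drops it again if probe fails, so a device is never left pointing at a driver that refused it. A small standalone C sketch of that pattern; the types are stand-ins for struct i2c_client and struct i2c_driver, not the real ones.

    #include <stdio.h>
    #include <stddef.h>

    struct fake_driver {
        const char *name;
        int (*probe)(void);
    };

    struct fake_client {
        struct fake_driver *driver;   /* NULL while unbound */
    };

    /* Bind, call probe, and roll the binding back on error. */
    static int fake_device_probe(struct fake_client *client,
                                 struct fake_driver *drv)
    {
        int status;

        if (!drv->probe)
            return -1;

        client->driver = drv;
        status = drv->probe();
        if (status)
            client->driver = NULL;   /* do not leave a stale binding */
        return status;
    }

    static int failing_probe(void) { return -1; }

    int main(void)
    {
        struct fake_driver drv = { "demo", failing_probe };
        struct fake_client client = { NULL };

        fake_device_probe(&client, &drv);
        printf("bound after failed probe: %s\n",
               client.driver ? "yes (bug)" : "no (correct)");
        return 0;
    }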
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index df752e690e47..eed6d8e1b5c7 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -50,7 +50,7 @@ menuconfig IDE
50 To compile this driver as a module, choose M here: the 50 To compile this driver as a module, choose M here: the
51 module will be called ide. 51 module will be called ide.
52 52
53 For further information, please read <file:Documentation/ide.txt>. 53 For further information, please read <file:Documentation/ide/ide.txt>.
54 54
55 If unsure, say Y. 55 If unsure, say Y.
56 56
@@ -77,7 +77,7 @@ config BLK_DEV_IDE
77 Useful information about large (>540 MB) IDE disks, multiple 77 Useful information about large (>540 MB) IDE disks, multiple
78 interfaces, what to do if ATA/IDE devices are not automatically 78 interfaces, what to do if ATA/IDE devices are not automatically
79 detected, sound card ATA/IDE ports, module support, and other 79 detected, sound card ATA/IDE ports, module support, and other
80 topics, is contained in <file:Documentation/ide.txt>. For detailed 80 topics, is contained in <file:Documentation/ide/ide.txt>. For detailed
81 information about hard drives, consult the Disk-HOWTO and the 81 information about hard drives, consult the Disk-HOWTO and the
82 Multi-Disk-HOWTO, available from 82 Multi-Disk-HOWTO, available from
83 <http://www.tldp.org/docs.html#howto>. 83 <http://www.tldp.org/docs.html#howto>.
@@ -87,7 +87,7 @@ config BLK_DEV_IDE
87 <ftp://ibiblio.org/pub/Linux/system/hardware/>. 87 <ftp://ibiblio.org/pub/Linux/system/hardware/>.
88 88
89 To compile this driver as a module, choose M here and read 89 To compile this driver as a module, choose M here and read
90 <file:Documentation/ide.txt>. The module will be called ide-mod. 90 <file:Documentation/ide/ide.txt>. The module will be called ide-mod.
91 Do not compile this driver as a module if your root file system (the 91 Do not compile this driver as a module if your root file system (the
92 one containing the directory /) is located on an IDE device. 92 one containing the directory /) is located on an IDE device.
93 93
@@ -98,7 +98,7 @@ config BLK_DEV_IDE
98 98
99if BLK_DEV_IDE 99if BLK_DEV_IDE
100 100
101comment "Please see Documentation/ide.txt for help/info on IDE drives" 101comment "Please see Documentation/ide/ide.txt for help/info on IDE drives"
102 102
103config BLK_DEV_IDE_SATA 103config BLK_DEV_IDE_SATA
104 bool "Support for SATA (deprecated; conflicts with libata SATA driver)" 104 bool "Support for SATA (deprecated; conflicts with libata SATA driver)"
@@ -235,8 +235,8 @@ config BLK_DEV_IDETAPE
235 along with other IDE devices, as "hdb" or "hdc", or something 235 along with other IDE devices, as "hdb" or "hdc", or something
236 similar, and will be mapped to a character device such as "ht0" 236 similar, and will be mapped to a character device such as "ht0"
237 (check the boot messages with dmesg). Be sure to consult the 237 (check the boot messages with dmesg). Be sure to consult the
238 <file:drivers/ide/ide-tape.c> and <file:Documentation/ide.txt> files 238 <file:drivers/ide/ide-tape.c> and <file:Documentation/ide/ide.txt>
239 for usage information. 239 files for usage information.
240 240
241 To compile this driver as a module, choose M here: the 241 To compile this driver as a module, choose M here: the
242 module will be called ide-tape. 242 module will be called ide-tape.
@@ -358,7 +358,7 @@ config BLK_DEV_CMD640
358 358
359 The CMD640 chip is also used on add-in cards by Acculogic, and on 359 The CMD640 chip is also used on add-in cards by Acculogic, and on
360 the "CSA-6400E PCI to IDE controller" that some people have. For 360 the "CSA-6400E PCI to IDE controller" that some people have. For
361 details, read <file:Documentation/ide.txt>. 361 details, read <file:Documentation/ide/ide.txt>.
362 362
363config BLK_DEV_CMD640_ENHANCED 363config BLK_DEV_CMD640_ENHANCED
364 bool "CMD640 enhanced support" 364 bool "CMD640 enhanced support"
@@ -366,7 +366,7 @@ config BLK_DEV_CMD640_ENHANCED
366 help 366 help
367 This option includes support for setting/autotuning PIO modes and 367 This option includes support for setting/autotuning PIO modes and
368 prefetch on CMD640 IDE interfaces. For details, read 368 prefetch on CMD640 IDE interfaces. For details, read
369 <file:Documentation/ide.txt>. If you have a CMD640 IDE interface 369 <file:Documentation/ide/ide.txt>. If you have a CMD640 IDE interface
370 and your BIOS does not already do this for you, then say Y here. 370 and your BIOS does not already do this for you, then say Y here.
371 Otherwise say N. 371 Otherwise say N.
372 372
@@ -1069,9 +1069,9 @@ config BLK_DEV_ALI14XX
1069 This driver is enabled at runtime using the "ali14xx.probe" kernel 1069 This driver is enabled at runtime using the "ali14xx.probe" kernel
1070 boot parameter. It enables support for the secondary IDE interface 1070 boot parameter. It enables support for the secondary IDE interface
1071 of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster 1071 of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster
1072 I/O speeds to be set as well. See the files 1072 I/O speeds to be set as well.
1073 <file:Documentation/ide.txt> and <file:drivers/ide/legacy/ali14xx.c> 1073 See the files <file:Documentation/ide/ide.txt> and
1074 for more info. 1074 <file:drivers/ide/legacy/ali14xx.c> for more info.
1075 1075
1076config BLK_DEV_DTC2278 1076config BLK_DEV_DTC2278
1077 tristate "DTC-2278 support" 1077 tristate "DTC-2278 support"
@@ -1079,7 +1079,7 @@ config BLK_DEV_DTC2278
1079 This driver is enabled at runtime using the "dtc2278.probe" kernel 1079 This driver is enabled at runtime using the "dtc2278.probe" kernel
1080 boot parameter. It enables support for the secondary IDE interface 1080 boot parameter. It enables support for the secondary IDE interface
1081 of the DTC-2278 card, and permits faster I/O speeds to be set as 1081 of the DTC-2278 card, and permits faster I/O speeds to be set as
1082 well. See the <file:Documentation/ide.txt> and 1082 well. See the <file:Documentation/ide/ide.txt> and
1083 <file:drivers/ide/legacy/dtc2278.c> files for more info. 1083 <file:drivers/ide/legacy/dtc2278.c> files for more info.
1084 1084
1085config BLK_DEV_HT6560B 1085config BLK_DEV_HT6560B
@@ -1088,7 +1088,7 @@ config BLK_DEV_HT6560B
1088 This driver is enabled at runtime using the "ht6560b.probe" kernel 1088 This driver is enabled at runtime using the "ht6560b.probe" kernel
1089 boot parameter. It enables support for the secondary IDE interface 1089 boot parameter. It enables support for the secondary IDE interface
1090 of the Holtek card, and permits faster I/O speeds to be set as well. 1090 of the Holtek card, and permits faster I/O speeds to be set as well.
1091 See the <file:Documentation/ide.txt> and 1091 See the <file:Documentation/ide/ide.txt> and
1092 <file:drivers/ide/legacy/ht6560b.c> files for more info. 1092 <file:drivers/ide/legacy/ht6560b.c> files for more info.
1093 1093
1094config BLK_DEV_QD65XX 1094config BLK_DEV_QD65XX
@@ -1096,7 +1096,7 @@ config BLK_DEV_QD65XX
1096 help 1096 help
1097 This driver is enabled at runtime using the "qd65xx.probe" kernel 1097 This driver is enabled at runtime using the "qd65xx.probe" kernel
1098 boot parameter. It permits faster I/O speeds to be set. See the 1098 boot parameter. It permits faster I/O speeds to be set. See the
1099 <file:Documentation/ide.txt> and <file:drivers/ide/legacy/qd65xx.c> 1099 <file:Documentation/ide/ide.txt> and <file:drivers/ide/legacy/qd65xx.c>
1100 for more info. 1100 for more info.
1101 1101
1102config BLK_DEV_UMC8672 1102config BLK_DEV_UMC8672
@@ -1105,7 +1105,7 @@ config BLK_DEV_UMC8672
1105 This driver is enabled at runtime using the "umc8672.probe" kernel 1105 This driver is enabled at runtime using the "umc8672.probe" kernel
1106 boot parameter. It enables support for the secondary IDE interface 1106 boot parameter. It enables support for the secondary IDE interface
1107 of the UMC-8672, and permits faster I/O speeds to be set as well. 1107 of the UMC-8672, and permits faster I/O speeds to be set as well.
1108 See the files <file:Documentation/ide.txt> and 1108 See the files <file:Documentation/ide/ide.txt> and
1109 <file:drivers/ide/legacy/umc8672.c> for more info. 1109 <file:drivers/ide/legacy/umc8672.c> for more info.
1110 1110
1111endif 1111endif
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 310e497b5838..c8d0e8715997 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -670,8 +670,8 @@ static void cdrom_buffer_sectors (ide_drive_t *drive, unsigned long sector,
670 * and attempt to recover if there are problems. Returns 0 if everything's 670 * and attempt to recover if there are problems. Returns 0 if everything's
671 * ok; nonzero if the request has been terminated. 671 * ok; nonzero if the request has been terminated.
672 */ 672 */
673static 673static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
674int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw) 674 int len, int ireason, int rw)
675{ 675{
676 /* 676 /*
677 * ireason == 0: the drive wants to receive data from us 677 * ireason == 0: the drive wants to receive data from us
@@ -701,6 +701,9 @@ int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw)
701 drive->name, __FUNCTION__, ireason); 701 drive->name, __FUNCTION__, ireason);
702 } 702 }
703 703
704 if (rq->cmd_type == REQ_TYPE_ATA_PC)
705 rq->cmd_flags |= REQ_FAILED;
706
704 cdrom_end_request(drive, 0); 707 cdrom_end_request(drive, 0);
705 return -1; 708 return -1;
706} 709}
@@ -1071,11 +1074,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1071 /* 1074 /*
1072 * check which way to transfer data 1075 * check which way to transfer data
1073 */ 1076 */
1074 if (blk_fs_request(rq) || blk_pc_request(rq)) { 1077 if (ide_cd_check_ireason(drive, rq, len, ireason, write))
1075 if (ide_cd_check_ireason(drive, len, ireason, write)) 1078 return ide_stopped;
1076 return ide_stopped;
1077 1079
1078 if (blk_fs_request(rq) && write == 0) { 1080 if (blk_fs_request(rq)) {
1081 if (write == 0) {
1079 int nskip; 1082 int nskip;
1080 1083
1081 if (ide_cd_check_transfer_size(drive, len)) { 1084 if (ide_cd_check_transfer_size(drive, len)) {
@@ -1101,16 +1104,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1101 if (ireason == 0) { 1104 if (ireason == 0) {
1102 write = 1; 1105 write = 1;
1103 xferfunc = HWIF(drive)->atapi_output_bytes; 1106 xferfunc = HWIF(drive)->atapi_output_bytes;
1104 } else if (ireason == 2 || (ireason == 1 && 1107 } else {
1105 (blk_fs_request(rq) || blk_pc_request(rq)))) {
1106 write = 0; 1108 write = 0;
1107 xferfunc = HWIF(drive)->atapi_input_bytes; 1109 xferfunc = HWIF(drive)->atapi_input_bytes;
1108 } else {
1109 printk(KERN_ERR "%s: %s: The drive "
1110 "appears confused (ireason = 0x%02x). "
1111 "Trying to recover by ending request.\n",
1112 drive->name, __FUNCTION__, ireason);
1113 goto end_request;
1114 } 1110 }
1115 1111
1116 /* 1112 /*
@@ -1182,11 +1178,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1182 else 1178 else
1183 rq->data += blen; 1179 rq->data += blen;
1184 } 1180 }
1181 if (!write && blk_sense_request(rq))
1182 rq->sense_len += blen;
1185 } 1183 }
1186 1184
1187 if (write && blk_sense_request(rq))
1188 rq->sense_len += thislen;
1189
1190 /* 1185 /*
1191 * pad, if necessary 1186 * pad, if necessary
1192 */ 1187 */
@@ -1931,6 +1926,7 @@ static const struct cd_list_entry ide_cd_quirks_list[] = {
1931 { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1926 { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK },
1932 { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1927 { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK },
1933 { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, 1928 { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK },
1929 { "Optiarc DVD RW AD-5200A", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK },
1934 { NULL, NULL, 0 } 1930 { NULL, NULL, 0 }
1935}; 1931};
1936 1932
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index b68284de4e85..6d147ce6782f 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -457,6 +457,10 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
457 layer. the packet must be complete, as we do not 457 layer. the packet must be complete, as we do not
458 touch it at all. */ 458 touch it at all. */
459 ide_cd_init_rq(drive, &req); 459 ide_cd_init_rq(drive, &req);
460
461 if (cgc->data_direction == CGC_DATA_WRITE)
462 req.cmd_flags |= REQ_RW;
463
460 memcpy(req.cmd, cgc->cmd, CDROM_PACKET_SIZE); 464 memcpy(req.cmd, cgc->cmd, CDROM_PACKET_SIZE);
461 if (cgc->sense) 465 if (cgc->sense)
462 memset(cgc->sense, 0, sizeof(struct request_sense)); 466 memset(cgc->sense, 0, sizeof(struct request_sense));
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 8f5bed471050..39501d130256 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -867,7 +867,7 @@ static void idedisk_setup (ide_drive_t *drive)
867 867
868 /* Only print cache size when it was specified */ 868 /* Only print cache size when it was specified */
869 if (id->buf_size) 869 if (id->buf_size)
870 printk (" w/%dKiB Cache", id->buf_size/2); 870 printk(KERN_CONT " w/%dKiB Cache", id->buf_size / 2);
871 871
872 printk(KERN_CONT ", CHS=%d/%d/%d\n", 872 printk(KERN_CONT ", CHS=%d/%d/%d\n",
873 drive->bios_cyl, drive->bios_head, drive->bios_sect); 873 drive->bios_cyl, drive->bios_head, drive->bios_sect);
@@ -949,7 +949,8 @@ static void ide_device_shutdown(ide_drive_t *drive)
949 return; 949 return;
950 } 950 }
951 951
952 printk("Shutdown: %s\n", drive->name); 952 printk(KERN_INFO "Shutdown: %s\n", drive->name);
953
953 drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND); 954 drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND);
954} 955}
955 956
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index d0e7b537353e..d61e5788d310 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -1,9 +1,13 @@
1/* 1/*
2 * IDE DMA support (including IDE PCI BM-DMA).
3 *
2 * Copyright (C) 1995-1998 Mark Lord 4 * Copyright (C) 1995-1998 Mark Lord
3 * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> 5 * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
4 * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz 6 * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz
5 * 7 *
6 * May be copied or modified under the terms of the GNU General Public License 8 * May be copied or modified under the terms of the GNU General Public License
9 *
10 * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
7 */ 11 */
8 12
9/* 13/*
@@ -11,49 +15,6 @@
11 */ 15 */
12 16
13/* 17/*
14 * This module provides support for the bus-master IDE DMA functions
15 * of various PCI chipsets, including the Intel PIIX (i82371FB for
16 * the 430 FX chipset), the PIIX3 (i82371SB for the 430 HX/VX and
17 * 440 chipsets), and the PIIX4 (i82371AB for the 430 TX chipset)
18 * ("PIIX" stands for "PCI ISA IDE Xcellerator").
19 *
20 * Pretty much the same code works for other IDE PCI bus-mastering chipsets.
21 *
22 * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
23 *
24 * By default, DMA support is prepared for use, but is currently enabled only
25 * for drives which already have DMA enabled (UltraDMA or mode 2 multi/single),
26 * or which are recognized as "good" (see table below). Drives with only mode0
27 * or mode1 (multi/single) DMA should also work with this chipset/driver
28 * (eg. MC2112A) but are not enabled by default.
29 *
30 * Use "hdparm -i" to view modes supported by a given drive.
31 *
32 * The hdparm-3.5 (or later) utility can be used for manually enabling/disabling
33 * DMA support, but must be (re-)compiled against this kernel version or later.
34 *
35 * To enable DMA, use "hdparm -d1 /dev/hd?" on a per-drive basis after booting.
36 * If problems arise, ide.c will disable DMA operation after a few retries.
37 * This error recovery mechanism works and has been extremely well exercised.
38 *
39 * IDE drives, depending on their vintage, may support several different modes
40 * of DMA operation. The boot-time modes are indicated with a "*" in
41 * the "hdparm -i" listing, and can be changed with *knowledgeable* use of
42 * the "hdparm -X" feature. There is seldom a need to do this, as drives
43 * normally power-up with their "best" PIO/DMA modes enabled.
44 *
45 * Testing has been done with a rather extensive number of drives,
46 * with Quantum & Western Digital models generally outperforming the pack,
47 * and Fujitsu & Conner (and some Seagate which are really Conner) drives
48 * showing more lackluster throughput.
49 *
50 * Keep an eye on /var/adm/messages for "DMA disabled" messages.
51 *
52 * Some people have reported trouble with Intel Zappa motherboards.
53 * This can be fixed by upgrading the AMI BIOS to version 1.00.04.BS0,
54 * available from ftp://ftp.intel.com/pub/bios/10004bs0.exe
55 * (thanks to Glen Morrell <glen@spin.Stanford.edu> for researching this).
56 *
57 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for 18 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
58 * fixing the problem with the BIOS on some Acer motherboards. 19 * fixing the problem with the BIOS on some Acer motherboards.
59 * 20 *
@@ -65,11 +26,6 @@
65 * 26 *
66 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com> 27 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
67 * for supplying a Promise UDMA board & WD UDMA drive for this work! 28 * for supplying a Promise UDMA board & WD UDMA drive for this work!
68 *
69 * And, yes, Intel Zappa boards really *do* use both PIIX IDE ports.
70 *
71 * ATA-66/100 and recovery functions, I forgot the rest......
72 *
73 */ 29 */
74 30
75#include <linux/module.h> 31#include <linux/module.h>
@@ -757,7 +713,7 @@ static int ide_tune_dma(ide_drive_t *drive)
757 } 713 }
758 714
759 if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE) 715 if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
760 return 0; 716 return 1;
761 717
762 if (ide_set_dma_mode(drive, speed)) 718 if (ide_set_dma_mode(drive, speed))
763 return 0; 719 return 0;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 4a2cb2868226..194ecb0049eb 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -756,7 +756,8 @@ static int ide_probe_port(ide_hwif_t *hwif)
756 756
757 BUG_ON(hwif->present); 757 BUG_ON(hwif->present);
758 758
759 if (hwif->noprobe) 759 if (hwif->noprobe ||
760 (hwif->drives[0].noprobe && hwif->drives[1].noprobe))
760 return -EACCES; 761 return -EACCES;
761 762
762 /* 763 /*
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 0598ecfd5f37..43e0e0557776 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -3765,6 +3765,11 @@ static int ide_tape_probe(ide_drive_t *drive)
3765 g->fops = &idetape_block_ops; 3765 g->fops = &idetape_block_ops;
3766 ide_register_region(g); 3766 ide_register_region(g);
3767 3767
3768 printk(KERN_WARNING "It is possible that this driver does not have any"
3769 " users anymore and, as a result, it will be REMOVED soon."
3770 " Please notify Bart <bzolnier@gmail.com> or Boris"
3771 " <petkovbb@gmail.com> in case you still need it.\n");
3772
3768 return 0; 3773 return 0;
3769 3774
3770out_free_tape: 3775out_free_tape:
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 477833f0daf5..9976f9d627d4 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -590,11 +590,6 @@ void ide_unregister(unsigned int index, int init_default, int restore)
590 hwif->extra_ports = 0; 590 hwif->extra_ports = 0;
591 } 591 }
592 592
593 /*
594 * Note that we only release the standard ports,
595 * and do not even try to handle any extra ports
596 * allocated for weird IDE interface chipsets.
597 */
598 ide_hwif_release_regions(hwif); 593 ide_hwif_release_regions(hwif);
599 594
600 /* copy original settings */ 595 /* copy original settings */
@@ -672,7 +667,6 @@ int ide_register_hw(hw_regs_t *hw, void (*quirkproc)(ide_drive_t *),
672 667
673 do { 668 do {
674 hwif = ide_deprecated_find_port(hw->io_ports[IDE_DATA_OFFSET]); 669 hwif = ide_deprecated_find_port(hw->io_ports[IDE_DATA_OFFSET]);
675 index = hwif->index;
676 if (hwif) 670 if (hwif)
677 goto found; 671 goto found;
678 for (index = 0; index < MAX_HWIFS; index++) 672 for (index = 0; index < MAX_HWIFS; index++)
@@ -680,6 +674,7 @@ int ide_register_hw(hw_regs_t *hw, void (*quirkproc)(ide_drive_t *),
680 } while (retry--); 674 } while (retry--);
681 return -1; 675 return -1;
682found: 676found:
677 index = hwif->index;
683 if (hwif->present) 678 if (hwif->present)
684 ide_unregister(index, 0, 1); 679 ide_unregister(index, 0, 1);
685 else if (!hwif->hold) 680 else if (!hwif->hold)
@@ -1036,10 +1031,9 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
1036 drive->nice1 = (arg >> IDE_NICE_1) & 1; 1031 drive->nice1 = (arg >> IDE_NICE_1) & 1;
1037 return 0; 1032 return 0;
1038 case HDIO_DRIVE_RESET: 1033 case HDIO_DRIVE_RESET:
1039 { 1034 if (!capable(CAP_SYS_ADMIN))
1040 unsigned long flags; 1035 return -EACCES;
1041 if (!capable(CAP_SYS_ADMIN)) return -EACCES; 1036
1042
1043 /* 1037 /*
1044 * Abort the current command on the 1038 * Abort the current command on the
1045 * group if there is one, taking 1039 * group if there is one, taking
@@ -1058,17 +1052,15 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
1058 ide_abort(drive, "drive reset"); 1052 ide_abort(drive, "drive reset");
1059 1053
1060 BUG_ON(HWGROUP(drive)->handler); 1054 BUG_ON(HWGROUP(drive)->handler);
1061 1055
1062 /* Ensure nothing gets queued after we 1056 /* Ensure nothing gets queued after we
1063 drop the lock. Reset will clear the busy */ 1057 drop the lock. Reset will clear the busy */
1064 1058
1065 HWGROUP(drive)->busy = 1; 1059 HWGROUP(drive)->busy = 1;
1066 spin_unlock_irqrestore(&ide_lock, flags); 1060 spin_unlock_irqrestore(&ide_lock, flags);
1067 (void) ide_do_reset(drive); 1061 (void) ide_do_reset(drive);
1068 1062
1069 return 0; 1063 return 0;
1070 }
1071
1072 case HDIO_GET_BUSSTATE: 1064 case HDIO_GET_BUSSTATE:
1073 if (!capable(CAP_SYS_ADMIN)) 1065 if (!capable(CAP_SYS_ADMIN))
1074 return -EACCES; 1066 return -EACCES;
@@ -1188,7 +1180,7 @@ static int __initdata is_chipset_set[MAX_HWIFS];
1188 * ide_setup() gets called VERY EARLY during initialization, 1180 * ide_setup() gets called VERY EARLY during initialization,
1189 * to handle kernel "command line" strings beginning with "hdx=" or "ide". 1181 * to handle kernel "command line" strings beginning with "hdx=" or "ide".
1190 * 1182 *
1191 * Remember to update Documentation/ide.txt if you change something here. 1183 * Remember to update Documentation/ide/ide.txt if you change something here.
1192 */ 1184 */
1193static int __init ide_setup(char *s) 1185static int __init ide_setup(char *s)
1194{ 1186{
@@ -1449,7 +1441,7 @@ static int __init ide_setup(char *s)
1449 1441
1450 case -1: /* "noprobe" */ 1442 case -1: /* "noprobe" */
1451 hwif->noprobe = 1; 1443 hwif->noprobe = 1;
1452 goto done; 1444 goto obsolete_option;
1453 1445
1454 case 1: /* base */ 1446 case 1: /* base */
1455 vals[1] = vals[0] + 0x206; /* default ctl */ 1447 vals[1] = vals[0] + 0x206; /* default ctl */
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c
index bba29df5f21d..2f4f47ad602f 100644
--- a/drivers/ide/legacy/qd65xx.c
+++ b/drivers/ide/legacy/qd65xx.c
@@ -334,43 +334,6 @@ static void __init qd6580_port_init_devs(ide_hwif_t *hwif)
334 hwif->drives[1].drive_data = t2; 334 hwif->drives[1].drive_data = t2;
335} 335}
336 336
337/*
338 * qd_unsetup:
339 *
340 * called to unsetup an ata channel : back to default values, unlinks tuning
341 */
342/*
343static void __exit qd_unsetup(ide_hwif_t *hwif)
344{
345 u8 config = hwif->config_data;
346 int base = hwif->select_data;
347 void *set_pio_mode = (void *)hwif->set_pio_mode;
348
349 if (hwif->chipset != ide_qd65xx)
350 return;
351
352 printk(KERN_NOTICE "%s: back to defaults\n", hwif->name);
353
354 hwif->selectproc = NULL;
355 hwif->set_pio_mode = NULL;
356
357 if (set_pio_mode == (void *)qd6500_set_pio_mode) {
358 // will do it for both
359 outb(QD6500_DEF_DATA, QD_TIMREG(&hwif->drives[0]));
360 } else if (set_pio_mode == (void *)qd6580_set_pio_mode) {
361 if (QD_CONTROL(hwif) & QD_CONTR_SEC_DISABLED) {
362 outb(QD6580_DEF_DATA, QD_TIMREG(&hwif->drives[0]));
363 outb(QD6580_DEF_DATA2, QD_TIMREG(&hwif->drives[1]));
364 } else {
365 outb(hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA, QD_TIMREG(&hwif->drives[0]));
366 }
367 } else {
368 printk(KERN_WARNING "Unknown qd65xx tuning fonction !\n");
369 printk(KERN_WARNING "keeping settings !\n");
370 }
371}
372*/
373
374static const struct ide_port_info qd65xx_port_info __initdata = { 337static const struct ide_port_info qd65xx_port_info __initdata = {
375 .chipset = ide_qd65xx, 338 .chipset = ide_qd65xx,
376 .host_flags = IDE_HFLAG_IO_32BIT | 339 .host_flags = IDE_HFLAG_IO_32BIT |
@@ -444,6 +407,8 @@ static int __init qd_probe(int base)
444 printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n", 407 printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n",
445 config, control, QD_ID3); 408 config, control, QD_ID3);
446 409
410 outb(QD_DEF_CONTR, QD_CONTROL_PORT);
411
447 if (control & QD_CONTR_SEC_DISABLED) { 412 if (control & QD_CONTR_SEC_DISABLED) {
448 /* secondary disabled */ 413 /* secondary disabled */
449 414
@@ -460,8 +425,6 @@ static int __init qd_probe(int base)
460 425
461 ide_device_add(idx, &qd65xx_port_info); 426 ide_device_add(idx, &qd65xx_port_info);
462 427
463 outb(QD_DEF_CONTR, QD_CONTROL_PORT);
464
465 return 1; 428 return 1;
466 } else { 429 } else {
467 ide_hwif_t *mate; 430 ide_hwif_t *mate;
@@ -487,8 +450,6 @@ static int __init qd_probe(int base)
487 450
488 ide_device_add(idx, &qd65xx_port_info); 451 ide_device_add(idx, &qd65xx_port_info);
489 452
490 outb(QD_DEF_CONTR, QD_CONTROL_PORT);
491
492 return 0; /* no other qd65xx possible */ 453 return 0; /* no other qd65xx possible */
493 } 454 }
494 } 455 }
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c
index bd24dad3cfc6..ec667982809c 100644
--- a/drivers/ide/pci/cmd640.c
+++ b/drivers/ide/pci/cmd640.c
@@ -787,7 +787,8 @@ static int __init cmd640x_init(void)
787 /* 787 /*
788 * Try to enable the secondary interface, if not already enabled 788 * Try to enable the secondary interface, if not already enabled
789 */ 789 */
790 if (cmd_hwif1->noprobe) { 790 if (cmd_hwif1->noprobe ||
791 (cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe)) {
791 port2 = "not probed"; 792 port2 = "not probed";
792 } else { 793 } else {
793 b = get_cmd640_reg(CNTRL); 794 b = get_cmd640_reg(CNTRL);
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index d0f7bb8b8adf..6357bb6269ab 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -1570,10 +1570,12 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
1570 if (rev < 3) 1570 if (rev < 3)
1571 info = &hpt36x; 1571 info = &hpt36x;
1572 else { 1572 else {
1573 static const struct hpt_info *hpt37x_info[] = 1573 switch (min_t(u8, rev, 6)) {
1574 { &hpt370, &hpt370a, &hpt372, &hpt372n }; 1574 case 3: info = &hpt370; break;
1575 1575 case 4: info = &hpt370a; break;
1576 info = hpt37x_info[min_t(u8, rev, 6) - 3]; 1576 case 5: info = &hpt372; break;
1577 case 6: info = &hpt372n; break;
1578 }
1577 idx++; 1579 idx++;
1578 } 1580 }
1579 break; 1581 break;
@@ -1626,7 +1628,7 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
1626 return ide_setup_pci_device(dev, &d); 1628 return ide_setup_pci_device(dev, &d);
1627} 1629}
1628 1630
1629static const struct pci_device_id hpt366_pci_tbl[] = { 1631static const struct pci_device_id hpt366_pci_tbl[] __devinitconst = {
1630 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 }, 1632 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 },
1631 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 }, 1633 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 },
1632 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 }, 1634 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 },
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 12ac3bfb4f9a..78c9eeb85634 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -1254,7 +1254,7 @@ pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1254 int rc = 0; 1254 int rc = 0;
1255 1255
1256 if (mesg.event != mdev->ofdev.dev.power.power_state.event 1256 if (mesg.event != mdev->ofdev.dev.power.power_state.event
1257 && mesg.event == PM_EVENT_SUSPEND) { 1257 && (mesg.event & PM_EVENT_SLEEP)) {
1258 rc = pmac_ide_do_suspend(hwif); 1258 rc = pmac_ide_do_suspend(hwif);
1259 if (rc == 0) 1259 if (rc == 0)
1260 mdev->ofdev.dev.power.power_state = mesg; 1260 mdev->ofdev.dev.power.power_state = mesg;
@@ -1364,7 +1364,7 @@ pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1364 int rc = 0; 1364 int rc = 0;
1365 1365
1366 if (mesg.event != pdev->dev.power.power_state.event 1366 if (mesg.event != pdev->dev.power.power_state.event
1367 && mesg.event == PM_EVENT_SUSPEND) { 1367 && (mesg.event & PM_EVENT_SLEEP)) {
1368 rc = pmac_ide_do_suspend(hwif); 1368 rc = pmac_ide_do_suspend(hwif);
1369 if (rc == 0) 1369 if (rc == 0)
1370 pdev->dev.power.power_state = mesg; 1370 pdev->dev.power.power_state = mesg;
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 28e155a9e2a5..9e2b1964d71a 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -183,6 +183,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
183 * Avoids access beyond actual disk limits on devices with an off-by-one bug. 183 * Avoids access beyond actual disk limits on devices with an off-by-one bug.
184 * Don't use this with devices which don't have this bug. 184 * Don't use this with devices which don't have this bug.
185 * 185 *
186 * - delay inquiry
187 * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry.
188 *
186 * - override internal blacklist 189 * - override internal blacklist
187 * Instead of adding to the built-in blacklist, use only the workarounds 190 * Instead of adding to the built-in blacklist, use only the workarounds
188 * specified in the module load parameter. 191 * specified in the module load parameter.
@@ -195,6 +198,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
195 ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) 198 ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
196 ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) 199 ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
197 ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) 200 ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
201 ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
198 ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) 202 ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
199 ", or a combination)"); 203 ", or a combination)");
200 204
@@ -357,6 +361,11 @@ static const struct {
357 .workarounds = SBP2_WORKAROUND_INQUIRY_36 | 361 .workarounds = SBP2_WORKAROUND_INQUIRY_36 |
358 SBP2_WORKAROUND_MODE_SENSE_8, 362 SBP2_WORKAROUND_MODE_SENSE_8,
359 }, 363 },
364 /* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
365 .firmware_revision = 0x002800,
366 .model_id = 0x000000,
367 .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY,
368 },
360 /* Initio bridges, actually only needed for some older ones */ { 369 /* Initio bridges, actually only needed for some older ones */ {
361 .firmware_revision = 0x000200, 370 .firmware_revision = 0x000200,
362 .model_id = SBP2_ROM_VALUE_WILDCARD, 371 .model_id = SBP2_ROM_VALUE_WILDCARD,
@@ -914,6 +923,9 @@ static int sbp2_start_device(struct sbp2_lu *lu)
914 sbp2_agent_reset(lu, 1); 923 sbp2_agent_reset(lu, 1);
915 sbp2_max_speed_and_size(lu); 924 sbp2_max_speed_and_size(lu);
916 925
926 if (lu->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
927 ssleep(SBP2_INQUIRY_DELAY);
928
917 error = scsi_add_device(lu->shost, 0, lu->ud->id, 0); 929 error = scsi_add_device(lu->shost, 0, lu->ud->id, 0);
918 if (error) { 930 if (error) {
919 SBP2_ERR("scsi_add_device failed"); 931 SBP2_ERR("scsi_add_device failed");
@@ -1962,6 +1974,9 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
1962{ 1974{
1963 struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0]; 1975 struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0];
1964 1976
1977 if (sdev->lun != 0 || sdev->id != lu->ud->id || sdev->channel != 0)
1978 return -ENODEV;
1979
1965 lu->sdev = sdev; 1980 lu->sdev = sdev;
1966 sdev->allow_restart = 1; 1981 sdev->allow_restart = 1;
1967 1982
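The new DViCO Momobay entry and the delay-inquiry bit follow the usual sbp2 quirk-table pattern: match a unit by firmware revision and model ID (with a wildcard), OR the listed workaround bits into the unit's flags, then act on individual bits later (here, sleeping before the first INQUIRY). A userspace-only sketch of that lookup; the table entries and flag values below are invented except where they echo the ones added above.

    #include <stdio.h>

    #define WORKAROUND_INQUIRY_36     0x2
    #define WORKAROUND_DELAY_INQUIRY  0x10
    #define ROM_VALUE_WILDCARD        (~0u)

    struct quirk {
        unsigned int firmware_revision;
        unsigned int model_id;
        unsigned int workarounds;
    };

    static const struct quirk quirks[] = {
        { 0x002800, 0x000000,           WORKAROUND_DELAY_INQUIRY },
        { 0x000200, ROM_VALUE_WILDCARD, WORKAROUND_INQUIRY_36 },
    };

    static unsigned int lookup_workarounds(unsigned int fw_rev, unsigned int model)
    {
        size_t i;

        for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
            if (quirks[i].firmware_revision != fw_rev)
                continue;
            if (quirks[i].model_id != ROM_VALUE_WILDCARD &&
                quirks[i].model_id != model)
                continue;
            return quirks[i].workarounds;
        }
        return 0;
    }

    int main(void)
    {
        unsigned int w = lookup_workarounds(0x002800, 0x000000);

        if (w & WORKAROUND_DELAY_INQUIRY)
            printf("would sleep before the first INQUIRY\n");
        return 0;
    }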
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h
index d2ecb0d8a1bb..80d8e097b065 100644
--- a/drivers/ieee1394/sbp2.h
+++ b/drivers/ieee1394/sbp2.h
@@ -343,6 +343,8 @@ enum sbp2lu_state_types {
343#define SBP2_WORKAROUND_INQUIRY_36 0x2 343#define SBP2_WORKAROUND_INQUIRY_36 0x2
344#define SBP2_WORKAROUND_MODE_SENSE_8 0x4 344#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
345#define SBP2_WORKAROUND_FIX_CAPACITY 0x8 345#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
346#define SBP2_WORKAROUND_DELAY_INQUIRY 0x10
347#define SBP2_INQUIRY_DELAY 12
346#define SBP2_WORKAROUND_OVERRIDE 0x100 348#define SBP2_WORKAROUND_OVERRIDE 0x100
347 349
348#endif /* SBP2_H */ 350#endif /* SBP2_H */
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index b10ade92efed..4df405157086 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3759,6 +3759,7 @@ static void cm_remove_one(struct ib_device *device)
3759 port = cm_dev->port[i-1]; 3759 port = cm_dev->port[i-1];
3760 ib_modify_port(device, port->port_num, 0, &port_modify); 3760 ib_modify_port(device, port->port_num, 0, &port_modify);
3761 ib_unregister_mad_agent(port->mad_agent); 3761 ib_unregister_mad_agent(port->mad_agent);
3762 flush_workqueue(cm.wq);
3762 cm_remove_port_fs(port); 3763 cm_remove_port_fs(port);
3763 } 3764 }
3764 kobject_put(&cm_dev->dev_obj); 3765 kobject_put(&cm_dev->dev_obj);
@@ -3813,6 +3814,7 @@ static void __exit ib_cm_cleanup(void)
3813 cancel_delayed_work(&timewait_info->work.work); 3814 cancel_delayed_work(&timewait_info->work.work);
3814 spin_unlock_irq(&cm.lock); 3815 spin_unlock_irq(&cm.lock);
3815 3816
3817 ib_unregister_client(&cm_client);
3816 destroy_workqueue(cm.wq); 3818 destroy_workqueue(cm.wq);
3817 3819
3818 list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) { 3820 list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
@@ -3820,7 +3822,6 @@ static void __exit ib_cm_cleanup(void)
3820 kfree(timewait_info); 3822 kfree(timewait_info);
3821 } 3823 }
3822 3824
3823 ib_unregister_client(&cm_client);
3824 class_unregister(&cm_class); 3825 class_unregister(&cm_class);
3825 idr_destroy(&cm.local_id_table); 3826 idr_destroy(&cm.local_id_table);
3826} 3827}
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 7f00347364f7..06d502c06a4d 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -139,7 +139,7 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
139static void ib_fmr_batch_release(struct ib_fmr_pool *pool) 139static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
140{ 140{
141 int ret; 141 int ret;
142 struct ib_pool_fmr *fmr, *next; 142 struct ib_pool_fmr *fmr;
143 LIST_HEAD(unmap_list); 143 LIST_HEAD(unmap_list);
144 LIST_HEAD(fmr_list); 144 LIST_HEAD(fmr_list);
145 145
@@ -158,20 +158,6 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
158#endif 158#endif
159 } 159 }
160 160
161 /*
162 * The free_list may hold FMRs that have been put there
163 * because they haven't reached the max_remap count.
164 * Invalidate their mapping as well.
165 */
166 list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
167 if (fmr->remap_count == 0)
168 continue;
169 hlist_del_init(&fmr->cache_node);
170 fmr->remap_count = 0;
171 list_add_tail(&fmr->fmr->list, &fmr_list);
172 list_move(&fmr->list, &unmap_list);
173 }
174
175 list_splice(&pool->dirty_list, &unmap_list); 161 list_splice(&pool->dirty_list, &unmap_list);
176 INIT_LIST_HEAD(&pool->dirty_list); 162 INIT_LIST_HEAD(&pool->dirty_list);
177 pool->dirty_len = 0; 163 pool->dirty_len = 0;
@@ -384,6 +370,11 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
384 370
385 i = 0; 371 i = 0;
386 list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) { 372 list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
373 if (fmr->remap_count) {
374 INIT_LIST_HEAD(&fmr_list);
375 list_add_tail(&fmr->fmr->list, &fmr_list);
376 ib_unmap_fmr(&fmr_list);
377 }
387 ib_dealloc_fmr(fmr->fmr); 378 ib_dealloc_fmr(fmr->fmr);
388 list_del(&fmr->list); 379 list_del(&fmr->list);
389 kfree(fmr); 380 kfree(fmr);
@@ -407,8 +398,23 @@ EXPORT_SYMBOL(ib_destroy_fmr_pool);
407 */ 398 */
408int ib_flush_fmr_pool(struct ib_fmr_pool *pool) 399int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
409{ 400{
410 int serial = atomic_inc_return(&pool->req_ser); 401 int serial;
402 struct ib_pool_fmr *fmr, *next;
403
404 /*
405 * The free_list holds FMRs that may have been used
406 * but have not been remapped enough times to be dirty.
407 * Put them on the dirty list now so that the cleanup
408 * thread will reap them too.
409 */
410 spin_lock_irq(&pool->pool_lock);
411 list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
412 if (fmr->remap_count > 0)
413 list_move(&fmr->list, &pool->dirty_list);
414 }
415 spin_unlock_irq(&pool->pool_lock);
411 416
417 serial = atomic_inc_return(&pool->req_ser);
412 wake_up_process(pool->thread); 418 wake_up_process(pool->thread);
413 419
414 if (wait_event_interruptible(pool->force_wait, 420 if (wait_event_interruptible(pool->force_wait,
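The flush change walks the free list and moves every FMR that has been mapped at least once onto the dirty list, so the cleanup thread unmaps it on its next pass. A standalone C sketch of that move-matching-entries step; the kernel's doubly-linked list_head and pool_lock are replaced here by a plain singly-linked list with no locking, purely for illustration.

    #include <stdio.h>

    struct fmr {
        int remap_count;
        struct fmr *next;
    };

    /* Move every entry with remap_count > 0 from *free_list to *dirty_list. */
    static void move_used_to_dirty(struct fmr **free_list, struct fmr **dirty_list)
    {
        struct fmr **pp = free_list;

        while (*pp) {
            struct fmr *f = *pp;

            if (f->remap_count > 0) {
                *pp = f->next;          /* unlink from the free list */
                f->next = *dirty_list;  /* push onto the dirty list */
                *dirty_list = f;
            } else {
                pp = &f->next;
            }
        }
    }

    int main(void)
    {
        struct fmr a = { 0, NULL }, b = { 2, &a }, c = { 1, &b };
        struct fmr *free_list = &c, *dirty_list = NULL;
        struct fmr *f;
        int n = 0;

        move_used_to_dirty(&free_list, &dirty_list);
        for (f = dirty_list; f; f = f->next)
            n++;
        printf("%d entries queued for unmapping\n", n);   /* prints 2 */
        return 0;
    }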
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 223b1aa7d92b..81c9195b512a 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -839,6 +839,7 @@ static void cm_work_handler(struct work_struct *_work)
839 unsigned long flags; 839 unsigned long flags;
840 int empty; 840 int empty;
841 int ret = 0; 841 int ret = 0;
842 int destroy_id;
842 843
843 spin_lock_irqsave(&cm_id_priv->lock, flags); 844 spin_lock_irqsave(&cm_id_priv->lock, flags);
844 empty = list_empty(&cm_id_priv->work_list); 845 empty = list_empty(&cm_id_priv->work_list);
@@ -857,9 +858,9 @@ static void cm_work_handler(struct work_struct *_work)
857 destroy_cm_id(&cm_id_priv->id); 858 destroy_cm_id(&cm_id_priv->id);
858 } 859 }
859 BUG_ON(atomic_read(&cm_id_priv->refcount)==0); 860 BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
861 destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
860 if (iwcm_deref_id(cm_id_priv)) { 862 if (iwcm_deref_id(cm_id_priv)) {
861 if (test_bit(IWCM_F_CALLBACK_DESTROY, 863 if (destroy_id) {
862 &cm_id_priv->flags)) {
863 BUG_ON(!list_empty(&cm_id_priv->work_list)); 864 BUG_ON(!list_empty(&cm_id_priv->work_list));
864 free_cm_id(cm_id_priv); 865 free_cm_id(cm_id_priv);
865 } 866 }
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index 73bfd1656f86..b8797c66676d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -136,14 +136,8 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list,
136 136
137 /* Find largest page shift we can use to cover buffers */ 137 /* Find largest page shift we can use to cover buffers */
138 for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift)) 138 for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
139 if (num_phys_buf > 1) { 139 if ((1ULL << *shift) & mask)
140 if ((1ULL << *shift) & mask) 140 break;
141 break;
142 } else
143 if (1ULL << *shift >=
144 buffer_list[0].size +
145 (buffer_list[0].addr & ((1ULL << *shift) - 1)))
146 break;
147 141
148 buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1); 142 buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
149 buffer_list[0].addr &= ~0ull << *shift; 143 buffer_list[0].addr &= ~0ull << *shift;
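The simplified loop in build_phys_page_list() now picks the page shift the same way for one buffer or many: the OR of all buffer start addresses and sizes is scanned for its lowest set bit at or above PAGE_SHIFT, which is the largest power of two that still divides every boundary. A runnable C sketch of just that computation, with an invented two-buffer mask.

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /*
     * Largest shift (capped at 27, as in the driver) such that every value
     * OR-ed into `mask` is a multiple of (1ULL << shift).
     */
    static int largest_page_shift(unsigned long long mask)
    {
        int shift;

        for (shift = PAGE_SHIFT; shift < 27; ++shift)
            if ((1ULL << shift) & mask)
                break;
        return shift;
    }

    int main(void)
    {
        /* hypothetical buffers: 64 KiB-aligned start, 64 KiB and 128 KiB long */
        unsigned long long mask = 0x10000ULL | 0x10000ULL | 0x20000ULL;

        printf("page shift = %d\n", largest_page_shift(mask));   /* 16 */
        return 0;
    }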
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index df1838f8f94d..b2ea9210467f 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -189,7 +189,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
189 return ERR_PTR(-ENOMEM); 189 return ERR_PTR(-ENOMEM);
190 } 190 }
191 chp->rhp = rhp; 191 chp->rhp = rhp;
192 chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1; 192 chp->ibcq.cqe = 1 << chp->cq.size_log2;
193 spin_lock_init(&chp->lock); 193 spin_lock_init(&chp->lock);
194 atomic_set(&chp->refcnt, 1); 194 atomic_set(&chp->refcnt, 1);
195 init_waitqueue_head(&chp->wait); 195 init_waitqueue_head(&chp->wait);
@@ -819,8 +819,11 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
819 kfree(qhp); 819 kfree(qhp);
820 return ERR_PTR(-ENOMEM); 820 return ERR_PTR(-ENOMEM);
821 } 821 }
822
822 attrs->cap.max_recv_wr = rqsize - 1; 823 attrs->cap.max_recv_wr = rqsize - 1;
823 attrs->cap.max_send_wr = sqsize; 824 attrs->cap.max_send_wr = sqsize;
825 attrs->cap.max_inline_data = T3_MAX_INLINE;
826
824 qhp->rhp = rhp; 827 qhp->rhp = rhp;
825 qhp->attr.pd = php->pdid; 828 qhp->attr.pd = php->pdid;
826 qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid; 829 qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 7f8853b44ee1..b2112f5a422f 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -567,12 +567,12 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
567 567
568 /* Init the adapter */ 568 /* Init the adapter */
569 nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev); 569 nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev);
570 nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
571 if (!nesdev->nesadapter) { 570 if (!nesdev->nesadapter) {
572 printk(KERN_ERR PFX "Unable to initialize adapter.\n"); 571 printk(KERN_ERR PFX "Unable to initialize adapter.\n");
573 ret = -ENOMEM; 572 ret = -ENOMEM;
574 goto bail5; 573 goto bail5;
575 } 574 }
575 nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
576 576
577 /* nesdev->base_doorbell_index = 577 /* nesdev->base_doorbell_index =
578 nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */ 578 nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index fd57e8a1582f..a48b288618ec 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -285,6 +285,21 @@ struct nes_device {
285}; 285};
286 286
287 287
288static inline __le32 get_crc_value(struct nes_v4_quad *nes_quad)
289{
290 u32 crc_value;
291 crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad));
292
293 /*
294 * With commit ef19454b ("[LIB] crc32c: Keep intermediate crc
295 * state in cpu order"), behavior of crc32c changes on
296 * big-endian platforms. Our algorithm expects the previous
297 * behavior; otherwise we have RDMA connection establishment
298 * issue on big-endian.
299 */
300 return cpu_to_le32(crc_value);
301}
302
288static inline void 303static inline void
289set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value) 304set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value)
290{ 305{
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index bd5cfeaac203..39adb267fb15 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -370,11 +370,11 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
370 int ret = 0; 370 int ret = 0;
371 u32 was_timer_set; 371 u32 was_timer_set;
372 372
373 if (!cm_node)
374 return -EINVAL;
373 new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); 375 new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
374 if (!new_send) 376 if (!new_send)
375 return -1; 377 return -1;
376 if (!cm_node)
377 return -EINVAL;
378 378
379 /* new_send->timetosend = currenttime */ 379 /* new_send->timetosend = currenttime */
380 new_send->retrycount = NES_DEFAULT_RETRYS; 380 new_send->retrycount = NES_DEFAULT_RETRYS;
@@ -947,6 +947,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
947 nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); 947 nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener);
948 948
949 kfree(listener); 949 kfree(listener);
950 listener = NULL;
950 ret = 0; 951 ret = 0;
951 cm_listens_destroyed++; 952 cm_listens_destroyed++;
952 } else { 953 } else {
@@ -2319,6 +2320,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2319 struct iw_cm_event cm_event; 2320 struct iw_cm_event cm_event;
2320 struct nes_hw_qp_wqe *wqe; 2321 struct nes_hw_qp_wqe *wqe;
2321 struct nes_v4_quad nes_quad; 2322 struct nes_v4_quad nes_quad;
2323 u32 crc_value;
2322 int ret; 2324 int ret;
2323 2325
2324 ibqp = nes_get_qp(cm_id->device, conn_param->qpn); 2326 ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
@@ -2435,8 +2437,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2435 nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; 2437 nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
2436 2438
2437 /* Produce hash key */ 2439 /* Produce hash key */
2438 nesqp->hte_index = cpu_to_be32( 2440 crc_value = get_crc_value(&nes_quad);
2439 crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff); 2441 nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
2440 nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", 2442 nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n",
2441 nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); 2443 nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask);
2442 2444
@@ -2750,6 +2752,7 @@ void cm_event_connected(struct nes_cm_event *event)
2750 struct iw_cm_event cm_event; 2752 struct iw_cm_event cm_event;
2751 struct nes_hw_qp_wqe *wqe; 2753 struct nes_hw_qp_wqe *wqe;
2752 struct nes_v4_quad nes_quad; 2754 struct nes_v4_quad nes_quad;
2755 u32 crc_value;
2753 int ret; 2756 int ret;
2754 2757
2755 /* get all our handles */ 2758 /* get all our handles */
@@ -2827,8 +2830,8 @@ void cm_event_connected(struct nes_cm_event *event)
2827 nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; 2830 nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
2828 2831
2829 /* Produce hash key */ 2832 /* Produce hash key */
2830 nesqp->hte_index = cpu_to_be32( 2833 crc_value = get_crc_value(&nes_quad);
2831 crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff); 2834 nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
2832 nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", 2835 nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n",
2833 nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); 2836 nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask);
2834 2837
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 7c4c0fbf0abd..49e53e4c1ebe 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -156,15 +156,14 @@ static void nes_nic_tune_timer(struct nes_device *nesdev)
156 156
157 spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); 157 spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
158 158
159 if (shared_timer->cq_count_old < cq_count) { 159 if (shared_timer->cq_count_old <= cq_count)
160 if (cq_count > shared_timer->threshold_low) 160 shared_timer->cq_direction_downward = 0;
161 shared_timer->cq_direction_downward=0; 161 else
162 }
163 if (shared_timer->cq_count_old >= cq_count)
164 shared_timer->cq_direction_downward++; 162 shared_timer->cq_direction_downward++;
165 shared_timer->cq_count_old = cq_count; 163 shared_timer->cq_count_old = cq_count;
166 if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) { 164 if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) {
167 if (cq_count <= shared_timer->threshold_low) { 165 if (cq_count <= shared_timer->threshold_low &&
166 shared_timer->threshold_low > 4) {
168 shared_timer->threshold_low = shared_timer->threshold_low/2; 167 shared_timer->threshold_low = shared_timer->threshold_low/2;
169 shared_timer->cq_direction_downward=0; 168 shared_timer->cq_direction_downward=0;
170 nesdev->currcq_count = 0; 169 nesdev->currcq_count = 0;
@@ -1728,7 +1727,6 @@ int nes_napi_isr(struct nes_device *nesdev)
1728 nesdev->int_req &= ~NES_INT_TIMER; 1727 nesdev->int_req &= ~NES_INT_TIMER;
1729 nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); 1728 nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
1730 nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); 1729 nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
1731 nesadapter->tune_timer.timer_in_use_old = 0;
1732 } 1730 }
1733 nesdev->deepcq_count = 0; 1731 nesdev->deepcq_count = 0;
1734 return 1; 1732 return 1;
@@ -1867,7 +1865,6 @@ void nes_dpc(unsigned long param)
1867 nesdev->int_req &= ~NES_INT_TIMER; 1865 nesdev->int_req &= ~NES_INT_TIMER;
1868 nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); 1866 nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
1869 nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); 1867 nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
1870 nesdev->nesadapter->tune_timer.timer_in_use_old = 0;
1871 } else { 1868 } else {
1872 nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req)); 1869 nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req));
1873 } 1870 }
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 1e10df550c9e..b7e2844f096b 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -962,7 +962,7 @@ struct nes_arp_entry {
962#define DEFAULT_JUMBO_NES_QL_LOW 12 962#define DEFAULT_JUMBO_NES_QL_LOW 12
963#define DEFAULT_JUMBO_NES_QL_TARGET 40 963#define DEFAULT_JUMBO_NES_QL_TARGET 40
964#define DEFAULT_JUMBO_NES_QL_HIGH 128 964#define DEFAULT_JUMBO_NES_QL_HIGH 128
965#define NES_NIC_CQ_DOWNWARD_TREND 8 965#define NES_NIC_CQ_DOWNWARD_TREND 16
966 966
967struct nes_hw_tune_timer { 967struct nes_hw_tune_timer {
968 //u16 cq_count; 968 //u16 cq_count;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 4dafbe16e82a..a651e9d9f0ef 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -929,7 +929,7 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
929 NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db); 929 NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db);
930 nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n", 930 nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n",
931 nespd->mmap_db_index, nespd->pd_id); 931 nespd->mmap_db_index, nespd->pd_id);
932 if (nespd->mmap_db_index > NES_MAX_USER_DB_REGIONS) { 932 if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) {
933 nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n"); 933 nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n");
934 nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); 934 nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
935 kfree(nespd); 935 kfree(nespd);
@@ -1327,7 +1327,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
1327 (long long unsigned int)req.user_wqe_buffers); 1327 (long long unsigned int)req.user_wqe_buffers);
1328 nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); 1328 nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
1329 kfree(nesqp->allocated_buffer); 1329 kfree(nesqp->allocated_buffer);
1330 return ERR_PTR(-ENOMEM); 1330 return ERR_PTR(-EFAULT);
1331 } 1331 }
1332 } 1332 }
1333 1333
@@ -1674,6 +1674,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
1674 } 1674 }
1675 nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n", 1675 nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n",
1676 (unsigned long)req.user_cq_buffer, entries); 1676 (unsigned long)req.user_cq_buffer, entries);
1677 err = 1;
1677 list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) { 1678 list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) {
1678 if (nespbl->user_base == (unsigned long )req.user_cq_buffer) { 1679 if (nespbl->user_base == (unsigned long )req.user_cq_buffer) {
1679 list_del(&nespbl->list); 1680 list_del(&nespbl->list);
@@ -1686,7 +1687,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
1686 if (err) { 1687 if (err) {
1687 nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); 1688 nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
1688 kfree(nescq); 1689 kfree(nescq);
1689 return ERR_PTR(err); 1690 return ERR_PTR(-EFAULT);
1690 } 1691 }
1691 1692
1692 pbl_entries = nespbl->pbl_size >> 3; 1693 pbl_entries = nespbl->pbl_size >> 3;
@@ -1831,9 +1832,6 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
1831 spin_unlock_irqrestore(&nesdev->cqp.lock, flags); 1832 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
1832 } 1833 }
1833 } 1834 }
1834 nes_debug(NES_DBG_CQ, "iWARP CQ%u create timeout expired, major code = 0x%04X,"
1835 " minor code = 0x%04X\n",
1836 nescq->hw_cq.cq_number, cqp_request->major_code, cqp_request->minor_code);
1837 if (!context) 1835 if (!context)
1838 pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, 1836 pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
1839 nescq->hw_cq.cq_pbase); 1837 nescq->hw_cq.cq_pbase);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 714b8db02b29..993f0a8ff28f 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -237,36 +237,32 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
237static 237static
238struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id) 238struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
239{ 239{
240 struct list_head *p_list; 240 struct iser_device *device;
241 struct iser_device *device = NULL;
242 241
243 mutex_lock(&ig.device_list_mutex); 242 mutex_lock(&ig.device_list_mutex);
244 243
245 p_list = ig.device_list.next; 244 list_for_each_entry(device, &ig.device_list, ig_list)
246 while (p_list != &ig.device_list) {
247 device = list_entry(p_list, struct iser_device, ig_list);
248 /* find if there's a match using the node GUID */ 245 /* find if there's a match using the node GUID */
249 if (device->ib_device->node_guid == cma_id->device->node_guid) 246 if (device->ib_device->node_guid == cma_id->device->node_guid)
250 break; 247 goto inc_refcnt;
251 }
252 248
253 if (device == NULL) { 249 device = kzalloc(sizeof *device, GFP_KERNEL);
254 device = kzalloc(sizeof *device, GFP_KERNEL); 250 if (device == NULL)
255 if (device == NULL) 251 goto out;
256 goto out; 252
257 /* assign this device to the device */ 253 /* assign this device to the device */
258 device->ib_device = cma_id->device; 254 device->ib_device = cma_id->device;
259 /* init the device and link it into ig device list */ 255 /* init the device and link it into ig device list */
260 if (iser_create_device_ib_res(device)) { 256 if (iser_create_device_ib_res(device)) {
261 kfree(device); 257 kfree(device);
262 device = NULL; 258 device = NULL;
263 goto out; 259 goto out;
264 }
265 list_add(&device->ig_list, &ig.device_list);
266 } 260 }
267out: 261 list_add(&device->ig_list, &ig.device_list);
268 BUG_ON(device == NULL); 262
263inc_refcnt:
269 device->refcount++; 264 device->refcount++;
265out:
270 mutex_unlock(&ig.device_list_mutex); 266 mutex_unlock(&ig.device_list_mutex);
271 return device; 267 return device;
272} 268}
@@ -372,6 +368,12 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
372 int ret; 368 int ret;
373 369
374 device = iser_device_find_by_ib_device(cma_id); 370 device = iser_device_find_by_ib_device(cma_id);
371 if (!device) {
372 iser_err("device lookup/creation failed\n");
373 iser_connect_error(cma_id);
374 return;
375 }
376
375 ib_conn = (struct iser_conn *)cma_id->context; 377 ib_conn = (struct iser_conn *)cma_id->context;
376 ib_conn->device = device; 378 ib_conn->device = device;
377 379
@@ -380,7 +382,6 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
380 iser_err("resolve route failed: %d\n", ret); 382 iser_err("resolve route failed: %d\n", ret);
381 iser_connect_error(cma_id); 383 iser_connect_error(cma_id);
382 } 384 }
383 return;
384} 385}
385 386
386static void iser_route_handler(struct rdma_cm_id *cma_id) 387static void iser_route_handler(struct rdma_cm_id *cma_id)
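
The iser rewrite above replaces an open-coded list walk with list_for_each_entry(), publishes a newly created device on the list only after its IB resources are set up, and returns NULL on failure so the caller (iser_addr_handler) can fail the connection instead of tripping a BUG_ON. A rough userspace sketch of the same lookup-or-create-and-refcount shape, with a hand-rolled list standing in for the kernel list API; all names are invented:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct device {
        char name[16];
        int refcount;
        struct device *next;
    };

    static struct device *device_list;

    /* Return an existing entry (refcount bumped) or create one; NULL on failure. */
    static struct device *device_find_or_create(const char *name)
    {
        struct device *dev;

        for (dev = device_list; dev; dev = dev->next)
            if (!strcmp(dev->name, name))
                goto found;

        dev = calloc(1, sizeof(*dev));
        if (!dev)
            return NULL;                /* caller must check, no BUG_ON */
        snprintf(dev->name, sizeof(dev->name), "%s", name);
        dev->next = device_list;
        device_list = dev;              /* publish only once fully initialised */
    found:
        dev->refcount++;
        return dev;
    }

    int main(void)
    {
        struct device *a = device_find_or_create("ib0");
        struct device *b = device_find_or_create("ib0");

        if (a && b)
            printf("%s refcount=%d\n", a->name, a->refcount);
        return 0;
    }

The real function does all of this under ig.device_list_mutex and creates the IB resources before the list_add(), which is why its failure path can simply kfree() the half-built device and return NULL.
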
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 8b10d9f23bef..c5263d63aca3 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -42,14 +42,14 @@ config INPUT_M68K_BEEP
42 42
43config INPUT_APANEL 43config INPUT_APANEL
44 tristate "Fujitsu Lifebook Application Panel buttons" 44 tristate "Fujitsu Lifebook Application Panel buttons"
45 depends on X86 45 depends on X86 && I2C && LEDS_CLASS
46 select I2C_I801
47 select INPUT_POLLDEV 46 select INPUT_POLLDEV
48 select CHECK_SIGNATURE 47 select CHECK_SIGNATURE
49 help 48 help
50 Say Y here for support of the Application Panel buttons, used on 49 Say Y here for support of the Application Panel buttons, used on
51 Fujitsu Lifebook. These are attached to the mainboard through 50 Fujitsu Lifebook. These are attached to the mainboard through
52 an SMBus interface managed by the I2C Intel ICH (i801) driver. 51 an SMBus interface managed by the I2C Intel ICH (i801) driver,
52 which you should also build for this kernel.
53 53
54 To compile this driver as a module, choose M here: the module will 54 To compile this driver as a module, choose M here: the module will
55 be called apanel. 55 be called apanel.
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index dd22d91f8b39..c972e5d03a3f 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -16,7 +16,7 @@
16 16
17#if defined(CONFIG_MACH_JAZZ) 17#if defined(CONFIG_MACH_JAZZ)
18#include "i8042-jazzio.h" 18#include "i8042-jazzio.h"
19#elif defined(CONFIG_SGI_IP22) 19#elif defined(CONFIG_SGI_HAS_I8042)
20#include "i8042-ip22io.h" 20#include "i8042-ip22io.h"
21#elif defined(CONFIG_PPC) 21#elif defined(CONFIG_PPC)
22#include "i8042-ppcio.h" 22#include "i8042-ppcio.h"
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index aacedec4986f..827c32c16795 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -637,7 +637,6 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
637 err("maximum number of devices exceeded"); 637 err("maximum number of devices exceeded");
638 return NULL; 638 return NULL;
639 } 639 }
640 mutex_init(&cs->mutex);
641 640
642 gig_dbg(DEBUG_INIT, "allocating bcs[0..%d]", channels - 1); 641 gig_dbg(DEBUG_INIT, "allocating bcs[0..%d]", channels - 1);
643 cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL); 642 cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL);
@@ -898,8 +897,10 @@ int gigaset_shutdown(struct cardstate *cs)
898{ 897{
899 mutex_lock(&cs->mutex); 898 mutex_lock(&cs->mutex);
900 899
901 if (!(cs->flags & VALID_MINOR)) 900 if (!(cs->flags & VALID_MINOR)) {
901 mutex_unlock(&cs->mutex);
902 return -1; 902 return -1;
903 }
903 904
904 cs->waiting = 1; 905 cs->waiting = 1;
905 906
@@ -1086,6 +1087,7 @@ struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
1086 drv->cs[i].driver = drv; 1087 drv->cs[i].driver = drv;
1087 drv->cs[i].ops = drv->ops; 1088 drv->cs[i].ops = drv->ops;
1088 drv->cs[i].minor_index = i; 1089 drv->cs[i].minor_index = i;
1090 mutex_init(&drv->cs[i].mutex);
1089 } 1091 }
1090 1092
1091 gigaset_if_initdriver(drv, procname, devname); 1093 gigaset_if_initdriver(drv, procname, devname);
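
Two gigaset fixes above: the per-cardstate mutex is now initialised when the driver allocates its cardstate array, so it exists before any caller can take it, and the early return in gigaset_shutdown() drops the mutex it has just acquired. A small pthread sketch of the unlock-before-early-return shape; the flag and function names are invented:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int minor_valid;            /* stands in for cs->flags & VALID_MINOR */

    static int shutdown_device(void)
    {
        pthread_mutex_lock(&lock);

        if (!minor_valid) {
            pthread_mutex_unlock(&lock);   /* never return with the lock held */
            return -1;
        }

        /* ... shut the device down ... */
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        printf("shutdown: %d\n", shutdown_device());
        return 0;
    }
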
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 7993e01f9fc5..76043dedba5b 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -725,23 +725,6 @@ static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter)
725 725
726 switch (adapter->type) { 726 switch (adapter->type) {
727 case AVM_FRITZ_PCIV2: 727 case AVM_FRITZ_PCIV2:
728 retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED,
729 "fcpcipnp", adapter);
730 break;
731 case AVM_FRITZ_PCI:
732 retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED,
733 "fcpcipnp", adapter);
734 break;
735 case AVM_FRITZ_PNP:
736 retval = request_irq(adapter->irq, fcpci_irq, 0,
737 "fcpcipnp", adapter);
738 break;
739 }
740 if (retval)
741 goto err_region;
742
743 switch (adapter->type) {
744 case AVM_FRITZ_PCIV2:
745 case AVM_FRITZ_PCI: 728 case AVM_FRITZ_PCI:
746 val = inl(adapter->io); 729 val = inl(adapter->io);
747 break; 730 break;
@@ -796,6 +779,23 @@ static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter)
796 779
797 switch (adapter->type) { 780 switch (adapter->type) {
798 case AVM_FRITZ_PCIV2: 781 case AVM_FRITZ_PCIV2:
782 retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED,
783 "fcpcipnp", adapter);
784 break;
785 case AVM_FRITZ_PCI:
786 retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED,
787 "fcpcipnp", adapter);
788 break;
789 case AVM_FRITZ_PNP:
790 retval = request_irq(adapter->irq, fcpci_irq, 0,
791 "fcpcipnp", adapter);
792 break;
793 }
794 if (retval)
795 goto err_region;
796
797 switch (adapter->type) {
798 case AVM_FRITZ_PCIV2:
799 fcpci2_init(adapter); 799 fcpci2_init(adapter);
800 isacsx_setup(&adapter->isac); 800 isacsx_setup(&adapter->isac);
801 break; 801 break;
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 9cef6fcf587b..d4ad6992f776 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -981,13 +981,13 @@ isdn_readbchan_tty(int di, int channel, struct tty_struct *tty, int cisco_hack)
981} 981}
982 982
983 983
984static __inline int 984static inline int
985isdn_minor2drv(int minor) 985isdn_minor2drv(int minor)
986{ 986{
987 return (dev->drvmap[minor]); 987 return (dev->drvmap[minor]);
988} 988}
989 989
990static __inline int 990static inline int
991isdn_minor2chan(int minor) 991isdn_minor2chan(int minor)
992{ 992{
993 return (dev->chanmap[minor]); 993 return (dev->chanmap[minor]);
diff --git a/drivers/isdn/i4l/isdn_ttyfax.c b/drivers/isdn/i4l/isdn_ttyfax.c
index f93de4a30355..78f7660c1d0e 100644
--- a/drivers/isdn/i4l/isdn_ttyfax.c
+++ b/drivers/isdn/i4l/isdn_ttyfax.c
@@ -906,7 +906,8 @@ isdn_tty_cmd_FCLASS2(char **p, modem_info * info)
906 sprintf(rs, "\r\n0-2"); 906 sprintf(rs, "\r\n0-2");
907 isdn_tty_at_cout(rs, info); 907 isdn_tty_at_cout(rs, info);
908 } else { 908 } else {
909 if ((f->phase != ISDN_FAX_PHASE_D) || (!info->faxonline & 1)) 909 if ((f->phase != ISDN_FAX_PHASE_D) ||
910 (!(info->faxonline & 1)))
910 PARSE_ERROR1; 911 PARSE_ERROR1;
911 par = isdn_getnum(p); 912 par = isdn_getnum(p);
912 if ((par < 0) || (par > 2)) 913 if ((par < 0) || (par > 2))
diff --git a/drivers/isdn/i4l/isdn_v110.c b/drivers/isdn/i4l/isdn_v110.c
index 5484d3c38a57..c5d02b6aafab 100644
--- a/drivers/isdn/i4l/isdn_v110.c
+++ b/drivers/isdn/i4l/isdn_v110.c
@@ -62,7 +62,7 @@ static unsigned char V110_OffMatrix_38400[] =
62 * and to 67452301 when keylen = 2. This is necessary because ordering on 62 * and to 67452301 when keylen = 2. This is necessary because ordering on
63 * the isdn line is the other way. 63 * the isdn line is the other way.
64 */ 64 */
65static __inline unsigned char 65static inline unsigned char
66FlipBits(unsigned char c, int keylen) 66FlipBits(unsigned char c, int keylen)
67{ 67{
68 unsigned char b = c; 68 unsigned char b = c;
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index 655ef9a3f4df..a335c85a736e 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -1289,7 +1289,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
1289 } 1289 }
1290 break; 1290 break;
1291 case ISDN_CMD_CLREAZ: 1291 case ISDN_CMD_CLREAZ:
1292 if (!card->flags & ISDNLOOP_FLAGS_RUNNING) 1292 if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
1293 return -ENODEV; 1293 return -ENODEV;
1294 if (card->leased) 1294 if (card->leased)
1295 break; 1295 break;
@@ -1333,7 +1333,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
1333 } 1333 }
1334 break; 1334 break;
1335 case ISDN_CMD_SETL3: 1335 case ISDN_CMD_SETL3:
1336 if (!card->flags & ISDNLOOP_FLAGS_RUNNING) 1336 if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
1337 return -ENODEV; 1337 return -ENODEV;
1338 return 0; 1338 return 0;
1339 default: 1339 default:
@@ -1380,7 +1380,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel)
1380 isdnloop_card *card = isdnloop_findcard(id); 1380 isdnloop_card *card = isdnloop_findcard(id);
1381 1381
1382 if (card) { 1382 if (card) {
1383 if (!card->flags & ISDNLOOP_FLAGS_RUNNING) 1383 if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
1384 return -ENODEV; 1384 return -ENODEV;
1385 return (isdnloop_writecmd(buf, len, 1, card)); 1385 return (isdnloop_writecmd(buf, len, 1, card));
1386 } 1386 }
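
The isdn_ttyfax and isdnloop hunks above fix the same C precedence pitfall: ! binds tighter than &, so !card->flags & FLAG evaluates as (!card->flags) & FLAG, which only looks at whether the whole flags word is zero, not at the bit being tested. A tiny demonstration with an invented flag value:

    #include <stdio.h>

    #define FLAG_RUNNING 0x04

    int main(void)
    {
        unsigned int flags = 0;         /* card is NOT running */

        /* broken form: (!flags) & 0x04 == 1 & 4 == 0, so "not running" is never seen */
        printf("broken: %d\n", !flags & FLAG_RUNNING);

        /* fixed form: !(flags & 0x04) == 1, correctly reports "not running" */
        printf("fixed : %d\n", !(flags & FLAG_RUNNING));
        return 0;
    }
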
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 7743d73768df..c632c08cbbdc 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -69,11 +69,22 @@ static __init int map_switcher(void)
69 switcher_page[i] = virt_to_page(addr); 69 switcher_page[i] = virt_to_page(addr);
70 } 70 }
71 71
72 /* First we check that the Switcher won't overlap the fixmap area at
73 * the top of memory. It's currently nowhere near, but it could have
74 * very strange effects if it ever happened. */
75 if (SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE > FIXADDR_START){
76 err = -ENOMEM;
77 printk("lguest: mapping switcher would thwack fixmap\n");
78 goto free_pages;
79 }
80
72 /* Now we reserve the "virtual memory area" we want: 0xFFC00000 81 /* Now we reserve the "virtual memory area" we want: 0xFFC00000
73 * (SWITCHER_ADDR). We might not get it in theory, but in practice 82 * (SWITCHER_ADDR). We might not get it in theory, but in practice
74 * it's worked so far. */ 83 * it's worked so far. The end address needs +1 because __get_vm_area
84 * allocates an extra guard page, so we need space for that. */
75 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, 85 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
76 VM_ALLOC, SWITCHER_ADDR, VMALLOC_END); 86 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
87 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
77 if (!switcher_vma) { 88 if (!switcher_vma) {
78 err = -ENOMEM; 89 err = -ENOMEM;
79 printk("lguest: could not map switcher pages high\n"); 90 printk("lguest: could not map switcher pages high\n");
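
The lguest change above adds a sanity check that the Switcher mapping cannot run into the fixmap area, and sizes the __get_vm_area() request one page larger than the Switcher itself to leave room for the guard page the allocator appends. The same interval arithmetic in standalone form; the constants here are illustrative, not the kernel's real values:

    #include <stdio.h>

    #define PAGE_SIZE        4096UL
    #define SWITCHER_PAGES   2UL            /* illustrative */
    #define SWITCHER_ADDR    0xFFC00000UL
    #define FIXADDR_START    0xFFE00000UL   /* illustrative */

    int main(void)
    {
        /* +1 page: __get_vm_area-style allocators append a guard page */
        unsigned long end = SWITCHER_ADDR + (SWITCHER_PAGES + 1) * PAGE_SIZE;

        if (end > FIXADDR_START)
            printf("switcher would overlap the fixmap area\n");
        else
            printf("switcher fits: 0x%lx - 0x%lx\n", SWITCHER_ADDR, end);
        return 0;
    }
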
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 85d42d3d01a9..2221485b0773 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -241,15 +241,16 @@ static ssize_t write(struct file *file, const char __user *in,
241 cpu = &lg->cpus[cpu_id]; 241 cpu = &lg->cpus[cpu_id];
242 if (!cpu) 242 if (!cpu)
243 return -EINVAL; 243 return -EINVAL;
244 }
245 244
246 /* Once the Guest is dead, all you can do is read() why it died. */ 245 /* Once the Guest is dead, you can only read() why it died. */
247 if (lg && lg->dead) 246 if (lg->dead)
248 return -ENOENT; 247 return -ENOENT;
249 248
250 /* If you're not the task which owns the Guest, you can only break */ 249 /* If you're not the task which owns the Guest, all you can do
251 if (lg && current != cpu->tsk && req != LHREQ_BREAK) 250 * is break the Launcher out of running the Guest. */
252 return -EPERM; 251 if (current != cpu->tsk && req != LHREQ_BREAK)
252 return -EPERM;
253 }
253 254
254 switch (req) { 255 switch (req) {
255 case LHREQ_INITIALIZE: 256 case LHREQ_INITIALIZE:
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 275f23c2deb4..a7f64a9d67e0 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -391,7 +391,7 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
391{ 391{
392 unsigned int i; 392 unsigned int i;
393 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) 393 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
394 if (lg->pgdirs[i].gpgdir == pgtable) 394 if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
395 break; 395 break;
396 return i; 396 return i;
397} 397}
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 51a112815f46..bd8a1d14b45d 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -698,7 +698,8 @@ static int media_bay_suspend(struct macio_dev *mdev, pm_message_t state)
698{ 698{
699 struct media_bay_info *bay = macio_get_drvdata(mdev); 699 struct media_bay_info *bay = macio_get_drvdata(mdev);
700 700
701 if (state.event != mdev->ofdev.dev.power.power_state.event && state.event == PM_EVENT_SUSPEND) { 701 if (state.event != mdev->ofdev.dev.power.power_state.event
702 && (state.event & PM_EVENT_SLEEP)) {
702 down(&bay->lock); 703 down(&bay->lock);
703 bay->sleeping = 1; 704 bay->sleeping = 1;
704 set_mb_power(bay, 0); 705 set_mb_power(bay, 0);
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index 741a2e3f4fc6..a348bb0791d3 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -17,7 +17,7 @@
17 17
18static struct backlight_ops pmu_backlight_data; 18static struct backlight_ops pmu_backlight_data;
19static DEFINE_SPINLOCK(pmu_backlight_lock); 19static DEFINE_SPINLOCK(pmu_backlight_lock);
20static int sleeping; 20static int sleeping, uses_pmu_bl;
21static u8 bl_curve[FB_BACKLIGHT_LEVELS]; 21static u8 bl_curve[FB_BACKLIGHT_LEVELS];
22 22
23static void pmu_backlight_init_curve(u8 off, u8 min, u8 max) 23static void pmu_backlight_init_curve(u8 off, u8 min, u8 max)
@@ -128,7 +128,7 @@ void pmu_backlight_set_sleep(int sleep)
128 128
129 spin_lock_irqsave(&pmu_backlight_lock, flags); 129 spin_lock_irqsave(&pmu_backlight_lock, flags);
130 sleeping = sleep; 130 sleeping = sleep;
131 if (pmac_backlight) { 131 if (pmac_backlight && uses_pmu_bl) {
132 if (sleep) { 132 if (sleep) {
133 struct adb_request req; 133 struct adb_request req;
134 134
@@ -166,6 +166,7 @@ void __init pmu_backlight_init()
166 printk(KERN_ERR "PMU Backlight registration failed\n"); 166 printk(KERN_ERR "PMU Backlight registration failed\n");
167 return; 167 return;
168 } 168 }
169 uses_pmu_bl = 1;
169 bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1; 170 bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
170 pmu_backlight_init_curve(0x7F, 0x46, 0x0E); 171 pmu_backlight_init_curve(0x7F, 0x46, 0x0E);
171 172
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index ebec663d5d37..d6365a9f0637 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2528,7 +2528,7 @@ EXPORT_SYMBOL(pmu_wait_complete);
2528EXPORT_SYMBOL(pmu_suspend); 2528EXPORT_SYMBOL(pmu_suspend);
2529EXPORT_SYMBOL(pmu_resume); 2529EXPORT_SYMBOL(pmu_resume);
2530EXPORT_SYMBOL(pmu_unlock); 2530EXPORT_SYMBOL(pmu_unlock);
2531#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) 2531#if defined(CONFIG_PPC32)
2532EXPORT_SYMBOL(pmu_enable_irled); 2532EXPORT_SYMBOL(pmu_enable_irled);
2533EXPORT_SYMBOL(pmu_battery_count); 2533EXPORT_SYMBOL(pmu_battery_count);
2534EXPORT_SYMBOL(pmu_batteries); 2534EXPORT_SYMBOL(pmu_batteries);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7aeceedcf7d4..c14dacdacfac 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1045,8 +1045,14 @@ void bitmap_daemon_work(struct bitmap *bitmap)
1045 if (bitmap == NULL) 1045 if (bitmap == NULL)
1046 return; 1046 return;
1047 if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) 1047 if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
1048 return; 1048 goto done;
1049
1049 bitmap->daemon_lastrun = jiffies; 1050 bitmap->daemon_lastrun = jiffies;
1051 if (bitmap->allclean) {
1052 bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
1053 return;
1054 }
1055 bitmap->allclean = 1;
1050 1056
1051 for (j = 0; j < bitmap->chunks; j++) { 1057 for (j = 0; j < bitmap->chunks; j++) {
1052 bitmap_counter_t *bmc; 1058 bitmap_counter_t *bmc;
@@ -1068,8 +1074,10 @@ void bitmap_daemon_work(struct bitmap *bitmap)
1068 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); 1074 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
1069 1075
1070 spin_unlock_irqrestore(&bitmap->lock, flags); 1076 spin_unlock_irqrestore(&bitmap->lock, flags);
1071 if (need_write) 1077 if (need_write) {
1072 write_page(bitmap, page, 0); 1078 write_page(bitmap, page, 0);
1079 bitmap->allclean = 0;
1080 }
1073 continue; 1081 continue;
1074 } 1082 }
1075 1083
@@ -1098,6 +1106,9 @@ void bitmap_daemon_work(struct bitmap *bitmap)
1098/* 1106/*
1099 if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc); 1107 if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc);
1100*/ 1108*/
1109 if (*bmc)
1110 bitmap->allclean = 0;
1111
1101 if (*bmc == 2) { 1112 if (*bmc == 2) {
1102 *bmc=1; /* maybe clear the bit next time */ 1113 *bmc=1; /* maybe clear the bit next time */
1103 set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); 1114 set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
@@ -1132,6 +1143,9 @@ void bitmap_daemon_work(struct bitmap *bitmap)
1132 } 1143 }
1133 } 1144 }
1134 1145
1146 done:
1147 if (bitmap->allclean == 0)
1148 bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ;
1135} 1149}
1136 1150
1137static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, 1151static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
@@ -1226,6 +1240,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
1226 sectors -= blocks; 1240 sectors -= blocks;
1227 else sectors = 0; 1241 else sectors = 0;
1228 } 1242 }
1243 bitmap->allclean = 0;
1229 return 0; 1244 return 0;
1230} 1245}
1231 1246
@@ -1296,6 +1311,7 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
1296 } 1311 }
1297 } 1312 }
1298 spin_unlock_irq(&bitmap->lock); 1313 spin_unlock_irq(&bitmap->lock);
1314 bitmap->allclean = 0;
1299 return rv; 1315 return rv;
1300} 1316}
1301 1317
@@ -1332,6 +1348,7 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int ab
1332 } 1348 }
1333 unlock: 1349 unlock:
1334 spin_unlock_irqrestore(&bitmap->lock, flags); 1350 spin_unlock_irqrestore(&bitmap->lock, flags);
1351 bitmap->allclean = 0;
1335} 1352}
1336 1353
1337void bitmap_close_sync(struct bitmap *bitmap) 1354void bitmap_close_sync(struct bitmap *bitmap)
@@ -1399,7 +1416,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
1399 set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); 1416 set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
1400 } 1417 }
1401 spin_unlock_irq(&bitmap->lock); 1418 spin_unlock_irq(&bitmap->lock);
1402 1419 bitmap->allclean = 0;
1403} 1420}
1404 1421
1405/* dirty the memory and file bits for bitmap chunks "s" to "e" */ 1422/* dirty the memory and file bits for bitmap chunks "s" to "e" */
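
The bitmap changes above implement a simple idle detector: each daemon pass starts by assuming the bitmap is all-clean, any work found (a set counter, a page needing a write, a new write started elsewhere) clears allclean, and a pass that completes with allclean still set lets the next pass stretch the wake-up interval to MAX_SCHEDULE_TIMEOUT. A toy model of that back-off, with invented timeout values:

    #include <stdio.h>

    #define SHORT_TIMEOUT    1      /* stands in for daemon_sleep */
    #define LONG_TIMEOUT  1000      /* stands in for MAX_SCHEDULE_TIMEOUT */

    static int allclean;
    static int timeout = SHORT_TIMEOUT;

    static void write_arrives(void)
    {
        allclean = 0;               /* bitmap_startwrite() and friends do this */
    }

    static void daemon_pass(int dirty_chunks)
    {
        if (allclean) {             /* previous pass found nothing: back off */
            timeout = LONG_TIMEOUT;
            return;
        }
        allclean = 1;               /* assume clean, let the scan prove otherwise */
        if (dirty_chunks)
            allclean = 0;
        if (!allclean)
            timeout = SHORT_TIMEOUT;
    }

    int main(void)
    {
        daemon_pass(3); printf("after busy pass : timeout=%d\n", timeout);
        daemon_pass(0); printf("after clean pass: timeout=%d\n", timeout);
        daemon_pass(0); printf("after idle pass : timeout=%d\n", timeout);
        write_arrives();
        daemon_pass(1); printf("after new write : timeout=%d\n", timeout);
        return 0;
    }
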
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 7da6ec244e15..ccbbf63727cc 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1105,7 +1105,11 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1105 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; 1105 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1106 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; 1106 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
1107 if (rdev->sb_size & bmask) 1107 if (rdev->sb_size & bmask)
1108 rdev-> sb_size = (rdev->sb_size | bmask)+1; 1108 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1109
1110 if (minor_version
1111 && rdev->data_offset < sb_offset + (rdev->sb_size/512))
1112 return -EINVAL;
1109 1113
1110 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) 1114 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1111 rdev->desc_nr = -1; 1115 rdev->desc_nr = -1;
@@ -1137,7 +1141,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1137 else 1141 else
1138 ret = 0; 1142 ret = 0;
1139 } 1143 }
1140 if (minor_version) 1144 if (minor_version)
1141 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; 1145 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
1142 else 1146 else
1143 rdev->size = rdev->sb_offset; 1147 rdev->size = rdev->sb_offset;
@@ -1499,7 +1503,8 @@ static void export_rdev(mdk_rdev_t * rdev)
1499 free_disk_sb(rdev); 1503 free_disk_sb(rdev);
1500 list_del_init(&rdev->same_set); 1504 list_del_init(&rdev->same_set);
1501#ifndef MODULE 1505#ifndef MODULE
1502 md_autodetect_dev(rdev->bdev->bd_dev); 1506 if (test_bit(AutoDetected, &rdev->flags))
1507 md_autodetect_dev(rdev->bdev->bd_dev);
1503#endif 1508#endif
1504 unlock_rdev(rdev); 1509 unlock_rdev(rdev);
1505 kobject_put(&rdev->kobj); 1510 kobject_put(&rdev->kobj);
@@ -1996,9 +2001,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1996 char *e; 2001 char *e;
1997 unsigned long long size = simple_strtoull(buf, &e, 10); 2002 unsigned long long size = simple_strtoull(buf, &e, 10);
1998 unsigned long long oldsize = rdev->size; 2003 unsigned long long oldsize = rdev->size;
2004 mddev_t *my_mddev = rdev->mddev;
2005
1999 if (e==buf || (*e && *e != '\n')) 2006 if (e==buf || (*e && *e != '\n'))
2000 return -EINVAL; 2007 return -EINVAL;
2001 if (rdev->mddev->pers) 2008 if (my_mddev->pers)
2002 return -EBUSY; 2009 return -EBUSY;
2003 rdev->size = size; 2010 rdev->size = size;
2004 if (size > oldsize && rdev->mddev->external) { 2011 if (size > oldsize && rdev->mddev->external) {
@@ -2011,7 +2018,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2011 int overlap = 0; 2018 int overlap = 0;
2012 struct list_head *tmp, *tmp2; 2019 struct list_head *tmp, *tmp2;
2013 2020
2014 mddev_unlock(rdev->mddev); 2021 mddev_unlock(my_mddev);
2015 for_each_mddev(mddev, tmp) { 2022 for_each_mddev(mddev, tmp) {
2016 mdk_rdev_t *rdev2; 2023 mdk_rdev_t *rdev2;
2017 2024
@@ -2031,7 +2038,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2031 break; 2038 break;
2032 } 2039 }
2033 } 2040 }
2034 mddev_lock(rdev->mddev); 2041 mddev_lock(my_mddev);
2035 if (overlap) { 2042 if (overlap) {
2036 /* Someone else could have slipped in a size 2043 /* Someone else could have slipped in a size
2037 * change here, but doing so is just silly. 2044 * change here, but doing so is just silly.
@@ -2043,8 +2050,8 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2043 return -EBUSY; 2050 return -EBUSY;
2044 } 2051 }
2045 } 2052 }
2046 if (size < rdev->mddev->size || rdev->mddev->size == 0) 2053 if (size < my_mddev->size || my_mddev->size == 0)
2047 rdev->mddev->size = size; 2054 my_mddev->size = size;
2048 return len; 2055 return len;
2049} 2056}
2050 2057
@@ -2065,10 +2072,21 @@ rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2065{ 2072{
2066 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2073 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2067 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 2074 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2075 mddev_t *mddev = rdev->mddev;
2076 ssize_t rv;
2068 2077
2069 if (!entry->show) 2078 if (!entry->show)
2070 return -EIO; 2079 return -EIO;
2071 return entry->show(rdev, page); 2080
2081 rv = mddev ? mddev_lock(mddev) : -EBUSY;
2082 if (!rv) {
2083 if (rdev->mddev == NULL)
2084 rv = -EBUSY;
2085 else
2086 rv = entry->show(rdev, page);
2087 mddev_unlock(mddev);
2088 }
2089 return rv;
2072} 2090}
2073 2091
2074static ssize_t 2092static ssize_t
@@ -2077,15 +2095,19 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
2077{ 2095{
2078 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2096 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
2079 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 2097 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
2080 int rv; 2098 ssize_t rv;
2099 mddev_t *mddev = rdev->mddev;
2081 2100
2082 if (!entry->store) 2101 if (!entry->store)
2083 return -EIO; 2102 return -EIO;
2084 if (!capable(CAP_SYS_ADMIN)) 2103 if (!capable(CAP_SYS_ADMIN))
2085 return -EACCES; 2104 return -EACCES;
2086 rv = mddev_lock(rdev->mddev); 2105 rv = mddev ? mddev_lock(mddev): -EBUSY;
2087 if (!rv) { 2106 if (!rv) {
2088 rv = entry->store(rdev, page, length); 2107 if (rdev->mddev == NULL)
2108 rv = -EBUSY;
2109 else
2110 rv = entry->store(rdev, page, length);
2089 mddev_unlock(rdev->mddev); 2111 mddev_unlock(rdev->mddev);
2090 } 2112 }
2091 return rv; 2113 return rv;
@@ -5127,7 +5149,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
5127 if (mddev->ro==1) 5149 if (mddev->ro==1)
5128 seq_printf(seq, " (read-only)"); 5150 seq_printf(seq, " (read-only)");
5129 if (mddev->ro==2) 5151 if (mddev->ro==2)
5130 seq_printf(seq, "(auto-read-only)"); 5152 seq_printf(seq, " (auto-read-only)");
5131 seq_printf(seq, " %s", mddev->pers->name); 5153 seq_printf(seq, " %s", mddev->pers->name);
5132 } 5154 }
5133 5155
@@ -5351,6 +5373,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
5351 mddev->ro = 0; 5373 mddev->ro = 0;
5352 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5374 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5353 md_wakeup_thread(mddev->thread); 5375 md_wakeup_thread(mddev->thread);
5376 md_wakeup_thread(mddev->sync_thread);
5354 } 5377 }
5355 atomic_inc(&mddev->writes_pending); 5378 atomic_inc(&mddev->writes_pending);
5356 if (mddev->in_sync) { 5379 if (mddev->in_sync) {
@@ -6021,6 +6044,7 @@ static void autostart_arrays(int part)
6021 MD_BUG(); 6044 MD_BUG();
6022 continue; 6045 continue;
6023 } 6046 }
6047 set_bit(AutoDetected, &rdev->flags);
6024 list_add(&rdev->same_set, &pending_raid_disks); 6048 list_add(&rdev->same_set, &pending_raid_disks);
6025 i_passed++; 6049 i_passed++;
6026 } 6050 }
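
Several of the md changes above guard against an rdev being detached from its array while a sysfs handler runs: the handler snapshots rdev->mddev, takes the mddev lock, and re-checks that rdev->mddev is still non-NULL before calling the attribute method (rdev_size_store similarly caches my_mddev across its unlock/lock window). A compressed pthread sketch of that check-under-lock pattern; the structure and function names are invented:

    #include <pthread.h>
    #include <stdio.h>
    #include <errno.h>

    struct array {
        pthread_mutex_t lock;
    };

    struct member {
        struct array *owner;        /* cleared when the member is detached */
    };

    static int show_attr(struct member *m)
    {
        struct array *owner = m->owner;     /* snapshot before locking */
        int rv;

        if (!owner)
            return -EBUSY;
        pthread_mutex_lock(&owner->lock);
        if (!m->owner)                      /* detached while we waited for the lock? */
            rv = -EBUSY;
        else
            rv = 0;                         /* safe to touch the member here */
        pthread_mutex_unlock(&owner->lock);
        return rv;
    }

    int main(void)
    {
        struct array md0 = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct member rdev = { .owner = &md0 };

        printf("attached: %d\n", show_attr(&rdev));
        rdev.owner = NULL;
        printf("detached: %d\n", show_attr(&rdev));
        return 0;
    }
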
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 5c7fef091cec..ff61b309129a 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -592,6 +592,37 @@ static int raid1_congested(void *data, int bits)
592} 592}
593 593
594 594
595static int flush_pending_writes(conf_t *conf)
596{
597 /* Any writes that have been queued but are awaiting
598 * bitmap updates get flushed here.
599 * We return 1 if any requests were actually submitted.
600 */
601 int rv = 0;
602
603 spin_lock_irq(&conf->device_lock);
604
605 if (conf->pending_bio_list.head) {
606 struct bio *bio;
607 bio = bio_list_get(&conf->pending_bio_list);
608 blk_remove_plug(conf->mddev->queue);
609 spin_unlock_irq(&conf->device_lock);
610 /* flush any pending bitmap writes to
611 * disk before proceeding w/ I/O */
612 bitmap_unplug(conf->mddev->bitmap);
613
614 while (bio) { /* submit pending writes */
615 struct bio *next = bio->bi_next;
616 bio->bi_next = NULL;
617 generic_make_request(bio);
618 bio = next;
619 }
620 rv = 1;
621 } else
622 spin_unlock_irq(&conf->device_lock);
623 return rv;
624}
625
595/* Barriers.... 626/* Barriers....
596 * Sometimes we need to suspend IO while we do something else, 627 * Sometimes we need to suspend IO while we do something else,
597 * either some resync/recovery, or reconfigure the array. 628 * either some resync/recovery, or reconfigure the array.
@@ -673,15 +704,23 @@ static void freeze_array(conf_t *conf)
673 /* stop syncio and normal IO and wait for everything to 704 /* stop syncio and normal IO and wait for everything to
674 * go quite. 705 * go quite.
675 * We increment barrier and nr_waiting, and then 706 * We increment barrier and nr_waiting, and then
676 * wait until barrier+nr_pending match nr_queued+2 707 * wait until nr_pending match nr_queued+1
708 * This is called in the context of one normal IO request
709 * that has failed. Thus any sync request that might be pending
710 * will be blocked by nr_pending, and we need to wait for
711 * pending IO requests to complete or be queued for re-try.
712 * Thus the number queued (nr_queued) plus this request (1)
713 * must match the number of pending IOs (nr_pending) before
714 * we continue.
677 */ 715 */
678 spin_lock_irq(&conf->resync_lock); 716 spin_lock_irq(&conf->resync_lock);
679 conf->barrier++; 717 conf->barrier++;
680 conf->nr_waiting++; 718 conf->nr_waiting++;
681 wait_event_lock_irq(conf->wait_barrier, 719 wait_event_lock_irq(conf->wait_barrier,
682 conf->barrier+conf->nr_pending == conf->nr_queued+2, 720 conf->nr_pending == conf->nr_queued+1,
683 conf->resync_lock, 721 conf->resync_lock,
684 raid1_unplug(conf->mddev->queue)); 722 ({ flush_pending_writes(conf);
723 raid1_unplug(conf->mddev->queue); }));
685 spin_unlock_irq(&conf->resync_lock); 724 spin_unlock_irq(&conf->resync_lock);
686} 725}
687static void unfreeze_array(conf_t *conf) 726static void unfreeze_array(conf_t *conf)
@@ -907,6 +946,9 @@ static int make_request(struct request_queue *q, struct bio * bio)
907 blk_plug_device(mddev->queue); 946 blk_plug_device(mddev->queue);
908 spin_unlock_irqrestore(&conf->device_lock, flags); 947 spin_unlock_irqrestore(&conf->device_lock, flags);
909 948
949 /* In case raid1d snuck into freeze_array */
950 wake_up(&conf->wait_barrier);
951
910 if (do_sync) 952 if (do_sync)
911 md_wakeup_thread(mddev->thread); 953 md_wakeup_thread(mddev->thread);
912#if 0 954#if 0
@@ -1473,28 +1515,14 @@ static void raid1d(mddev_t *mddev)
1473 1515
1474 for (;;) { 1516 for (;;) {
1475 char b[BDEVNAME_SIZE]; 1517 char b[BDEVNAME_SIZE];
1476 spin_lock_irqsave(&conf->device_lock, flags);
1477
1478 if (conf->pending_bio_list.head) {
1479 bio = bio_list_get(&conf->pending_bio_list);
1480 blk_remove_plug(mddev->queue);
1481 spin_unlock_irqrestore(&conf->device_lock, flags);
1482 /* flush any pending bitmap writes to disk before proceeding w/ I/O */
1483 bitmap_unplug(mddev->bitmap);
1484 1518
1485 while (bio) { /* submit pending writes */ 1519 unplug += flush_pending_writes(conf);
1486 struct bio *next = bio->bi_next;
1487 bio->bi_next = NULL;
1488 generic_make_request(bio);
1489 bio = next;
1490 }
1491 unplug = 1;
1492 1520
1493 continue; 1521 spin_lock_irqsave(&conf->device_lock, flags);
1494 } 1522 if (list_empty(head)) {
1495 1523 spin_unlock_irqrestore(&conf->device_lock, flags);
1496 if (list_empty(head))
1497 break; 1524 break;
1525 }
1498 r1_bio = list_entry(head->prev, r1bio_t, retry_list); 1526 r1_bio = list_entry(head->prev, r1bio_t, retry_list);
1499 list_del(head->prev); 1527 list_del(head->prev);
1500 conf->nr_queued--; 1528 conf->nr_queued--;
@@ -1590,7 +1618,6 @@ static void raid1d(mddev_t *mddev)
1590 } 1618 }
1591 } 1619 }
1592 } 1620 }
1593 spin_unlock_irqrestore(&conf->device_lock, flags);
1594 if (unplug) 1621 if (unplug)
1595 unplug_slaves(mddev); 1622 unplug_slaves(mddev);
1596} 1623}
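
The raid1 rework above (raid10 below gets the same treatment) moves the "submit writes that were waiting for bitmap updates" logic into flush_pending_writes(): the queued bios are detached while device_lock is held, the lock is dropped, and only then are the bios submitted, so generic_make_request() never runs under the spinlock and the freeze_array() wait condition can call the helper too. A userspace sketch of that drain-then-submit shape, with a mutex standing in for the spinlock and printf for bio submission:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct bio {
        int id;
        struct bio *next;
    };

    static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct bio *pending;

    /* Returns 1 if any queued writes were submitted, 0 otherwise. */
    static int flush_pending_writes(void)
    {
        struct bio *bio;

        pthread_mutex_lock(&device_lock);
        bio = pending;                  /* detach the whole list under the lock */
        pending = NULL;
        pthread_mutex_unlock(&device_lock);

        if (!bio)
            return 0;

        /* bitmap_unplug() would go here, before the writes hit the disks */
        while (bio) {                   /* submit outside the lock */
            struct bio *next = bio->next;
            printf("submitting bio %d\n", bio->id);
            free(bio);
            bio = next;
        }
        return 1;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct bio *b = malloc(sizeof(*b));
            if (!b)
                return 1;
            b->id = i;
            pthread_mutex_lock(&device_lock);
            b->next = pending;
            pending = b;
            pthread_mutex_unlock(&device_lock);
        }
        printf("flushed: %d\n", flush_pending_writes());
        return 0;
    }
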
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 017f58113c33..32389d2f18fc 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -537,7 +537,8 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
537 current_distance = abs(r10_bio->devs[slot].addr - 537 current_distance = abs(r10_bio->devs[slot].addr -
538 conf->mirrors[disk].head_position); 538 conf->mirrors[disk].head_position);
539 539
540 /* Find the disk whose head is closest */ 540 /* Find the disk whose head is closest,
541 * or - for far > 1 - find the closest to partition beginning */
541 542
542 for (nslot = slot; nslot < conf->copies; nslot++) { 543 for (nslot = slot; nslot < conf->copies; nslot++) {
543 int ndisk = r10_bio->devs[nslot].devnum; 544 int ndisk = r10_bio->devs[nslot].devnum;
@@ -557,8 +558,13 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
557 slot = nslot; 558 slot = nslot;
558 break; 559 break;
559 } 560 }
560 new_distance = abs(r10_bio->devs[nslot].addr - 561
561 conf->mirrors[ndisk].head_position); 562 /* for far > 1 always use the lowest address */
563 if (conf->far_copies > 1)
564 new_distance = r10_bio->devs[nslot].addr;
565 else
566 new_distance = abs(r10_bio->devs[nslot].addr -
567 conf->mirrors[ndisk].head_position);
562 if (new_distance < current_distance) { 568 if (new_distance < current_distance) {
563 current_distance = new_distance; 569 current_distance = new_distance;
564 disk = ndisk; 570 disk = ndisk;
@@ -629,7 +635,36 @@ static int raid10_congested(void *data, int bits)
629 return ret; 635 return ret;
630} 636}
631 637
632 638static int flush_pending_writes(conf_t *conf)
639{
640 /* Any writes that have been queued but are awaiting
641 * bitmap updates get flushed here.
642 * We return 1 if any requests were actually submitted.
643 */
644 int rv = 0;
645
646 spin_lock_irq(&conf->device_lock);
647
648 if (conf->pending_bio_list.head) {
649 struct bio *bio;
650 bio = bio_list_get(&conf->pending_bio_list);
651 blk_remove_plug(conf->mddev->queue);
652 spin_unlock_irq(&conf->device_lock);
653 /* flush any pending bitmap writes to disk
654 * before proceeding w/ I/O */
655 bitmap_unplug(conf->mddev->bitmap);
656
657 while (bio) { /* submit pending writes */
658 struct bio *next = bio->bi_next;
659 bio->bi_next = NULL;
660 generic_make_request(bio);
661 bio = next;
662 }
663 rv = 1;
664 } else
665 spin_unlock_irq(&conf->device_lock);
666 return rv;
667}
633/* Barriers.... 668/* Barriers....
634 * Sometimes we need to suspend IO while we do something else, 669 * Sometimes we need to suspend IO while we do something else,
635 * either some resync/recovery, or reconfigure the array. 670 * either some resync/recovery, or reconfigure the array.
@@ -712,15 +747,23 @@ static void freeze_array(conf_t *conf)
712 /* stop syncio and normal IO and wait for everything to 747 /* stop syncio and normal IO and wait for everything to
713 * go quiet. 748 * go quiet.
714 * We increment barrier and nr_waiting, and then 749 * We increment barrier and nr_waiting, and then
715 * wait until barrier+nr_pending match nr_queued+2 750 * wait until nr_pending match nr_queued+1
751 * This is called in the context of one normal IO request
752 * that has failed. Thus any sync request that might be pending
753 * will be blocked by nr_pending, and we need to wait for
754 * pending IO requests to complete or be queued for re-try.
755 * Thus the number queued (nr_queued) plus this request (1)
756 * must match the number of pending IOs (nr_pending) before
757 * we continue.
716 */ 758 */
717 spin_lock_irq(&conf->resync_lock); 759 spin_lock_irq(&conf->resync_lock);
718 conf->barrier++; 760 conf->barrier++;
719 conf->nr_waiting++; 761 conf->nr_waiting++;
720 wait_event_lock_irq(conf->wait_barrier, 762 wait_event_lock_irq(conf->wait_barrier,
721 conf->barrier+conf->nr_pending == conf->nr_queued+2, 763 conf->nr_pending == conf->nr_queued+1,
722 conf->resync_lock, 764 conf->resync_lock,
723 raid10_unplug(conf->mddev->queue)); 765 ({ flush_pending_writes(conf);
766 raid10_unplug(conf->mddev->queue); }));
724 spin_unlock_irq(&conf->resync_lock); 767 spin_unlock_irq(&conf->resync_lock);
725} 768}
726 769
@@ -892,6 +935,9 @@ static int make_request(struct request_queue *q, struct bio * bio)
892 blk_plug_device(mddev->queue); 935 blk_plug_device(mddev->queue);
893 spin_unlock_irqrestore(&conf->device_lock, flags); 936 spin_unlock_irqrestore(&conf->device_lock, flags);
894 937
938 /* In case raid10d snuck in to freeze_array */
939 wake_up(&conf->wait_barrier);
940
895 if (do_sync) 941 if (do_sync)
896 md_wakeup_thread(mddev->thread); 942 md_wakeup_thread(mddev->thread);
897 943
@@ -1464,28 +1510,14 @@ static void raid10d(mddev_t *mddev)
1464 1510
1465 for (;;) { 1511 for (;;) {
1466 char b[BDEVNAME_SIZE]; 1512 char b[BDEVNAME_SIZE];
1467 spin_lock_irqsave(&conf->device_lock, flags);
1468 1513
1469 if (conf->pending_bio_list.head) { 1514 unplug += flush_pending_writes(conf);
1470 bio = bio_list_get(&conf->pending_bio_list);
1471 blk_remove_plug(mddev->queue);
1472 spin_unlock_irqrestore(&conf->device_lock, flags);
1473 /* flush any pending bitmap writes to disk before proceeding w/ I/O */
1474 bitmap_unplug(mddev->bitmap);
1475
1476 while (bio) { /* submit pending writes */
1477 struct bio *next = bio->bi_next;
1478 bio->bi_next = NULL;
1479 generic_make_request(bio);
1480 bio = next;
1481 }
1482 unplug = 1;
1483
1484 continue;
1485 }
1486 1515
1487 if (list_empty(head)) 1516 spin_lock_irqsave(&conf->device_lock, flags);
1517 if (list_empty(head)) {
1518 spin_unlock_irqrestore(&conf->device_lock, flags);
1488 break; 1519 break;
1520 }
1489 r10_bio = list_entry(head->prev, r10bio_t, retry_list); 1521 r10_bio = list_entry(head->prev, r10bio_t, retry_list);
1490 list_del(head->prev); 1522 list_del(head->prev);
1491 conf->nr_queued--; 1523 conf->nr_queued--;
@@ -1548,7 +1580,6 @@ static void raid10d(mddev_t *mddev)
1548 } 1580 }
1549 } 1581 }
1550 } 1582 }
1551 spin_unlock_irqrestore(&conf->device_lock, flags);
1552 if (unplug) 1583 if (unplug)
1553 unplug_slaves(mddev); 1584 unplug_slaves(mddev);
1554} 1585}
@@ -1787,6 +1818,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
1787 if (j == conf->copies) { 1818 if (j == conf->copies) {
1788 /* Cannot recover, so abort the recovery */ 1819 /* Cannot recover, so abort the recovery */
1789 put_buf(r10_bio); 1820 put_buf(r10_bio);
1821 if (rb2)
1822 atomic_dec(&rb2->remaining);
1790 r10_bio = rb2; 1823 r10_bio = rb2;
1791 if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery)) 1824 if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery))
1792 printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n", 1825 printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n",
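
Besides sharing the flush_pending_writes() change with raid1, the raid10 hunk alters read balancing: when far_copies > 1 the "distance" for a candidate copy becomes its block address itself, so reads prefer the copy stored lowest on its device rather than the one nearest the current head position. A tiny comparison of the two metrics with made-up numbers:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        long head_position = 900000;            /* where the disk head happens to be */
        long copy_addr[2]  = { 1000, 1000000 }; /* low-address copy vs. far copy */

        for (int i = 0; i < 2; i++) {
            long head_distance = labs(copy_addr[i] - head_position);
            long far_metric    = copy_addr[i];  /* far_copies > 1: lowest address wins */
            printf("copy %d: head-distance=%ld  far-metric=%ld\n",
                   i, head_distance, far_metric);
        }
        return 0;
    }

With the head-distance metric the copy at 1000000 wins here (it is closer to the head at 900000); the lowest-address metric picks the copy at 1000, keeping far-layout reads in the low-address region of each member.
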
diff --git a/drivers/memstick/Kconfig b/drivers/memstick/Kconfig
index 1093fdb07297..f0ca41c20323 100644
--- a/drivers/memstick/Kconfig
+++ b/drivers/memstick/Kconfig
@@ -8,7 +8,7 @@ menuconfig MEMSTICK
8 Sony MemoryStick is a proprietary storage/extension card protocol. 8 Sony MemoryStick is a proprietary storage/extension card protocol.
9 9
10 If you want MemoryStick support, you should say Y here and also 10 If you want MemoryStick support, you should say Y here and also
11 to the specific driver for your MMC interface. 11 to the specific driver for your MemoryStick interface.
12 12
13if MEMSTICK 13if MEMSTICK
14 14
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index bba467fe4bce..de80dba12f9b 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -18,7 +18,6 @@
18#include <linux/delay.h> 18#include <linux/delay.h>
19 19
20#define DRIVER_NAME "memstick" 20#define DRIVER_NAME "memstick"
21#define DRIVER_VERSION "0.2"
22 21
23static unsigned int cmd_retries = 3; 22static unsigned int cmd_retries = 3;
24module_param(cmd_retries, uint, 0644); 23module_param(cmd_retries, uint, 0644);
@@ -236,7 +235,7 @@ int memstick_next_req(struct memstick_host *host, struct memstick_request **mrq)
236 rc = host->card->next_request(host->card, mrq); 235 rc = host->card->next_request(host->card, mrq);
237 236
238 if (!rc) 237 if (!rc)
239 host->retries = cmd_retries; 238 host->retries = cmd_retries > 1 ? cmd_retries - 1 : 1;
240 else 239 else
241 *mrq = NULL; 240 *mrq = NULL;
242 241
@@ -271,7 +270,7 @@ void memstick_init_req_sg(struct memstick_request *mrq, unsigned char tpc,
271 mrq->data_dir = READ; 270 mrq->data_dir = READ;
272 271
273 mrq->sg = *sg; 272 mrq->sg = *sg;
274 mrq->io_type = MEMSTICK_IO_SG; 273 mrq->long_data = 1;
275 274
276 if (tpc == MS_TPC_SET_CMD || tpc == MS_TPC_EX_SET_CMD) 275 if (tpc == MS_TPC_SET_CMD || tpc == MS_TPC_EX_SET_CMD)
277 mrq->need_card_int = 1; 276 mrq->need_card_int = 1;
@@ -306,7 +305,7 @@ void memstick_init_req(struct memstick_request *mrq, unsigned char tpc,
306 if (mrq->data_dir == WRITE) 305 if (mrq->data_dir == WRITE)
307 memcpy(mrq->data, buf, mrq->data_len); 306 memcpy(mrq->data, buf, mrq->data_len);
308 307
309 mrq->io_type = MEMSTICK_IO_VAL; 308 mrq->long_data = 0;
310 309
311 if (tpc == MS_TPC_SET_CMD || tpc == MS_TPC_EX_SET_CMD) 310 if (tpc == MS_TPC_SET_CMD || tpc == MS_TPC_EX_SET_CMD)
312 mrq->need_card_int = 1; 311 mrq->need_card_int = 1;
@@ -561,6 +560,31 @@ void memstick_free_host(struct memstick_host *host)
561} 560}
562EXPORT_SYMBOL(memstick_free_host); 561EXPORT_SYMBOL(memstick_free_host);
563 562
563/**
564 * memstick_suspend_host - notify bus driver of host suspension
565 * @host - host to use
566 */
567void memstick_suspend_host(struct memstick_host *host)
568{
569 mutex_lock(&host->lock);
570 host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
571 mutex_unlock(&host->lock);
572}
573EXPORT_SYMBOL(memstick_suspend_host);
574
575/**
576 * memstick_resume_host - notify bus driver of host resumption
577 * @host - host to use
578 */
579void memstick_resume_host(struct memstick_host *host)
580{
581 mutex_lock(&host->lock);
582 host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON);
583 mutex_unlock(&host->lock);
584 memstick_detect_change(host);
585}
586EXPORT_SYMBOL(memstick_resume_host);
587
564int memstick_register_driver(struct memstick_driver *drv) 588int memstick_register_driver(struct memstick_driver *drv)
565{ 589{
566 drv->driver.bus = &memstick_bus_type; 590 drv->driver.bus = &memstick_bus_type;
@@ -611,4 +635,3 @@ module_exit(memstick_exit);
611MODULE_AUTHOR("Alex Dubov"); 635MODULE_AUTHOR("Alex Dubov");
612MODULE_LICENSE("GPL"); 636MODULE_LICENSE("GPL");
613MODULE_DESCRIPTION("Sony MemoryStick core driver"); 637MODULE_DESCRIPTION("Sony MemoryStick core driver");
614MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 423ad8cf4bb9..1d637e4561d3 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -16,10 +16,10 @@
16#include <linux/idr.h> 16#include <linux/idr.h>
17#include <linux/hdreg.h> 17#include <linux/hdreg.h>
18#include <linux/kthread.h> 18#include <linux/kthread.h>
19#include <linux/delay.h>
19#include <linux/memstick.h> 20#include <linux/memstick.h>
20 21
21#define DRIVER_NAME "mspro_block" 22#define DRIVER_NAME "mspro_block"
22#define DRIVER_VERSION "0.2"
23 23
24static int major; 24static int major;
25module_param(major, int, 0644); 25module_param(major, int, 0644);
@@ -110,6 +110,17 @@ struct mspro_mbr {
110 unsigned int sectors_per_partition; 110 unsigned int sectors_per_partition;
111} __attribute__((packed)); 111} __attribute__((packed));
112 112
113struct mspro_specfile {
114 char name[8];
115 char ext[3];
116 unsigned char attr;
117 unsigned char reserved[10];
118 unsigned short time;
119 unsigned short date;
120 unsigned short cluster;
121 unsigned int size;
122} __attribute__((packed));
123
113struct mspro_devinfo { 124struct mspro_devinfo {
114 unsigned short cylinders; 125 unsigned short cylinders;
115 unsigned short heads; 126 unsigned short heads;
@@ -293,6 +304,20 @@ static ssize_t mspro_block_attr_show_sysinfo(struct device *dev,
293 dev_attr); 304 dev_attr);
294 struct mspro_sys_info *x_sys = x_attr->data; 305 struct mspro_sys_info *x_sys = x_attr->data;
295 ssize_t rc = 0; 306 ssize_t rc = 0;
307 int date_tz = 0, date_tz_f = 0;
308
309 if (x_sys->assembly_date[0] > 0x80U) {
310 date_tz = (~x_sys->assembly_date[0]) + 1;
311 date_tz_f = date_tz & 3;
312 date_tz >>= 2;
313 date_tz = -date_tz;
314 date_tz_f *= 15;
315 } else if (x_sys->assembly_date[0] < 0x80U) {
316 date_tz = x_sys->assembly_date[0];
317 date_tz_f = date_tz & 3;
318 date_tz >>= 2;
319 date_tz_f *= 15;
320 }
296 321
297 rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "class: %x\n", 322 rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "class: %x\n",
298 x_sys->class); 323 x_sys->class);
@@ -305,8 +330,8 @@ static ssize_t mspro_block_attr_show_sysinfo(struct device *dev,
305 rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "page size: %x\n", 330 rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "page size: %x\n",
306 be16_to_cpu(x_sys->page_size)); 331 be16_to_cpu(x_sys->page_size));
307 rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "assembly date: " 332 rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "assembly date: "
308 "%d %04u-%02u-%02u %02u:%02u:%02u\n", 333 "GMT%+d:%d %04u-%02u-%02u %02u:%02u:%02u\n",
309 x_sys->assembly_date[0], 334 date_tz, date_tz_f,
310 be16_to_cpu(*(unsigned short *) 335 be16_to_cpu(*(unsigned short *)
311 &x_sys->assembly_date[1]), 336 &x_sys->assembly_date[1]),
312 x_sys->assembly_date[3], x_sys->assembly_date[4], 337 x_sys->assembly_date[3], x_sys->assembly_date[4],
@@ -398,6 +423,41 @@ static ssize_t mspro_block_attr_show_mbr(struct device *dev,
398 return rc; 423 return rc;
399} 424}
400 425
426static ssize_t mspro_block_attr_show_specfile(struct device *dev,
427 struct device_attribute *attr,
428 char *buffer)
429{
430 struct mspro_sys_attr *x_attr = container_of(attr,
431 struct mspro_sys_attr,
432 dev_attr);
433 struct mspro_specfile *x_spfile = x_attr->data;
434 char name[9], ext[4];
435 ssize_t rc = 0;
436
437 memcpy(name, x_spfile->name, 8);
438 name[8] = 0;
439 memcpy(ext, x_spfile->ext, 3);
440 ext[3] = 0;
441
442 rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "name: %s\n", name);
443 rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "ext: %s\n", ext);
444 rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "attribute: %x\n",
445 x_spfile->attr);
446 rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "time: %d:%d:%d\n",
447 x_spfile->time >> 11,
448 (x_spfile->time >> 5) & 0x3f,
449 (x_spfile->time & 0x1f) * 2);
450 rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "date: %d-%d-%d\n",
451 (x_spfile->date >> 9) + 1980,
452 (x_spfile->date >> 5) & 0xf,
453 x_spfile->date & 0x1f);
454 rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start cluster: %x\n",
455 x_spfile->cluster);
456 rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "size: %x\n",
457 x_spfile->size);
458 return rc;
459}
460
401static ssize_t mspro_block_attr_show_devinfo(struct device *dev, 461static ssize_t mspro_block_attr_show_devinfo(struct device *dev,
402 struct device_attribute *attr, 462 struct device_attribute *attr,
403 char *buffer) 463 char *buffer)
@@ -430,6 +490,9 @@ static sysfs_show_t mspro_block_attr_show(unsigned char tag)
430 return mspro_block_attr_show_modelname; 490 return mspro_block_attr_show_modelname;
431 case MSPRO_BLOCK_ID_MBR: 491 case MSPRO_BLOCK_ID_MBR:
432 return mspro_block_attr_show_mbr; 492 return mspro_block_attr_show_mbr;
493 case MSPRO_BLOCK_ID_SPECFILEVALUES1:
494 case MSPRO_BLOCK_ID_SPECFILEVALUES2:
495 return mspro_block_attr_show_specfile;
433 case MSPRO_BLOCK_ID_DEVINFO: 496 case MSPRO_BLOCK_ID_DEVINFO:
434 return mspro_block_attr_show_devinfo; 497 return mspro_block_attr_show_devinfo;
435 default: 498 default:
@@ -629,7 +692,7 @@ static void mspro_block_process_request(struct memstick_dev *card,
629 param.system = msb->system; 692 param.system = msb->system;
630 param.data_count = cpu_to_be16(page_count); 693 param.data_count = cpu_to_be16(page_count);
631 param.data_address = cpu_to_be32((uint32_t)t_sec); 694 param.data_address = cpu_to_be32((uint32_t)t_sec);
632 param.cmd_param = 0; 695 param.tpc_param = 0;
633 696
634 msb->data_dir = rq_data_dir(req); 697 msb->data_dir = rq_data_dir(req);
635 msb->transfer_cmd = msb->data_dir == READ 698 msb->transfer_cmd = msb->data_dir == READ
@@ -758,10 +821,10 @@ static int mspro_block_switch_to_parallel(struct memstick_dev *card)
758 struct memstick_host *host = card->host; 821 struct memstick_host *host = card->host;
759 struct mspro_block_data *msb = memstick_get_drvdata(card); 822 struct mspro_block_data *msb = memstick_get_drvdata(card);
760 struct mspro_param_register param = { 823 struct mspro_param_register param = {
761 .system = 0, 824 .system = MEMSTICK_SYS_PAR4,
762 .data_count = 0, 825 .data_count = 0,
763 .data_address = 0, 826 .data_address = 0,
764 .cmd_param = 0 827 .tpc_param = 0
765 }; 828 };
766 829
767 card->next_request = h_mspro_block_req_init; 830 card->next_request = h_mspro_block_req_init;
@@ -773,8 +836,8 @@ static int mspro_block_switch_to_parallel(struct memstick_dev *card)
773 if (card->current_mrq.error) 836 if (card->current_mrq.error)
774 return card->current_mrq.error; 837 return card->current_mrq.error;
775 838
776 msb->system = 0; 839 msb->system = MEMSTICK_SYS_PAR4;
777 host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PARALLEL); 840 host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
778 841
779 card->next_request = h_mspro_block_req_init; 842 card->next_request = h_mspro_block_req_init;
780 msb->mrq_handler = h_mspro_block_default; 843 msb->mrq_handler = h_mspro_block_default;
@@ -783,8 +846,24 @@ static int mspro_block_switch_to_parallel(struct memstick_dev *card)
783 wait_for_completion(&card->mrq_complete); 846 wait_for_completion(&card->mrq_complete);
784 847
785 if (card->current_mrq.error) { 848 if (card->current_mrq.error) {
786 msb->system = 0x80; 849 msb->system = MEMSTICK_SYS_SERIAL;
850 host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
851 msleep(1000);
852 host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON);
787 host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL); 853 host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
854
855 if (memstick_set_rw_addr(card))
856 return card->current_mrq.error;
857
858 param.system = msb->system;
859
860 card->next_request = h_mspro_block_req_init;
861 msb->mrq_handler = h_mspro_block_default;
862 memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, &param,
863 sizeof(param));
864 memstick_new_req(host);
865 wait_for_completion(&card->mrq_complete);
866
788 return -EFAULT; 867 return -EFAULT;
789 } 868 }
790 869
@@ -802,7 +881,7 @@ static int mspro_block_read_attributes(struct memstick_dev *card)
802 .system = msb->system, 881 .system = msb->system,
803 .data_count = cpu_to_be16(1), 882 .data_count = cpu_to_be16(1),
804 .data_address = 0, 883 .data_address = 0,
805 .cmd_param = 0 884 .tpc_param = 0
806 }; 885 };
807 struct mspro_attribute *attr = NULL; 886 struct mspro_attribute *attr = NULL;
808 struct mspro_sys_attr *s_attr = NULL; 887 struct mspro_sys_attr *s_attr = NULL;
@@ -922,7 +1001,7 @@ static int mspro_block_read_attributes(struct memstick_dev *card)
922 param.system = msb->system; 1001 param.system = msb->system;
923 param.data_count = cpu_to_be16((rc / msb->page_size) + 1); 1002 param.data_count = cpu_to_be16((rc / msb->page_size) + 1);
924 param.data_address = cpu_to_be32(addr / msb->page_size); 1003 param.data_address = cpu_to_be32(addr / msb->page_size);
925 param.cmd_param = 0; 1004 param.tpc_param = 0;
926 1005
927 sg_init_one(&msb->req_sg[0], buffer, 1006 sg_init_one(&msb->req_sg[0], buffer,
928 be16_to_cpu(param.data_count) * msb->page_size); 1007 be16_to_cpu(param.data_count) * msb->page_size);
@@ -964,7 +1043,7 @@ static int mspro_block_init_card(struct memstick_dev *card)
964 struct memstick_host *host = card->host; 1043 struct memstick_host *host = card->host;
965 int rc = 0; 1044 int rc = 0;
966 1045
967 msb->system = 0x80; 1046 msb->system = MEMSTICK_SYS_SERIAL;
968 card->reg_addr.r_offset = offsetof(struct mspro_register, status); 1047 card->reg_addr.r_offset = offsetof(struct mspro_register, status);
969 card->reg_addr.r_length = sizeof(struct ms_status_register); 1048 card->reg_addr.r_length = sizeof(struct ms_status_register);
970 card->reg_addr.w_offset = offsetof(struct mspro_register, param); 1049 card->reg_addr.w_offset = offsetof(struct mspro_register, param);
@@ -973,7 +1052,7 @@ static int mspro_block_init_card(struct memstick_dev *card)
973 if (memstick_set_rw_addr(card)) 1052 if (memstick_set_rw_addr(card))
974 return -EIO; 1053 return -EIO;
975 1054
976 if (host->caps & MEMSTICK_CAP_PARALLEL) { 1055 if (host->caps & MEMSTICK_CAP_PAR4) {
977 if (mspro_block_switch_to_parallel(card)) 1056 if (mspro_block_switch_to_parallel(card))
978 printk(KERN_WARNING "%s: could not switch to " 1057 printk(KERN_WARNING "%s: could not switch to "
979 "parallel interface\n", card->dev.bus_id); 1058 "parallel interface\n", card->dev.bus_id);
@@ -1348,4 +1427,3 @@ MODULE_LICENSE("GPL");
1348MODULE_AUTHOR("Alex Dubov"); 1427MODULE_AUTHOR("Alex Dubov");
1349MODULE_DESCRIPTION("Sony MemoryStickPro block device driver"); 1428MODULE_DESCRIPTION("Sony MemoryStickPro block device driver");
1350MODULE_DEVICE_TABLE(memstick, mspro_block_id_tbl); 1429MODULE_DEVICE_TABLE(memstick, mspro_block_id_tbl);
1351MODULE_VERSION(DRIVER_VERSION);
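
mspro_block_attr_show_specfile() above unpacks DOS/FAT-style packed fields: 5 bits of hours, 6 of minutes and 5 of two-second units in the time word, and years-since-1980, month and day in the date word. A standalone decoder over one made-up sample value:

    #include <stdio.h>

    int main(void)
    {
        /* FAT packing: time = hhhhhmmm mmmsssss (seconds/2), date = yyyyyyym mmmddddd */
        unsigned short time = (13 << 11) | (45 << 5) | (28 / 2);    /* 13:45:28 */
        unsigned short date = ((2008 - 1980) << 9) | (2 << 5) | 21; /* 2008-02-21 */

        printf("time: %d:%02d:%02d\n",
               time >> 11, (time >> 5) & 0x3f, (time & 0x1f) * 2);
        printf("date: %d-%02d-%02d\n",
               (date >> 9) + 1980, (date >> 5) & 0xf, date & 0x1f);
        return 0;
    }
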
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
index c002fcc3c879..4ce5c8dffb68 100644
--- a/drivers/memstick/host/Kconfig
+++ b/drivers/memstick/host/Kconfig
@@ -20,3 +20,13 @@ config MEMSTICK_TIFM_MS
20 To compile this driver as a module, choose M here: the 20 To compile this driver as a module, choose M here: the
21 module will be called tifm_ms. 21 module will be called tifm_ms.
22 22
23config MEMSTICK_JMICRON_38X
24 tristate "JMicron JMB38X MemoryStick interface support (EXPERIMENTAL)"
25 depends on EXPERIMENTAL && PCI
26
27 help
28 Say Y here if you want to be able to access MemoryStick cards with
29 the JMicron(R) JMB38X MemoryStick card reader.
30
31 To compile this driver as a module, choose M here: the
32 module will be called jmb38x_ms.
diff --git a/drivers/memstick/host/Makefile b/drivers/memstick/host/Makefile
index ee666380efa1..12530e4311d3 100644
--- a/drivers/memstick/host/Makefile
+++ b/drivers/memstick/host/Makefile
@@ -3,8 +3,8 @@
3# 3#
4 4
5ifeq ($(CONFIG_MEMSTICK_DEBUG),y) 5ifeq ($(CONFIG_MEMSTICK_DEBUG),y)
6 EXTRA_CFLAGS += -DDEBUG 6 EXTRA_CFLAGS += -DDEBUG
7endif 7endif
8 8
9obj-$(CONFIG_MEMSTICK_TIFM_MS) += tifm_ms.o 9obj-$(CONFIG_MEMSTICK_TIFM_MS) += tifm_ms.o
10 10obj-$(CONFIG_MEMSTICK_JMICRON_38X) += jmb38x_ms.o
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
new file mode 100644
index 000000000000..03fe8783b1ee
--- /dev/null
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -0,0 +1,945 @@
1/*
2 * jmb38x_ms.c - JMicron jmb38x MemoryStick card reader
3 *
4 * Copyright (C) 2008 Alex Dubov <oakad@yahoo.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/spinlock.h>
13#include <linux/interrupt.h>
14#include <linux/pci.h>
15#include <linux/delay.h>
16#include <linux/highmem.h>
17#include <linux/memstick.h>
18
19#define DRIVER_NAME "jmb38x_ms"
20
21static int no_dma;
22module_param(no_dma, bool, 0644);
23
24enum {
25 DMA_ADDRESS = 0x00,
26 BLOCK = 0x04,
27 DMA_CONTROL = 0x08,
28 TPC_P0 = 0x0c,
29 TPC_P1 = 0x10,
30 TPC = 0x14,
31 HOST_CONTROL = 0x18,
32 DATA = 0x1c,
33 STATUS = 0x20,
34 INT_STATUS = 0x24,
35 INT_STATUS_ENABLE = 0x28,
36 INT_SIGNAL_ENABLE = 0x2c,
37 TIMER = 0x30,
38 TIMER_CONTROL = 0x34,
39 PAD_OUTPUT_ENABLE = 0x38,
40 PAD_PU_PD = 0x3c,
41 CLOCK_DELAY = 0x40,
42 ADMA_ADDRESS = 0x44,
43 CLOCK_CONTROL = 0x48,
44 LED_CONTROL = 0x4c,
45 VERSION = 0x50
46};
47
48struct jmb38x_ms_host {
49 struct jmb38x_ms *chip;
50 void __iomem *addr;
51 spinlock_t lock;
52 int id;
53 char host_id[DEVICE_ID_SIZE];
54 int irq;
55 unsigned int block_pos;
56 unsigned long timeout_jiffies;
57 struct timer_list timer;
58 struct memstick_request *req;
59 unsigned char eject:1,
60 use_dma:1;
61 unsigned char cmd_flags;
62 unsigned char io_pos;
63 unsigned int io_word[2];
64};
65
66struct jmb38x_ms {
67 struct pci_dev *pdev;
68 int host_cnt;
69 struct memstick_host *hosts[];
70};
71
72#define BLOCK_COUNT_MASK 0xffff0000
73#define BLOCK_SIZE_MASK 0x00000fff
74
75#define DMA_CONTROL_ENABLE 0x00000001
76
77#define TPC_DATA_SEL 0x00008000
78#define TPC_DIR 0x00004000
79#define TPC_WAIT_INT 0x00002000
80#define TPC_GET_INT 0x00000800
81#define TPC_CODE_SZ_MASK 0x00000700
82#define TPC_DATA_SZ_MASK 0x00000007
83
84#define HOST_CONTROL_RESET_REQ 0x00008000
85#define HOST_CONTROL_REI 0x00004000
86#define HOST_CONTROL_LED 0x00000400
87#define HOST_CONTROL_FAST_CLK 0x00000200
88#define HOST_CONTROL_RESET 0x00000100
89#define HOST_CONTROL_POWER_EN 0x00000080
90#define HOST_CONTROL_CLOCK_EN 0x00000040
91#define HOST_CONTROL_IF_SHIFT 4
92
93#define HOST_CONTROL_IF_SERIAL 0x0
94#define HOST_CONTROL_IF_PAR4 0x1
95#define HOST_CONTROL_IF_PAR8 0x3
96
97#define STATUS_HAS_MEDIA 0x00000400
98#define STATUS_FIFO_EMPTY 0x00000200
99#define STATUS_FIFO_FULL 0x00000100
100
101#define INT_STATUS_TPC_ERR 0x00080000
102#define INT_STATUS_CRC_ERR 0x00040000
103#define INT_STATUS_TIMER_TO 0x00020000
104#define INT_STATUS_HSK_TO 0x00010000
105#define INT_STATUS_ANY_ERR 0x00008000
106#define INT_STATUS_FIFO_WRDY 0x00000080
107#define INT_STATUS_FIFO_RRDY 0x00000040
108#define INT_STATUS_MEDIA_OUT 0x00000010
109#define INT_STATUS_MEDIA_IN 0x00000008
110#define INT_STATUS_DMA_BOUNDARY 0x00000004
111#define INT_STATUS_EOTRAN 0x00000002
112#define INT_STATUS_EOTPC 0x00000001
113
114#define INT_STATUS_ALL 0x000f801f
115
116#define PAD_OUTPUT_ENABLE_MS 0x0F3F
117
118#define PAD_PU_PD_OFF 0x7FFF0000
119#define PAD_PU_PD_ON_MS_SOCK0 0x5f8f0000
120#define PAD_PU_PD_ON_MS_SOCK1 0x0f0f0000
121
122enum {
123 CMD_READY = 0x01,
124 FIFO_READY = 0x02,
125 REG_DATA = 0x04,
126 AUTO_GET_INT = 0x08
127};
128
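The HOST_CONTROL register defined above carries the bus width in a two-bit field at HOST_CONTROL_IF_SHIFT. A minimal sketch of how a value for 4-bit parallel mode could be composed from these constants (the helper name is illustrative, not part of the driver, and the real code also touches REI and CLOCK_DELAY):

static unsigned int compose_host_control_par4(unsigned int host_ctl)
{
	/* clear the interface-select field, then set 4-bit parallel mode */
	host_ctl &= ~(3 << HOST_CONTROL_IF_SHIFT);
	host_ctl |= HOST_CONTROL_IF_PAR4 << HOST_CONTROL_IF_SHIFT;
	/* parallel modes run with the fast clock */
	host_ctl |= HOST_CONTROL_FAST_CLK;
	return host_ctl;
}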
129static unsigned int jmb38x_ms_read_data(struct jmb38x_ms_host *host,
130 unsigned char *buf, unsigned int length)
131{
132 unsigned int off = 0;
133
134 while (host->io_pos && length) {
135 buf[off++] = host->io_word[0] & 0xff;
136 host->io_word[0] >>= 8;
137 length--;
138 host->io_pos--;
139 }
140
141 if (!length)
142 return off;
143
144 while (!(STATUS_FIFO_EMPTY & readl(host->addr + STATUS))) {
145 if (length < 4)
146 break;
147 *(unsigned int *)(buf + off) = __raw_readl(host->addr + DATA);
148 length -= 4;
149 off += 4;
150 }
151
152 if (length
153 && !(STATUS_FIFO_EMPTY & readl(host->addr + STATUS))) {
154 host->io_word[0] = readl(host->addr + DATA);
155 for (host->io_pos = 4; host->io_pos; --host->io_pos) {
156 buf[off++] = host->io_word[0] & 0xff;
157 host->io_word[0] >>= 8;
158 length--;
159 if (!length)
160 break;
161 }
162 }
163
164 return off;
165}
166
167static unsigned int jmb38x_ms_read_reg_data(struct jmb38x_ms_host *host,
168 unsigned char *buf,
169 unsigned int length)
170{
171 unsigned int off = 0;
172
173 while (host->io_pos > 4 && length) {
174 buf[off++] = host->io_word[0] & 0xff;
175 host->io_word[0] >>= 8;
176 length--;
177 host->io_pos--;
178 }
179
180 if (!length)
181 return off;
182
183 while (host->io_pos && length) {
184 buf[off++] = host->io_word[1] & 0xff;
185 host->io_word[1] >>= 8;
186 length--;
187 host->io_pos--;
188 }
189
190 return off;
191}
192
193static unsigned int jmb38x_ms_write_data(struct jmb38x_ms_host *host,
194 unsigned char *buf,
195 unsigned int length)
196{
197 unsigned int off = 0;
198
199 if (host->io_pos) {
200 while (host->io_pos < 4 && length) {
201 host->io_word[0] |= buf[off++] << (host->io_pos * 8);
202 host->io_pos++;
203 length--;
204 }
205 }
206
207 if (host->io_pos == 4
208 && !(STATUS_FIFO_FULL & readl(host->addr + STATUS))) {
209 writel(host->io_word[0], host->addr + DATA);
210 host->io_pos = 0;
211 host->io_word[0] = 0;
212 } else if (host->io_pos) {
213 return off;
214 }
215
216 if (!length)
217 return off;
218
219 while (!(STATUS_FIFO_FULL & readl(host->addr + STATUS))) {
220 if (length < 4)
221 break;
222
223 __raw_writel(*(unsigned int *)(buf + off),
224 host->addr + DATA);
225 length -= 4;
226 off += 4;
227 }
228
229 switch (length) {
230 case 3:
231 host->io_word[0] |= buf[off + 2] << 16;
232 host->io_pos++;
233 case 2:
234 host->io_word[0] |= buf[off + 1] << 8;
235 host->io_pos++;
236 case 1:
237 host->io_word[0] |= buf[off];
238 host->io_pos++;
239 }
240
241 off += host->io_pos;
242
243 return off;
244}
245
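The read/write helpers above move data through a 32-bit FIFO register and carry any partial word between calls in host->io_word/host->io_pos. A standalone sketch of the byte-packing step only (not driver code; buf/len/word are illustrative):

static unsigned int pack_fifo_word(const unsigned char *buf,
				   unsigned int len, unsigned int *word)
{
	unsigned int pos = 0;

	*word = 0;
	/* least significant byte first, at most four bytes per FIFO word */
	while (pos < 4 && pos < len) {
		*word |= (unsigned int)buf[pos] << (pos * 8);
		pos++;
	}
	return pos;	/* bytes consumed; a short count leaves a partial word */
}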
246static unsigned int jmb38x_ms_write_reg_data(struct jmb38x_ms_host *host,
247 unsigned char *buf,
248 unsigned int length)
249{
250 unsigned int off = 0;
251
252 while (host->io_pos < 4 && length) {
253 host->io_word[0] &= ~(0xff << (host->io_pos * 8));
254 host->io_word[0] |= buf[off++] << (host->io_pos * 8);
255 host->io_pos++;
256 length--;
257 }
258
259 if (!length)
260 return off;
261
262 while (host->io_pos < 8 && length) {
263 host->io_word[1] &= ~(0xff << (host->io_pos * 8));
264 host->io_word[1] |= buf[off++] << (host->io_pos * 8);
265 host->io_pos++;
266 length--;
267 }
268
269 return off;
270}
271
272static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
273{
274 unsigned int length;
275 unsigned int off;
276 unsigned int t_size, p_off, p_cnt;
277 unsigned char *buf;
278 struct page *pg;
279 unsigned long flags = 0;
280
281 if (host->req->long_data) {
282 length = host->req->sg.length - host->block_pos;
283 off = host->req->sg.offset + host->block_pos;
284 } else {
285 length = host->req->data_len - host->block_pos;
286 off = 0;
287 }
288
289 while (length) {
290 if (host->req->long_data) {
291 pg = nth_page(sg_page(&host->req->sg),
292 off >> PAGE_SHIFT);
293 p_off = offset_in_page(off);
294 p_cnt = PAGE_SIZE - p_off;
295 p_cnt = min(p_cnt, length);
296
297 local_irq_save(flags);
298 buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + p_off;
299 } else {
300 buf = host->req->data + host->block_pos;
301 p_cnt = host->req->data_len - host->block_pos;
302 }
303
304 if (host->req->data_dir == WRITE)
305 t_size = !(host->cmd_flags & REG_DATA)
306 ? jmb38x_ms_write_data(host, buf, p_cnt)
307 : jmb38x_ms_write_reg_data(host, buf, p_cnt);
308 else
309 t_size = !(host->cmd_flags & REG_DATA)
310 ? jmb38x_ms_read_data(host, buf, p_cnt)
311 : jmb38x_ms_read_reg_data(host, buf, p_cnt);
312
313 if (host->req->long_data) {
314 kunmap_atomic(buf - p_off, KM_BIO_SRC_IRQ);
315 local_irq_restore(flags);
316 }
317
318 if (!t_size)
319 break;
320 host->block_pos += t_size;
321 length -= t_size;
322 off += t_size;
323 }
324
325 if (!length && host->req->data_dir == WRITE) {
326 if (host->cmd_flags & REG_DATA) {
327 writel(host->io_word[0], host->addr + TPC_P0);
328 writel(host->io_word[1], host->addr + TPC_P1);
329 } else if (host->io_pos) {
330 writel(host->io_word[0], host->addr + DATA);
331 }
332 }
333
334 return length;
335}
336
337static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
338{
339 struct jmb38x_ms_host *host = memstick_priv(msh);
340 unsigned char *data;
341 unsigned int data_len, cmd, t_val;
342
343 if (!(STATUS_HAS_MEDIA & readl(host->addr + STATUS))) {
344 dev_dbg(msh->cdev.dev, "no media status\n");
345 host->req->error = -ETIME;
346 return host->req->error;
347 }
348
349 dev_dbg(msh->cdev.dev, "control %08x\n",
350 readl(host->addr + HOST_CONTROL));
351 dev_dbg(msh->cdev.dev, "status %08x\n", readl(host->addr + INT_STATUS));
352 dev_dbg(msh->cdev.dev, "hstatus %08x\n", readl(host->addr + STATUS));
353
354 host->cmd_flags = 0;
355 host->block_pos = 0;
356 host->io_pos = 0;
357 host->io_word[0] = 0;
358 host->io_word[1] = 0;
359
360 cmd = host->req->tpc << 16;
361 cmd |= TPC_DATA_SEL;
362
363 if (host->req->data_dir == READ)
364 cmd |= TPC_DIR;
365 if (host->req->need_card_int)
366 cmd |= TPC_WAIT_INT;
367 if (host->req->get_int_reg)
368 cmd |= TPC_GET_INT;
369
370 data = host->req->data;
371
372 host->use_dma = !no_dma;
373
374 if (host->req->long_data) {
375 data_len = host->req->sg.length;
376 } else {
377 data_len = host->req->data_len;
378 host->use_dma = 0;
379 }
380
381 if (data_len <= 8) {
382 cmd &= ~(TPC_DATA_SEL | 0xf);
383 host->cmd_flags |= REG_DATA;
384 cmd |= data_len & 0xf;
385 host->use_dma = 0;
386 }
387
388 if (host->use_dma) {
389 if (1 != pci_map_sg(host->chip->pdev, &host->req->sg, 1,
390 host->req->data_dir == READ
391 ? PCI_DMA_FROMDEVICE
392 : PCI_DMA_TODEVICE)) {
393 host->req->error = -ENOMEM;
394 return host->req->error;
395 }
396 data_len = sg_dma_len(&host->req->sg);
397 writel(sg_dma_address(&host->req->sg),
398 host->addr + DMA_ADDRESS);
399 writel(((1 << 16) & BLOCK_COUNT_MASK)
400 | (data_len & BLOCK_SIZE_MASK),
401 host->addr + BLOCK);
402 writel(DMA_CONTROL_ENABLE, host->addr + DMA_CONTROL);
403 } else if (!(host->cmd_flags & REG_DATA)) {
404 writel(((1 << 16) & BLOCK_COUNT_MASK)
405 | (data_len & BLOCK_SIZE_MASK),
406 host->addr + BLOCK);
407 t_val = readl(host->addr + INT_STATUS_ENABLE);
408 t_val |= host->req->data_dir == READ
409 ? INT_STATUS_FIFO_RRDY
410 : INT_STATUS_FIFO_WRDY;
411
412 writel(t_val, host->addr + INT_STATUS_ENABLE);
413 writel(t_val, host->addr + INT_SIGNAL_ENABLE);
414 } else {
415 cmd &= ~(TPC_DATA_SEL | 0xf);
416 host->cmd_flags |= REG_DATA;
417 cmd |= data_len & 0xf;
418
419 if (host->req->data_dir == WRITE) {
420 jmb38x_ms_transfer_data(host);
421 writel(host->io_word[0], host->addr + TPC_P0);
422 writel(host->io_word[1], host->addr + TPC_P1);
423 }
424 }
425
426 mod_timer(&host->timer, jiffies + host->timeout_jiffies);
427 writel(HOST_CONTROL_LED | readl(host->addr + HOST_CONTROL),
428 host->addr + HOST_CONTROL);
429 host->req->error = 0;
430
431 writel(cmd, host->addr + TPC);
432 dev_dbg(msh->cdev.dev, "executing TPC %08x, len %x\n", cmd, data_len);
433
434 return 0;
435}
436
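jmb38x_ms_issue_cmd() above builds the TPC register value from the request: the TPC opcode in bits 16 and up, TPC_DIR for card-to-host transfers, and, for short register-mode transfers of up to 8 bytes, the length in the low nibble with TPC_DATA_SEL cleared (TPC_WAIT_INT/TPC_GET_INT handling is omitted here). A sketch of that composition; the function and parameter names are illustrative:

static unsigned int compose_tpc(unsigned char tpc, unsigned int data_len,
				int is_read, int reg_mode)
{
	unsigned int cmd = (unsigned int)tpc << 16;

	cmd |= TPC_DATA_SEL;			/* FIFO/DMA data path */
	if (is_read)
		cmd |= TPC_DIR;			/* card-to-host direction */
	if (reg_mode) {
		cmd &= ~(TPC_DATA_SEL | 0xf);	/* data via TPC_P0/TPC_P1 */
		cmd |= data_len & 0xf;
	}
	return cmd;
}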
437static void jmb38x_ms_complete_cmd(struct memstick_host *msh, int last)
438{
439 struct jmb38x_ms_host *host = memstick_priv(msh);
440 unsigned int t_val = 0;
441 int rc;
442
443 del_timer(&host->timer);
444
445 dev_dbg(msh->cdev.dev, "c control %08x\n",
446 readl(host->addr + HOST_CONTROL));
447 dev_dbg(msh->cdev.dev, "c status %08x\n",
448 readl(host->addr + INT_STATUS));
449 dev_dbg(msh->cdev.dev, "c hstatus %08x\n", readl(host->addr + STATUS));
450
451 if (host->req->get_int_reg) {
452 t_val = readl(host->addr + TPC_P0);
453 host->req->int_reg = (t_val & 0xff);
454 }
455
456 if (host->use_dma) {
457 writel(0, host->addr + DMA_CONTROL);
458 pci_unmap_sg(host->chip->pdev, &host->req->sg, 1,
459 host->req->data_dir == READ
460 ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
461 } else {
462 t_val = readl(host->addr + INT_STATUS_ENABLE);
463 if (host->req->data_dir == READ)
464 t_val &= ~INT_STATUS_FIFO_RRDY;
465 else
466 t_val &= ~INT_STATUS_FIFO_WRDY;
467
468 writel(t_val, host->addr + INT_STATUS_ENABLE);
469 writel(t_val, host->addr + INT_SIGNAL_ENABLE);
470 }
471
472 writel((~HOST_CONTROL_LED) & readl(host->addr + HOST_CONTROL),
473 host->addr + HOST_CONTROL);
474
475 if (!last) {
476 do {
477 rc = memstick_next_req(msh, &host->req);
478 } while (!rc && jmb38x_ms_issue_cmd(msh));
479 } else {
480 do {
481 rc = memstick_next_req(msh, &host->req);
482 if (!rc)
483 host->req->error = -ETIME;
484 } while (!rc);
485 }
486}
487
488static irqreturn_t jmb38x_ms_isr(int irq, void *dev_id)
489{
490 struct memstick_host *msh = dev_id;
491 struct jmb38x_ms_host *host = memstick_priv(msh);
492 unsigned int irq_status;
493
494 spin_lock(&host->lock);
495 irq_status = readl(host->addr + INT_STATUS);
496 dev_dbg(&host->chip->pdev->dev, "irq_status = %08x\n", irq_status);
497 if (irq_status == 0 || irq_status == (~0)) {
498 spin_unlock(&host->lock);
499 return IRQ_NONE;
500 }
501
502 if (host->req) {
503 if (irq_status & INT_STATUS_ANY_ERR) {
504 if (irq_status & INT_STATUS_CRC_ERR)
505 host->req->error = -EILSEQ;
506 else
507 host->req->error = -ETIME;
508 } else {
509 if (host->use_dma) {
510 if (irq_status & INT_STATUS_EOTRAN)
511 host->cmd_flags |= FIFO_READY;
512 } else {
513 if (irq_status & (INT_STATUS_FIFO_RRDY
514 | INT_STATUS_FIFO_WRDY))
515 jmb38x_ms_transfer_data(host);
516
517 if (irq_status & INT_STATUS_EOTRAN) {
518 jmb38x_ms_transfer_data(host);
519 host->cmd_flags |= FIFO_READY;
520 }
521 }
522
523 if (irq_status & INT_STATUS_EOTPC) {
524 host->cmd_flags |= CMD_READY;
525 if (host->cmd_flags & REG_DATA) {
526 if (host->req->data_dir == READ) {
527 host->io_word[0]
528 = readl(host->addr
529 + TPC_P0);
530 host->io_word[1]
531 = readl(host->addr
532 + TPC_P1);
533 host->io_pos = 8;
534
535 jmb38x_ms_transfer_data(host);
536 }
537 host->cmd_flags |= FIFO_READY;
538 }
539 }
540 }
541 }
542
543 if (irq_status & (INT_STATUS_MEDIA_IN | INT_STATUS_MEDIA_OUT)) {
544 dev_dbg(&host->chip->pdev->dev, "media changed\n");
545 memstick_detect_change(msh);
546 }
547
548 writel(irq_status, host->addr + INT_STATUS);
549
550 if (host->req
551 && (((host->cmd_flags & CMD_READY)
552 && (host->cmd_flags & FIFO_READY))
553 || host->req->error))
554 jmb38x_ms_complete_cmd(msh, 0);
555
556 spin_unlock(&host->lock);
557 return IRQ_HANDLED;
558}
559
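The interrupt handler above completes a request only once both the end-of-TPC event (CMD_READY) and the data-path completion (FIFO_READY) have been seen, or when an error was recorded. The same test written out as a predicate, as a sketch only:

static int jmb38x_request_done(struct jmb38x_ms_host *host)
{
	if (!host->req)
		return 0;
	if (host->req->error)
		return 1;
	return (host->cmd_flags & CMD_READY) && (host->cmd_flags & FIFO_READY);
}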
560static void jmb38x_ms_abort(unsigned long data)
561{
562 struct memstick_host *msh = (struct memstick_host *)data;
563 struct jmb38x_ms_host *host = memstick_priv(msh);
564 unsigned long flags;
565
566 dev_dbg(&host->chip->pdev->dev, "abort\n");
567 spin_lock_irqsave(&host->lock, flags);
568 if (host->req) {
569 host->req->error = -ETIME;
570 jmb38x_ms_complete_cmd(msh, 0);
571 }
572 spin_unlock_irqrestore(&host->lock, flags);
573}
574
575static void jmb38x_ms_request(struct memstick_host *msh)
576{
577 struct jmb38x_ms_host *host = memstick_priv(msh);
578 unsigned long flags;
579 int rc;
580
581 spin_lock_irqsave(&host->lock, flags);
582 if (host->req) {
583 spin_unlock_irqrestore(&host->lock, flags);
584 BUG();
585 return;
586 }
587
588 do {
589 rc = memstick_next_req(msh, &host->req);
590 } while (!rc && jmb38x_ms_issue_cmd(msh));
591 spin_unlock_irqrestore(&host->lock, flags);
592}
593
594static void jmb38x_ms_reset(struct jmb38x_ms_host *host)
595{
596 unsigned int host_ctl = readl(host->addr + HOST_CONTROL);
597
598 writel(host_ctl | HOST_CONTROL_RESET_REQ | HOST_CONTROL_RESET,
599 host->addr + HOST_CONTROL);
600
601 while (HOST_CONTROL_RESET_REQ
602 & (host_ctl = readl(host->addr + HOST_CONTROL))) {
603 ndelay(100);
604 dev_dbg(&host->chip->pdev->dev, "reset\n");
605 }
606
607 writel(INT_STATUS_ALL, host->addr + INT_STATUS_ENABLE);
608 writel(INT_STATUS_ALL, host->addr + INT_SIGNAL_ENABLE);
609
610 dev_dbg(&host->chip->pdev->dev, "reset\n");
611}
612
613static void jmb38x_ms_set_param(struct memstick_host *msh,
614 enum memstick_param param,
615 int value)
616{
617 struct jmb38x_ms_host *host = memstick_priv(msh);
618 unsigned int host_ctl;
619 unsigned long flags;
620
621 spin_lock_irqsave(&host->lock, flags);
622
623 switch (param) {
624 case MEMSTICK_POWER:
625 if (value == MEMSTICK_POWER_ON) {
626 jmb38x_ms_reset(host);
627
628 writel(host->id ? PAD_PU_PD_ON_MS_SOCK1
629 : PAD_PU_PD_ON_MS_SOCK0,
630 host->addr + PAD_PU_PD);
631
632 writel(PAD_OUTPUT_ENABLE_MS,
633 host->addr + PAD_OUTPUT_ENABLE);
634
635 host_ctl = readl(host->addr + HOST_CONTROL);
636 host_ctl |= 7;
637 writel(host_ctl | (HOST_CONTROL_POWER_EN
638 | HOST_CONTROL_CLOCK_EN),
639 host->addr + HOST_CONTROL);
640
641 dev_dbg(&host->chip->pdev->dev, "power on\n");
642 } else if (value == MEMSTICK_POWER_OFF) {
643 writel(readl(host->addr + HOST_CONTROL)
644 & ~(HOST_CONTROL_POWER_EN
645 | HOST_CONTROL_CLOCK_EN),
646 host->addr + HOST_CONTROL);
647 writel(0, host->addr + PAD_OUTPUT_ENABLE);
648 writel(PAD_PU_PD_OFF, host->addr + PAD_PU_PD);
649 dev_dbg(&host->chip->pdev->dev, "power off\n");
650 }
651 break;
652 case MEMSTICK_INTERFACE:
653 /* jmb38x_ms_reset(host); */
654
655 host_ctl = readl(host->addr + HOST_CONTROL);
656 host_ctl &= ~(3 << HOST_CONTROL_IF_SHIFT);
657 /* host_ctl |= 7; */
658
659 if (value == MEMSTICK_SERIAL) {
660 host_ctl &= ~HOST_CONTROL_FAST_CLK;
661 host_ctl |= HOST_CONTROL_IF_SERIAL
662 << HOST_CONTROL_IF_SHIFT;
663 host_ctl |= HOST_CONTROL_REI;
664 writel(0, host->addr + CLOCK_DELAY);
665 } else if (value == MEMSTICK_PAR4) {
666 host_ctl |= HOST_CONTROL_FAST_CLK;
667 host_ctl |= HOST_CONTROL_IF_PAR4
668 << HOST_CONTROL_IF_SHIFT;
669 host_ctl &= ~HOST_CONTROL_REI;
670 writel(4, host->addr + CLOCK_DELAY);
671 } else if (value == MEMSTICK_PAR8) {
672 host_ctl |= HOST_CONTROL_FAST_CLK;
673 host_ctl |= HOST_CONTROL_IF_PAR8
674 << HOST_CONTROL_IF_SHIFT;
675 host_ctl &= ~HOST_CONTROL_REI;
676 writel(4, host->addr + CLOCK_DELAY);
677 }
678 writel(host_ctl, host->addr + HOST_CONTROL);
679 break;
680 };
681
682 spin_unlock_irqrestore(&host->lock, flags);
683}
684
685#ifdef CONFIG_PM
686
687static int jmb38x_ms_suspend(struct pci_dev *dev, pm_message_t state)
688{
689 struct jmb38x_ms *jm = pci_get_drvdata(dev);
690 int cnt;
691
692 for (cnt = 0; cnt < jm->host_cnt; ++cnt) {
693 if (!jm->hosts[cnt])
694 break;
695 memstick_suspend_host(jm->hosts[cnt]);
696 }
697
698 pci_save_state(dev);
699 pci_enable_wake(dev, pci_choose_state(dev, state), 0);
700 pci_disable_device(dev);
701 pci_set_power_state(dev, pci_choose_state(dev, state));
702 return 0;
703}
704
705static int jmb38x_ms_resume(struct pci_dev *dev)
706{
707 struct jmb38x_ms *jm = pci_get_drvdata(dev);
708 int rc;
709
710 pci_set_power_state(dev, PCI_D0);
711 pci_restore_state(dev);
712 rc = pci_enable_device(dev);
713 if (rc)
714 return rc;
715 pci_set_master(dev);
716
717 pci_read_config_dword(dev, 0xac, &rc);
718 pci_write_config_dword(dev, 0xac, rc | 0x00470000);
719
720 for (rc = 0; rc < jm->host_cnt; ++rc) {
721 if (!jm->hosts[rc])
722 break;
723 memstick_resume_host(jm->hosts[rc]);
724 memstick_detect_change(jm->hosts[rc]);
725 }
726
727 return 0;
728}
729
730#else
731
732#define jmb38x_ms_suspend NULL
733#define jmb38x_ms_resume NULL
734
735#endif /* CONFIG_PM */
736
737static int jmb38x_ms_count_slots(struct pci_dev *pdev)
738{
739 int cnt, rc = 0;
740
741 for (cnt = 0; cnt < PCI_ROM_RESOURCE; ++cnt) {
742 if (!(IORESOURCE_MEM & pci_resource_flags(pdev, cnt)))
743 break;
744
745 if (256 != pci_resource_len(pdev, cnt))
746 break;
747
748 ++rc;
749 }
750 return rc;
751}
752
753static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt)
754{
755 struct memstick_host *msh;
756 struct jmb38x_ms_host *host;
757
758 msh = memstick_alloc_host(sizeof(struct jmb38x_ms_host),
759 &jm->pdev->dev);
760 if (!msh)
761 return NULL;
762
763 host = memstick_priv(msh);
764 host->chip = jm;
765 host->addr = ioremap(pci_resource_start(jm->pdev, cnt),
766 pci_resource_len(jm->pdev, cnt));
767 if (!host->addr)
768 goto err_out_free;
769
770 spin_lock_init(&host->lock);
771 host->id = cnt;
772 snprintf(host->host_id, DEVICE_ID_SIZE, DRIVER_NAME ":slot%d",
773 host->id);
774 host->irq = jm->pdev->irq;
775 host->timeout_jiffies = msecs_to_jiffies(4000);
776 msh->request = jmb38x_ms_request;
777 msh->set_param = jmb38x_ms_set_param;
778 /*
779 msh->caps = MEMSTICK_CAP_AUTO_GET_INT | MEMSTICK_CAP_PAR4
780 | MEMSTICK_CAP_PAR8;
781 */
782 msh->caps = MEMSTICK_CAP_PAR4 | MEMSTICK_CAP_PAR8;
783
784 setup_timer(&host->timer, jmb38x_ms_abort, (unsigned long)msh);
785
786 if (!request_irq(host->irq, jmb38x_ms_isr, IRQF_SHARED, host->host_id,
787 msh))
788 return msh;
789
790 iounmap(host->addr);
791err_out_free:
792 kfree(msh);
793 return NULL;
794}
795
796static void jmb38x_ms_free_host(struct memstick_host *msh)
797{
798 struct jmb38x_ms_host *host = memstick_priv(msh);
799
800 free_irq(host->irq, msh);
801 iounmap(host->addr);
802 memstick_free_host(msh);
803}
804
805static int jmb38x_ms_probe(struct pci_dev *pdev,
806 const struct pci_device_id *dev_id)
807{
808 struct jmb38x_ms *jm;
809 int pci_dev_busy = 0;
810 int rc, cnt;
811
812 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
813 if (rc)
814 return rc;
815
816 rc = pci_enable_device(pdev);
817 if (rc)
818 return rc;
819
820 pci_set_master(pdev);
821
822 rc = pci_request_regions(pdev, DRIVER_NAME);
823 if (rc) {
824 pci_dev_busy = 1;
825 goto err_out;
826 }
827
828 pci_read_config_dword(pdev, 0xac, &rc);
829 pci_write_config_dword(pdev, 0xac, rc | 0x00470000);
830
831 cnt = jmb38x_ms_count_slots(pdev);
832 if (!cnt) {
833 rc = -ENODEV;
834 pci_dev_busy = 1;
835 goto err_out;
836 }
837
838 jm = kzalloc(sizeof(struct jmb38x_ms)
839 + cnt * sizeof(struct memstick_host *), GFP_KERNEL);
840 if (!jm) {
841 rc = -ENOMEM;
842 goto err_out_int;
843 }
844
845 jm->pdev = pdev;
846 jm->host_cnt = cnt;
847 pci_set_drvdata(pdev, jm);
848
849 for (cnt = 0; cnt < jm->host_cnt; ++cnt) {
850 jm->hosts[cnt] = jmb38x_ms_alloc_host(jm, cnt);
851 if (!jm->hosts[cnt])
852 break;
853
854 rc = memstick_add_host(jm->hosts[cnt]);
855
856 if (rc) {
857 jmb38x_ms_free_host(jm->hosts[cnt]);
858 jm->hosts[cnt] = NULL;
859 break;
860 }
861 }
862
863 if (cnt)
864 return 0;
865
866 rc = -ENODEV;
867
868 pci_set_drvdata(pdev, NULL);
869 kfree(jm);
870err_out_int:
871 pci_release_regions(pdev);
872err_out:
873 if (!pci_dev_busy)
874 pci_disable_device(pdev);
875 return rc;
876}
877
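jmb38x_ms_probe() above sizes a single allocation for the chip structure plus one memstick_host pointer per detected slot, using the flexible array member declared in struct jmb38x_ms. A minimal sketch of the pattern in isolation (kernel context assumed; the names are illustrative):

struct example_chip {
	int slot_cnt;
	void *slots[];			/* flexible array member */
};

static struct example_chip *example_chip_alloc(int cnt)
{
	/* one kzalloc covers the header and the per-slot pointer array */
	return kzalloc(sizeof(struct example_chip) + cnt * sizeof(void *),
		       GFP_KERNEL);
}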
878static void jmb38x_ms_remove(struct pci_dev *dev)
879{
880 struct jmb38x_ms *jm = pci_get_drvdata(dev);
881 struct jmb38x_ms_host *host;
882 int cnt;
883 unsigned long flags;
884
885 for (cnt = 0; cnt < jm->host_cnt; ++cnt) {
886 if (!jm->hosts[cnt])
887 break;
888
889 host = memstick_priv(jm->hosts[cnt]);
890
891 writel(0, host->addr + INT_SIGNAL_ENABLE);
892 writel(0, host->addr + INT_STATUS_ENABLE);
893 mmiowb();
894 dev_dbg(&jm->pdev->dev, "interrupts off\n");
895 spin_lock_irqsave(&host->lock, flags);
896 if (host->req) {
897 host->req->error = -ETIME;
898 jmb38x_ms_complete_cmd(jm->hosts[cnt], 1);
899 }
900 spin_unlock_irqrestore(&host->lock, flags);
901
902 memstick_remove_host(jm->hosts[cnt]);
903 dev_dbg(&jm->pdev->dev, "host removed\n");
904
905 jmb38x_ms_free_host(jm->hosts[cnt]);
906 }
907
908 pci_set_drvdata(dev, NULL);
909 pci_release_regions(dev);
910 pci_disable_device(dev);
911 kfree(jm);
912}
913
914static struct pci_device_id jmb38x_ms_id_tbl [] = {
915 { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS, PCI_ANY_ID,
916 PCI_ANY_ID, 0, 0, 0 },
917 { }
918};
919
920static struct pci_driver jmb38x_ms_driver = {
921 .name = DRIVER_NAME,
922 .id_table = jmb38x_ms_id_tbl,
923 .probe = jmb38x_ms_probe,
924 .remove = jmb38x_ms_remove,
925 .suspend = jmb38x_ms_suspend,
926 .resume = jmb38x_ms_resume
927};
928
929static int __init jmb38x_ms_init(void)
930{
931 return pci_register_driver(&jmb38x_ms_driver);
932}
933
934static void __exit jmb38x_ms_exit(void)
935{
936 pci_unregister_driver(&jmb38x_ms_driver);
937}
938
939MODULE_AUTHOR("Alex Dubov");
940MODULE_DESCRIPTION("JMicron jmb38x MemoryStick driver");
941MODULE_LICENSE("GPL");
942MODULE_DEVICE_TABLE(pci, jmb38x_ms_id_tbl);
943
944module_init(jmb38x_ms_init);
945module_exit(jmb38x_ms_exit);
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index 4fb24215bd95..2b5bf52a8302 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -20,293 +20,315 @@
20#include <asm/io.h> 20#include <asm/io.h>
21 21
22#define DRIVER_NAME "tifm_ms" 22#define DRIVER_NAME "tifm_ms"
23#define DRIVER_VERSION "0.1"
24 23
25static int no_dma; 24static int no_dma;
26module_param(no_dma, bool, 0644); 25module_param(no_dma, bool, 0644);
27 26
28#define TIFM_MS_TIMEOUT 0x00100 27/*
29#define TIFM_MS_BADCRC 0x00200 28 * Some control bits of TIFM appear to conform to Sony's reference design,
30#define TIFM_MS_EOTPC 0x01000 29 * so I'm just assuming they all are.
31#define TIFM_MS_INT 0x02000 30 */
32
33/* The meaning of the bit majority in this constant is unknown. */
34#define TIFM_MS_SERIAL 0x04010
35 31
36#define TIFM_MS_SYS_LATCH 0x00100 32#define TIFM_MS_STAT_DRQ 0x04000
37#define TIFM_MS_SYS_NOT_RDY 0x00800 33#define TIFM_MS_STAT_MSINT 0x02000
38#define TIFM_MS_SYS_DATA 0x10000 34#define TIFM_MS_STAT_RDY 0x01000
35#define TIFM_MS_STAT_CRC 0x00200
36#define TIFM_MS_STAT_TOE 0x00100
37#define TIFM_MS_STAT_EMP 0x00020
38#define TIFM_MS_STAT_FUL 0x00010
39#define TIFM_MS_STAT_CED 0x00008
40#define TIFM_MS_STAT_ERR 0x00004
41#define TIFM_MS_STAT_BRQ 0x00002
42#define TIFM_MS_STAT_CNK 0x00001
43
44#define TIFM_MS_SYS_DMA 0x10000
45#define TIFM_MS_SYS_RESET 0x08000
46#define TIFM_MS_SYS_SRAC 0x04000
47#define TIFM_MS_SYS_INTEN 0x02000
48#define TIFM_MS_SYS_NOCRC 0x01000
49#define TIFM_MS_SYS_INTCLR 0x00800
50#define TIFM_MS_SYS_MSIEN 0x00400
51#define TIFM_MS_SYS_FCLR 0x00200
52#define TIFM_MS_SYS_FDIR 0x00100
53#define TIFM_MS_SYS_DAM 0x00080
54#define TIFM_MS_SYS_DRM 0x00040
55#define TIFM_MS_SYS_DRQSL 0x00020
56#define TIFM_MS_SYS_REI 0x00010
57#define TIFM_MS_SYS_REO 0x00008
58#define TIFM_MS_SYS_BSY_MASK 0x00007
59
60#define TIFM_MS_SYS_FIFO (TIFM_MS_SYS_INTEN | TIFM_MS_SYS_MSIEN \
61 | TIFM_MS_SYS_FCLR | TIFM_MS_SYS_BSY_MASK)
39 62
40/* Hardware flags */ 63/* Hardware flags */
41enum { 64enum {
42 CMD_READY = 0x0001, 65 CMD_READY = 0x01,
43 FIFO_READY = 0x0002, 66 FIFO_READY = 0x02,
44 CARD_READY = 0x0004, 67 CARD_INT = 0x04
45 DATA_CARRY = 0x0008
46}; 68};
47 69
48struct tifm_ms { 70struct tifm_ms {
49 struct tifm_dev *dev; 71 struct tifm_dev *dev;
50 unsigned short eject:1, 72 struct timer_list timer;
51 no_dma:1; 73 struct memstick_request *req;
52 unsigned short cmd_flags;
53 unsigned int mode_mask; 74 unsigned int mode_mask;
54 unsigned int block_pos; 75 unsigned int block_pos;
55 unsigned long timeout_jiffies; 76 unsigned long timeout_jiffies;
56 77 unsigned char eject:1,
57 struct timer_list timer; 78 use_dma:1;
58 struct memstick_request *req; 79 unsigned char cmd_flags;
80 unsigned char io_pos;
59 unsigned int io_word; 81 unsigned int io_word;
60}; 82};
61 83
62static void tifm_ms_read_fifo(struct tifm_ms *host, unsigned int fifo_offset, 84static unsigned int tifm_ms_read_data(struct tifm_ms *host,
63 struct page *pg, unsigned int page_off, 85 unsigned char *buf, unsigned int length)
64 unsigned int length)
65{ 86{
66 struct tifm_dev *sock = host->dev; 87 struct tifm_dev *sock = host->dev;
67 unsigned int cnt = 0, off = 0; 88 unsigned int off = 0;
68 unsigned char *buf = kmap_atomic(pg, KM_BIO_DST_IRQ) + page_off; 89
90 while (host->io_pos && length) {
91 buf[off++] = host->io_word & 0xff;
92 host->io_word >>= 8;
93 length--;
94 host->io_pos--;
95 }
69 96
70 if (host->cmd_flags & DATA_CARRY) { 97 if (!length)
71 while ((fifo_offset & 3) && length) { 98 return off;
99
100 while (!(TIFM_MS_STAT_EMP & readl(sock->addr + SOCK_MS_STATUS))) {
101 if (length < 4)
102 break;
103 *(unsigned int *)(buf + off) = __raw_readl(sock->addr
104 + SOCK_MS_DATA);
105 length -= 4;
106 off += 4;
107 }
108
109 if (length
110 && !(TIFM_MS_STAT_EMP & readl(sock->addr + SOCK_MS_STATUS))) {
111 host->io_word = readl(sock->addr + SOCK_MS_DATA);
112 for (host->io_pos = 4; host->io_pos; --host->io_pos) {
72 buf[off++] = host->io_word & 0xff; 113 buf[off++] = host->io_word & 0xff;
73 host->io_word >>= 8; 114 host->io_word >>= 8;
74 length--; 115 length--;
75 fifo_offset++; 116 if (!length)
117 break;
76 } 118 }
77 if (!(fifo_offset & 3))
78 host->cmd_flags &= ~DATA_CARRY;
79 if (!length)
80 return;
81 } 119 }
82 120
83 do { 121 return off;
84 host->io_word = readl(sock->addr + SOCK_FIFO_ACCESS
85 + fifo_offset);
86 cnt = 4;
87 while (length && cnt) {
88 buf[off++] = (host->io_word >> 8) & 0xff;
89 cnt--;
90 length--;
91 }
92 fifo_offset += 4 - cnt;
93 } while (length);
94
95 if (cnt)
96 host->cmd_flags |= DATA_CARRY;
97
98 kunmap_atomic(buf - page_off, KM_BIO_DST_IRQ);
99} 122}
100 123
101static void tifm_ms_write_fifo(struct tifm_ms *host, unsigned int fifo_offset, 124static unsigned int tifm_ms_write_data(struct tifm_ms *host,
102 struct page *pg, unsigned int page_off, 125 unsigned char *buf, unsigned int length)
103 unsigned int length)
104{ 126{
105 struct tifm_dev *sock = host->dev; 127 struct tifm_dev *sock = host->dev;
106 unsigned int cnt = 0, off = 0; 128 unsigned int off = 0;
107 unsigned char *buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + page_off;
108 129
109 if (host->cmd_flags & DATA_CARRY) { 130 if (host->io_pos) {
110 while (fifo_offset & 3) { 131 while (host->io_pos < 4 && length) {
111 host->io_word |= buf[off++] << (8 * (fifo_offset & 3)); 132 host->io_word |= buf[off++] << (host->io_pos * 8);
133 host->io_pos++;
112 length--; 134 length--;
113 fifo_offset++;
114 } 135 }
115 if (!(fifo_offset & 3)) {
116 writel(host->io_word, sock->addr + SOCK_FIFO_ACCESS
117 + fifo_offset - 4);
118
119 host->cmd_flags &= ~DATA_CARRY;
120 }
121 if (!length)
122 return;
123 } 136 }
124 137
125 do { 138 if (host->io_pos == 4
126 cnt = 4; 139 && !(TIFM_MS_STAT_FUL & readl(sock->addr + SOCK_MS_STATUS))) {
140 writel(TIFM_MS_SYS_FDIR | readl(sock->addr + SOCK_MS_SYSTEM),
141 sock->addr + SOCK_MS_SYSTEM);
142 writel(host->io_word, sock->addr + SOCK_MS_DATA);
143 host->io_pos = 0;
127 host->io_word = 0; 144 host->io_word = 0;
128 while (length && cnt) { 145 } else if (host->io_pos) {
129 host->io_word |= buf[off++] << (4 - cnt); 146 return off;
130 cnt--; 147 }
131 length--;
132 }
133 fifo_offset += 4 - cnt;
134 if (!cnt)
135 writel(host->io_word, sock->addr + SOCK_FIFO_ACCESS
136 + fifo_offset - 4);
137
138 } while (length);
139
140 if (cnt)
141 host->cmd_flags |= DATA_CARRY;
142 148
143 kunmap_atomic(buf - page_off, KM_BIO_SRC_IRQ); 149 if (!length)
144} 150 return off;
145 151
146static void tifm_ms_move_block(struct tifm_ms *host, unsigned int length) 152 while (!(TIFM_MS_STAT_FUL & readl(sock->addr + SOCK_MS_STATUS))) {
147{ 153 if (length < 4)
148 unsigned int t_size; 154 break;
149 unsigned int off = host->req->sg.offset + host->block_pos; 155 writel(TIFM_MS_SYS_FDIR | readl(sock->addr + SOCK_MS_SYSTEM),
150 unsigned int p_off, p_cnt; 156 sock->addr + SOCK_MS_SYSTEM);
151 struct page *pg; 157 __raw_writel(*(unsigned int *)(buf + off),
152 unsigned long flags; 158 sock->addr + SOCK_MS_DATA);
159 length -= 4;
160 off += 4;
161 }
153 162
154 dev_dbg(&host->dev->dev, "moving block\n"); 163 switch (length) {
155 local_irq_save(flags); 164 case 3:
156 t_size = length; 165 host->io_word |= buf[off + 2] << 16;
157 while (t_size) { 166 host->io_pos++;
158 pg = nth_page(sg_page(&host->req->sg), off >> PAGE_SHIFT); 167 case 2:
159 p_off = offset_in_page(off); 168 host->io_word |= buf[off + 1] << 8;
160 p_cnt = PAGE_SIZE - p_off; 169 host->io_pos++;
161 p_cnt = min(p_cnt, t_size); 170 case 1:
171 host->io_word |= buf[off];
172 host->io_pos++;
173 }
162 174
163 if (host->req->data_dir == WRITE) 175 off += host->io_pos;
164 tifm_ms_write_fifo(host, length - t_size,
165 pg, p_off, p_cnt);
166 else
167 tifm_ms_read_fifo(host, length - t_size,
168 pg, p_off, p_cnt);
169 176
170 t_size -= p_cnt; 177 return off;
171 }
172 local_irq_restore(flags);
173} 178}
174 179
175static int tifm_ms_transfer_data(struct tifm_ms *host, int skip) 180static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
176{ 181{
177 struct tifm_dev *sock = host->dev; 182 struct tifm_dev *sock = host->dev;
178 unsigned int length = host->req->sg.length - host->block_pos; 183 unsigned int length;
184 unsigned int off;
185 unsigned int t_size, p_off, p_cnt;
186 unsigned char *buf;
187 struct page *pg;
188 unsigned long flags = 0;
189
190 if (host->req->long_data) {
191 length = host->req->sg.length - host->block_pos;
192 off = host->req->sg.offset + host->block_pos;
193 } else {
194 length = host->req->data_len - host->block_pos;
195 off = 0;
196 }
197 dev_dbg(&sock->dev, "fifo data transfer, %d, %d\n", length,
198 host->block_pos);
199
200 while (length) {
201 if (host->req->long_data) {
202 pg = nth_page(sg_page(&host->req->sg),
203 off >> PAGE_SHIFT);
204 p_off = offset_in_page(off);
205 p_cnt = PAGE_SIZE - p_off;
206 p_cnt = min(p_cnt, length);
207
208 local_irq_save(flags);
209 buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + p_off;
210 } else {
211 buf = host->req->data + host->block_pos;
212 p_cnt = host->req->data_len - host->block_pos;
213 }
179 214
180 if (!length) 215 t_size = host->req->data_dir == WRITE
181 return 1; 216 ? tifm_ms_write_data(host, buf, p_cnt)
217 : tifm_ms_read_data(host, buf, p_cnt);
182 218
183 if (length > TIFM_FIFO_SIZE) 219 if (host->req->long_data) {
184 length = TIFM_FIFO_SIZE; 220 kunmap_atomic(buf - p_off, KM_BIO_SRC_IRQ);
221 local_irq_restore(flags);
222 }
185 223
186 if (!skip) { 224 if (!t_size)
187 tifm_ms_move_block(host, length); 225 break;
188 host->block_pos += length; 226 host->block_pos += t_size;
227 length -= t_size;
228 off += t_size;
189 } 229 }
190 230
191 if ((host->req->data_dir == READ) 231 dev_dbg(&sock->dev, "fifo data transfer, %d remaining\n", length);
192 && (host->block_pos == host->req->sg.length)) 232 if (!length && (host->req->data_dir == WRITE)) {
193 return 1; 233 if (host->io_pos) {
194 234 writel(TIFM_MS_SYS_FDIR
195 writel(ilog2(length) - 2, sock->addr + SOCK_FIFO_PAGE_SIZE); 235 | readl(sock->addr + SOCK_MS_SYSTEM),
196 if (host->req->data_dir == WRITE) 236 sock->addr + SOCK_MS_SYSTEM);
197 writel((1 << 8) | TIFM_DMA_TX, sock->addr + SOCK_DMA_CONTROL); 237 writel(host->io_word, sock->addr + SOCK_MS_DATA);
198 else 238 }
199 writel((1 << 8), sock->addr + SOCK_DMA_CONTROL); 239 writel(TIFM_MS_SYS_FDIR
240 | readl(sock->addr + SOCK_MS_SYSTEM),
241 sock->addr + SOCK_MS_SYSTEM);
242 writel(0, sock->addr + SOCK_MS_DATA);
243 } else {
244 readl(sock->addr + SOCK_MS_DATA);
245 }
200 246
201 return 0; 247 return length;
202} 248}
203 249
204static int tifm_ms_issue_cmd(struct tifm_ms *host) 250static int tifm_ms_issue_cmd(struct tifm_ms *host)
205{ 251{
206 struct tifm_dev *sock = host->dev; 252 struct tifm_dev *sock = host->dev;
207 unsigned char *data; 253 unsigned char *data;
208 unsigned int data_len = 0, cmd = 0, cmd_mask = 0, cnt, tval = 0; 254 unsigned int data_len, cmd, sys_param;
209 255
210 host->cmd_flags = 0; 256 host->cmd_flags = 0;
257 host->block_pos = 0;
258 host->io_pos = 0;
259 host->io_word = 0;
260 host->cmd_flags = 0;
211 261
212 if (host->req->io_type == MEMSTICK_IO_SG) { 262 data = host->req->data;
213 if (!host->no_dma) {
214 if (1 != tifm_map_sg(sock, &host->req->sg, 1,
215 host->req->data_dir == READ
216 ? PCI_DMA_FROMDEVICE
217 : PCI_DMA_TODEVICE)) {
218 host->req->error = -ENOMEM;
219 return host->req->error;
220 }
221 data_len = sg_dma_len(&host->req->sg);
222 } else
223 data_len = host->req->sg.length;
224
225 writel(TIFM_FIFO_INT_SETALL,
226 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
227 writel(TIFM_FIFO_ENABLE,
228 sock->addr + SOCK_FIFO_CONTROL);
229 writel(TIFM_FIFO_INTMASK,
230 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
231 263
232 if (!host->no_dma) { 264 host->use_dma = !no_dma;
233 writel(ilog2(data_len) - 2,
234 sock->addr + SOCK_FIFO_PAGE_SIZE);
235 writel(sg_dma_address(&host->req->sg),
236 sock->addr + SOCK_DMA_ADDRESS);
237 if (host->req->data_dir == WRITE)
238 writel((1 << 8) | TIFM_DMA_TX | TIFM_DMA_EN,
239 sock->addr + SOCK_DMA_CONTROL);
240 else
241 writel((1 << 8) | TIFM_DMA_EN,
242 sock->addr + SOCK_DMA_CONTROL);
243 } else {
244 tifm_ms_transfer_data(host,
245 host->req->data_dir == READ);
246 }
247 265
248 cmd_mask = readl(sock->addr + SOCK_MS_SYSTEM); 266 if (host->req->long_data) {
249 cmd_mask |= TIFM_MS_SYS_DATA | TIFM_MS_SYS_NOT_RDY; 267 data_len = host->req->sg.length;
250 writel(cmd_mask, sock->addr + SOCK_MS_SYSTEM); 268 if (!is_power_of_2(data_len))
251 } else if (host->req->io_type == MEMSTICK_IO_VAL) { 269 host->use_dma = 0;
252 data = host->req->data; 270 } else {
253 data_len = host->req->data_len; 271 data_len = host->req->data_len;
272 host->use_dma = 0;
273 }
254 274
255 cmd_mask = host->mode_mask | 0x2607; /* unknown constant */ 275 writel(TIFM_FIFO_INT_SETALL,
256 276 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
257 if (host->req->data_dir == WRITE) { 277 writel(TIFM_FIFO_ENABLE,
258 cmd_mask |= TIFM_MS_SYS_LATCH; 278 sock->addr + SOCK_FIFO_CONTROL);
259 writel(cmd_mask, sock->addr + SOCK_MS_SYSTEM); 279
260 for (cnt = 0; (data_len - cnt) >= 4; cnt += 4) { 280 if (host->use_dma) {
261 writel(TIFM_MS_SYS_LATCH 281 if (1 != tifm_map_sg(sock, &host->req->sg, 1,
262 | readl(sock->addr + SOCK_MS_SYSTEM), 282 host->req->data_dir == READ
263 sock->addr + SOCK_MS_SYSTEM); 283 ? PCI_DMA_FROMDEVICE
264 __raw_writel(*(unsigned int *)(data + cnt), 284 : PCI_DMA_TODEVICE)) {
265 sock->addr + SOCK_MS_DATA); 285 host->req->error = -ENOMEM;
266 dev_dbg(&sock->dev, "writing %x\n", 286 return host->req->error;
267 *(int *)(data + cnt)); 287 }
268 } 288 data_len = sg_dma_len(&host->req->sg);
269 switch (data_len - cnt) {
270 case 3:
271 tval |= data[cnt + 2] << 16;
272 case 2:
273 tval |= data[cnt + 1] << 8;
274 case 1:
275 tval |= data[cnt];
276 writel(TIFM_MS_SYS_LATCH
277 | readl(sock->addr + SOCK_MS_SYSTEM),
278 sock->addr + SOCK_MS_SYSTEM);
279 writel(tval, sock->addr + SOCK_MS_DATA);
280 dev_dbg(&sock->dev, "writing %x\n", tval);
281 }
282 289
283 writel(TIFM_MS_SYS_LATCH 290 writel(ilog2(data_len) - 2,
284 | readl(sock->addr + SOCK_MS_SYSTEM), 291 sock->addr + SOCK_FIFO_PAGE_SIZE);
285 sock->addr + SOCK_MS_SYSTEM); 292 writel(TIFM_FIFO_INTMASK,
286 writel(0, sock->addr + SOCK_MS_DATA); 293 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
287 dev_dbg(&sock->dev, "writing %x\n", 0); 294 sys_param = TIFM_DMA_EN | (1 << 8);
295 if (host->req->data_dir == WRITE)
296 sys_param |= TIFM_DMA_TX;
297
298 writel(TIFM_FIFO_INTMASK,
299 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
288 300
289 } else 301 writel(sg_dma_address(&host->req->sg),
290 writel(cmd_mask, sock->addr + SOCK_MS_SYSTEM); 302 sock->addr + SOCK_DMA_ADDRESS);
303 writel(sys_param, sock->addr + SOCK_DMA_CONTROL);
304 } else {
305 writel(host->mode_mask | TIFM_MS_SYS_FIFO,
306 sock->addr + SOCK_MS_SYSTEM);
291 307
292 cmd_mask = readl(sock->addr + SOCK_MS_SYSTEM); 308 writel(TIFM_FIFO_MORE,
293 cmd_mask &= ~TIFM_MS_SYS_DATA; 309 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
294 cmd_mask |= TIFM_MS_SYS_NOT_RDY; 310 }
295 dev_dbg(&sock->dev, "mask %x\n", cmd_mask);
296 writel(cmd_mask, sock->addr + SOCK_MS_SYSTEM);
297 } else
298 BUG();
299 311
300 mod_timer(&host->timer, jiffies + host->timeout_jiffies); 312 mod_timer(&host->timer, jiffies + host->timeout_jiffies);
301 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), 313 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
302 sock->addr + SOCK_CONTROL); 314 sock->addr + SOCK_CONTROL);
303 host->req->error = 0; 315 host->req->error = 0;
304 316
317 sys_param = readl(sock->addr + SOCK_MS_SYSTEM);
318 sys_param |= TIFM_MS_SYS_INTCLR;
319
320 if (host->use_dma)
321 sys_param |= TIFM_MS_SYS_DMA;
322 else
323 sys_param &= ~TIFM_MS_SYS_DMA;
324
325 writel(sys_param, sock->addr + SOCK_MS_SYSTEM);
326
305 cmd = (host->req->tpc & 0xf) << 12; 327 cmd = (host->req->tpc & 0xf) << 12;
306 cmd |= data_len; 328 cmd |= data_len;
307 writel(cmd, sock->addr + SOCK_MS_COMMAND); 329 writel(cmd, sock->addr + SOCK_MS_COMMAND);
308 330
309 dev_dbg(&sock->dev, "executing TPC %x, %x\n", cmd, cmd_mask); 331 dev_dbg(&sock->dev, "executing TPC %x, %x\n", cmd, sys_param);
310 return 0; 332 return 0;
311} 333}
312 334
@@ -314,47 +336,20 @@ static void tifm_ms_complete_cmd(struct tifm_ms *host)
314{ 336{
315 struct tifm_dev *sock = host->dev; 337 struct tifm_dev *sock = host->dev;
316 struct memstick_host *msh = tifm_get_drvdata(sock); 338 struct memstick_host *msh = tifm_get_drvdata(sock);
317 unsigned int tval = 0, data_len;
318 unsigned char *data;
319 int rc; 339 int rc;
320 340
321 del_timer(&host->timer); 341 del_timer(&host->timer);
322 if (host->req->io_type == MEMSTICK_IO_SG) {
323 if (!host->no_dma)
324 tifm_unmap_sg(sock, &host->req->sg, 1,
325 host->req->data_dir == READ
326 ? PCI_DMA_FROMDEVICE
327 : PCI_DMA_TODEVICE);
328 } else if (host->req->io_type == MEMSTICK_IO_VAL) {
329 writel(~TIFM_MS_SYS_DATA & readl(sock->addr + SOCK_MS_SYSTEM),
330 sock->addr + SOCK_MS_SYSTEM);
331
332 data = host->req->data;
333 data_len = host->req->data_len;
334 342
335 if (host->req->data_dir == READ) { 343 if (host->use_dma)
336 for (rc = 0; (data_len - rc) >= 4; rc += 4) 344 tifm_unmap_sg(sock, &host->req->sg, 1,
337 *(int *)(data + rc) 345 host->req->data_dir == READ
338 = __raw_readl(sock->addr 346 ? PCI_DMA_FROMDEVICE
339 + SOCK_MS_DATA); 347 : PCI_DMA_TODEVICE);
340
341 if (data_len - rc)
342 tval = readl(sock->addr + SOCK_MS_DATA);
343 switch (data_len - rc) {
344 case 3:
345 data[rc + 2] = (tval >> 16) & 0xff;
346 case 2:
347 data[rc + 1] = (tval >> 8) & 0xff;
348 case 1:
349 data[rc] = tval & 0xff;
350 }
351 readl(sock->addr + SOCK_MS_DATA);
352 }
353 }
354 348
355 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), 349 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
356 sock->addr + SOCK_CONTROL); 350 sock->addr + SOCK_CONTROL);
357 351
352 dev_dbg(&sock->dev, "TPC complete\n");
358 do { 353 do {
359 rc = memstick_next_req(msh, &host->req); 354 rc = memstick_next_req(msh, &host->req);
360 } while (!rc && tifm_ms_issue_cmd(host)); 355 } while (!rc && tifm_ms_issue_cmd(host));
@@ -365,11 +360,10 @@ static int tifm_ms_check_status(struct tifm_ms *host)
365 if (!host->req->error) { 360 if (!host->req->error) {
366 if (!(host->cmd_flags & CMD_READY)) 361 if (!(host->cmd_flags & CMD_READY))
367 return 1; 362 return 1;
368 if ((host->req->io_type == MEMSTICK_IO_SG) 363 if (!(host->cmd_flags & FIFO_READY))
369 && !(host->cmd_flags & FIFO_READY))
370 return 1; 364 return 1;
371 if (host->req->need_card_int 365 if (host->req->need_card_int
372 && !(host->cmd_flags & CARD_READY)) 366 && !(host->cmd_flags & CARD_INT))
373 return 1; 367 return 1;
374 } 368 }
375 return 0; 369 return 0;
@@ -379,18 +373,24 @@ static int tifm_ms_check_status(struct tifm_ms *host)
379static void tifm_ms_data_event(struct tifm_dev *sock) 373static void tifm_ms_data_event(struct tifm_dev *sock)
380{ 374{
381 struct tifm_ms *host; 375 struct tifm_ms *host;
382 unsigned int fifo_status = 0; 376 unsigned int fifo_status = 0, host_status = 0;
383 int rc = 1; 377 int rc = 1;
384 378
385 spin_lock(&sock->lock); 379 spin_lock(&sock->lock);
386 host = memstick_priv((struct memstick_host *)tifm_get_drvdata(sock)); 380 host = memstick_priv((struct memstick_host *)tifm_get_drvdata(sock));
387 fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); 381 fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS);
388 dev_dbg(&sock->dev, "data event: fifo_status %x, flags %x\n", 382 host_status = readl(sock->addr + SOCK_MS_STATUS);
389 fifo_status, host->cmd_flags); 383 dev_dbg(&sock->dev,
384 "data event: fifo_status %x, host_status %x, flags %x\n",
385 fifo_status, host_status, host->cmd_flags);
390 386
391 if (host->req) { 387 if (host->req) {
392 if (fifo_status & TIFM_FIFO_READY) { 388 if (host->use_dma && (fifo_status & 1)) {
393 if (!host->no_dma || tifm_ms_transfer_data(host, 0)) { 389 host->cmd_flags |= FIFO_READY;
390 rc = tifm_ms_check_status(host);
391 }
392 if (!host->use_dma && (fifo_status & TIFM_FIFO_MORE)) {
393 if (!tifm_ms_transfer_data(host)) {
394 host->cmd_flags |= FIFO_READY; 394 host->cmd_flags |= FIFO_READY;
395 rc = tifm_ms_check_status(host); 395 rc = tifm_ms_check_status(host);
396 } 396 }
@@ -419,9 +419,9 @@ static void tifm_ms_card_event(struct tifm_dev *sock)
419 host_status, host->cmd_flags); 419 host_status, host->cmd_flags);
420 420
421 if (host->req) { 421 if (host->req) {
422 if (host_status & TIFM_MS_TIMEOUT) 422 if (host_status & TIFM_MS_STAT_TOE)
423 host->req->error = -ETIME; 423 host->req->error = -ETIME;
424 else if (host_status & TIFM_MS_BADCRC) 424 else if (host_status & TIFM_MS_STAT_CRC)
425 host->req->error = -EILSEQ; 425 host->req->error = -EILSEQ;
426 426
427 if (host->req->error) { 427 if (host->req->error) {
@@ -430,18 +430,17 @@ static void tifm_ms_card_event(struct tifm_dev *sock)
430 writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL); 430 writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL);
431 } 431 }
432 432
433 if (host_status & TIFM_MS_EOTPC) 433 if (host_status & TIFM_MS_STAT_RDY)
434 host->cmd_flags |= CMD_READY; 434 host->cmd_flags |= CMD_READY;
435 if (host_status & TIFM_MS_INT) 435
436 host->cmd_flags |= CARD_READY; 436 if (host_status & TIFM_MS_STAT_MSINT)
437 host->cmd_flags |= CARD_INT;
437 438
438 rc = tifm_ms_check_status(host); 439 rc = tifm_ms_check_status(host);
439 440
440 } 441 }
441 442
442 writel(TIFM_MS_SYS_NOT_RDY | readl(sock->addr + SOCK_MS_SYSTEM), 443 writel(TIFM_MS_SYS_INTCLR | readl(sock->addr + SOCK_MS_SYSTEM),
443 sock->addr + SOCK_MS_SYSTEM);
444 writel((~TIFM_MS_SYS_DATA) & readl(sock->addr + SOCK_MS_SYSTEM),
445 sock->addr + SOCK_MS_SYSTEM); 444 sock->addr + SOCK_MS_SYSTEM);
446 445
447 if (!rc) 446 if (!rc)
@@ -497,15 +496,26 @@ static void tifm_ms_set_param(struct memstick_host *msh,
497 496
498 switch (param) { 497 switch (param) {
499 case MEMSTICK_POWER: 498 case MEMSTICK_POWER:
500 /* this is set by card detection mechanism */ 499 /* also affected by media detection mechanism */
500 if (value == MEMSTICK_POWER_ON) {
501 host->mode_mask = TIFM_MS_SYS_SRAC | TIFM_MS_SYS_REI;
502 writel(TIFM_MS_SYS_RESET, sock->addr + SOCK_MS_SYSTEM);
503 writel(TIFM_MS_SYS_FCLR | TIFM_MS_SYS_INTCLR,
504 sock->addr + SOCK_MS_SYSTEM);
505 writel(0xffffffff, sock->addr + SOCK_MS_STATUS);
506 } else if (value == MEMSTICK_POWER_OFF) {
507 writel(TIFM_MS_SYS_FCLR | TIFM_MS_SYS_INTCLR,
508 sock->addr + SOCK_MS_SYSTEM);
509 writel(0xffffffff, sock->addr + SOCK_MS_STATUS);
510 }
501 break; 511 break;
502 case MEMSTICK_INTERFACE: 512 case MEMSTICK_INTERFACE:
503 if (value == MEMSTICK_SERIAL) { 513 if (value == MEMSTICK_SERIAL) {
504 host->mode_mask = TIFM_MS_SERIAL; 514 host->mode_mask = TIFM_MS_SYS_SRAC | TIFM_MS_SYS_REI;
505 writel((~TIFM_CTRL_FAST_CLK) 515 writel((~TIFM_CTRL_FAST_CLK)
506 & readl(sock->addr + SOCK_CONTROL), 516 & readl(sock->addr + SOCK_CONTROL),
507 sock->addr + SOCK_CONTROL); 517 sock->addr + SOCK_CONTROL);
508 } else if (value == MEMSTICK_PARALLEL) { 518 } else if (value == MEMSTICK_PAR4) {
509 host->mode_mask = 0; 519 host->mode_mask = 0;
510 writel(TIFM_CTRL_FAST_CLK 520 writel(TIFM_CTRL_FAST_CLK
511 | readl(sock->addr + SOCK_CONTROL), 521 | readl(sock->addr + SOCK_CONTROL),
@@ -532,21 +542,6 @@ static void tifm_ms_abort(unsigned long data)
532 tifm_eject(host->dev); 542 tifm_eject(host->dev);
533} 543}
534 544
535static int tifm_ms_initialize_host(struct tifm_ms *host)
536{
537 struct tifm_dev *sock = host->dev;
538 struct memstick_host *msh = tifm_get_drvdata(sock);
539
540 host->mode_mask = TIFM_MS_SERIAL;
541 writel(0x8000, sock->addr + SOCK_MS_SYSTEM);
542 writel(0x0200 | TIFM_MS_SYS_NOT_RDY, sock->addr + SOCK_MS_SYSTEM);
543 writel(0xffffffff, sock->addr + SOCK_MS_STATUS);
544 if (tifm_has_ms_pif(sock))
545 msh->caps |= MEMSTICK_CAP_PARALLEL;
546
547 return 0;
548}
549
550static int tifm_ms_probe(struct tifm_dev *sock) 545static int tifm_ms_probe(struct tifm_dev *sock)
551{ 546{
552 struct memstick_host *msh; 547 struct memstick_host *msh;
@@ -568,7 +563,6 @@ static int tifm_ms_probe(struct tifm_dev *sock)
568 tifm_set_drvdata(sock, msh); 563 tifm_set_drvdata(sock, msh);
569 host->dev = sock; 564 host->dev = sock;
570 host->timeout_jiffies = msecs_to_jiffies(1000); 565 host->timeout_jiffies = msecs_to_jiffies(1000);
571 host->no_dma = no_dma;
572 566
573 setup_timer(&host->timer, tifm_ms_abort, (unsigned long)host); 567 setup_timer(&host->timer, tifm_ms_abort, (unsigned long)host);
574 568
@@ -576,10 +570,10 @@ static int tifm_ms_probe(struct tifm_dev *sock)
576 msh->set_param = tifm_ms_set_param; 570 msh->set_param = tifm_ms_set_param;
577 sock->card_event = tifm_ms_card_event; 571 sock->card_event = tifm_ms_card_event;
578 sock->data_event = tifm_ms_data_event; 572 sock->data_event = tifm_ms_data_event;
579 rc = tifm_ms_initialize_host(host); 573 if (tifm_has_ms_pif(sock))
574 msh->caps |= MEMSTICK_CAP_PAR4;
580 575
581 if (!rc) 576 rc = memstick_add_host(msh);
582 rc = memstick_add_host(msh);
583 if (!rc) 577 if (!rc)
584 return 0; 578 return 0;
585 579
@@ -601,7 +595,7 @@ static void tifm_ms_remove(struct tifm_dev *sock)
601 writel(TIFM_FIFO_INT_SETALL, 595 writel(TIFM_FIFO_INT_SETALL,
602 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); 596 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
603 writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL); 597 writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL);
604 if ((host->req->io_type == MEMSTICK_IO_SG) && !host->no_dma) 598 if (host->use_dma)
605 tifm_unmap_sg(sock, &host->req->sg, 1, 599 tifm_unmap_sg(sock, &host->req->sg, 1,
606 host->req->data_dir == READ 600 host->req->data_dir == READ
607 ? PCI_DMA_TODEVICE 601 ? PCI_DMA_TODEVICE
@@ -617,10 +611,6 @@ static void tifm_ms_remove(struct tifm_dev *sock)
617 spin_unlock_irqrestore(&sock->lock, flags); 611 spin_unlock_irqrestore(&sock->lock, flags);
618 612
619 memstick_remove_host(msh); 613 memstick_remove_host(msh);
620
621 writel(0x0200 | TIFM_MS_SYS_NOT_RDY, sock->addr + SOCK_MS_SYSTEM);
622 writel(0xffffffff, sock->addr + SOCK_MS_STATUS);
623
624 memstick_free_host(msh); 614 memstick_free_host(msh);
625} 615}
626 616
@@ -628,17 +618,17 @@ static void tifm_ms_remove(struct tifm_dev *sock)
628 618
629static int tifm_ms_suspend(struct tifm_dev *sock, pm_message_t state) 619static int tifm_ms_suspend(struct tifm_dev *sock, pm_message_t state)
630{ 620{
621 struct memstick_host *msh = tifm_get_drvdata(sock);
622
623 memstick_suspend_host(msh);
631 return 0; 624 return 0;
632} 625}
633 626
634static int tifm_ms_resume(struct tifm_dev *sock) 627static int tifm_ms_resume(struct tifm_dev *sock)
635{ 628{
636 struct memstick_host *msh = tifm_get_drvdata(sock); 629 struct memstick_host *msh = tifm_get_drvdata(sock);
637 struct tifm_ms *host = memstick_priv(msh);
638
639 tifm_ms_initialize_host(host);
640 memstick_detect_change(msh);
641 630
631 memstick_resume_host(msh);
642 return 0; 632 return 0;
643} 633}
644 634
@@ -679,7 +669,6 @@ MODULE_AUTHOR("Alex Dubov");
679MODULE_DESCRIPTION("TI FlashMedia MemoryStick driver"); 669MODULE_DESCRIPTION("TI FlashMedia MemoryStick driver");
680MODULE_LICENSE("GPL"); 670MODULE_LICENSE("GPL");
681MODULE_DEVICE_TABLE(tifm, tifm_ms_id_tbl); 671MODULE_DEVICE_TABLE(tifm, tifm_ms_id_tbl);
682MODULE_VERSION(DRIVER_VERSION);
683 672
684module_init(tifm_ms_init); 673module_init(tifm_ms_init);
685module_exit(tifm_ms_exit); 674module_exit(tifm_ms_exit);
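The tifm_ms rework above replaces the old MEMSTICK_IO_SG/MEMSTICK_IO_VAL split with the long_data/use_dma model also used by jmb38x_ms: DMA is attempted only for scatter/gather requests whose length is a power of two, everything else goes through the FIFO. A standalone sketch of that decision (not driver code; names are illustrative):

static int tifm_want_dma(int no_dma, int long_data, unsigned int data_len)
{
	if (no_dma || !long_data)
		return 0;
	/* mirrors is_power_of_2(): non-zero and a single bit set */
	return data_len != 0 && (data_len & (data_len - 1)) == 0;
}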
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index bfda731696f7..6b6df8679585 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -632,8 +632,7 @@ mpt_deregister(u8 cb_idx)
632 632
633/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 633/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
634/** 634/**
635 * mpt_event_register - Register protocol-specific event callback 635 * mpt_event_register - Register protocol-specific event callback handler.
636 * handler.
637 * @cb_idx: previously registered (via mpt_register) callback handle 636 * @cb_idx: previously registered (via mpt_register) callback handle
638 * @ev_cbfunc: callback function 637 * @ev_cbfunc: callback function
639 * 638 *
@@ -654,8 +653,7 @@ mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc)
654 653
655/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 654/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
656/** 655/**
657 * mpt_event_deregister - Deregister protocol-specific event callback 656 * mpt_event_deregister - Deregister protocol-specific event callback handler
658 * handler.
659 * @cb_idx: previously registered callback handle 657 * @cb_idx: previously registered callback handle
660 * 658 *
661 * Each protocol-specific driver should call this routine 659 * Each protocol-specific driver should call this routine
@@ -765,11 +763,13 @@ mpt_device_driver_deregister(u8 cb_idx)
765 763
766/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 764/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
767/** 765/**
768 * mpt_get_msg_frame - Obtain a MPT request frame from the pool (of 1024) 766 * mpt_get_msg_frame - Obtain an MPT request frame from the pool
769 * allocated per MPT adapter.
770 * @cb_idx: Handle of registered MPT protocol driver 767 * @cb_idx: Handle of registered MPT protocol driver
771 * @ioc: Pointer to MPT adapter structure 768 * @ioc: Pointer to MPT adapter structure
772 * 769 *
770 * Obtain an MPT request frame from the pool (of 1024) that are
771 * allocated per MPT adapter.
772 *
773 * Returns pointer to a MPT request frame or %NULL if none are available 773 * Returns pointer to a MPT request frame or %NULL if none are available
774 * or IOC is not active. 774 * or IOC is not active.
775 */ 775 */
@@ -834,13 +834,12 @@ mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc)
834 834
835/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 835/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
836/** 836/**
837 * mpt_put_msg_frame - Send a protocol specific MPT request frame 837 * mpt_put_msg_frame - Send a protocol-specific MPT request frame to an IOC
838 * to a IOC.
839 * @cb_idx: Handle of registered MPT protocol driver 838 * @cb_idx: Handle of registered MPT protocol driver
840 * @ioc: Pointer to MPT adapter structure 839 * @ioc: Pointer to MPT adapter structure
841 * @mf: Pointer to MPT request frame 840 * @mf: Pointer to MPT request frame
842 * 841 *
843 * This routine posts a MPT request frame to the request post FIFO of a 842 * This routine posts an MPT request frame to the request post FIFO of a
844 * specific MPT adapter. 843 * specific MPT adapter.
845 */ 844 */
846void 845void
@@ -868,13 +867,15 @@ mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
868} 867}
869 868
870/** 869/**
871 * mpt_put_msg_frame_hi_pri - Send a protocol specific MPT request frame 870 * mpt_put_msg_frame_hi_pri - Send a hi-pri protocol-specific MPT request frame
872 * to a IOC using hi priority request queue.
873 * @cb_idx: Handle of registered MPT protocol driver 871 * @cb_idx: Handle of registered MPT protocol driver
874 * @ioc: Pointer to MPT adapter structure 872 * @ioc: Pointer to MPT adapter structure
875 * @mf: Pointer to MPT request frame 873 * @mf: Pointer to MPT request frame
876 * 874 *
877 * This routine posts a MPT request frame to the request post FIFO of a 875 * Send a protocol-specific MPT request frame to an IOC using
876 * hi-priority request queue.
877 *
878 * This routine posts an MPT request frame to the request post FIFO of a
878 * specific MPT adapter. 879 * specific MPT adapter.
879 **/ 880 **/
880void 881void
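The kerneldoc hunks above tidy the descriptions of the message-frame pool API. A minimal usage sketch of the usual pairing, assuming cb_idx and ioc were obtained from an earlier mpt_register()/probe (the surrounding function is illustrative):

static int example_post_request(u8 cb_idx, MPT_ADAPTER *ioc)
{
	MPT_FRAME_HDR *mf = mpt_get_msg_frame(cb_idx, ioc);

	if (!mf)
		return -EAGAIN;		/* pool exhausted or IOC not active */
	/* ... fill in the protocol-specific request here ... */
	mpt_put_msg_frame(cb_idx, ioc, mf);
	return 0;
}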
@@ -1481,15 +1482,15 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1481 1482
1482 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); 1483 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
1483 if (pci_enable_device_mem(pdev)) { 1484 if (pci_enable_device_mem(pdev)) {
1484 kfree(ioc);
1485 printk(MYIOC_s_ERR_FMT "pci_enable_device_mem() " 1485 printk(MYIOC_s_ERR_FMT "pci_enable_device_mem() "
1486 "failed\n", ioc->name); 1486 "failed\n", ioc->name);
1487 kfree(ioc);
1487 return r; 1488 return r;
1488 } 1489 }
1489 if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) { 1490 if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) {
1490 kfree(ioc);
1491 printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with " 1491 printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with "
1492 "MEM failed\n", ioc->name); 1492 "MEM failed\n", ioc->name);
1493 kfree(ioc);
1493 return r; 1494 return r;
1494 } 1495 }
1495 1496
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index d83ea96fe135..caadc68c3000 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -923,7 +923,7 @@ extern struct proc_dir_entry *mpt_proc_root_dir;
923/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 923/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
924#endif /* } __KERNEL__ */ 924#endif /* } __KERNEL__ */
925 925
926#if defined(__alpha__) || defined(__sparc_v9__) || defined(__ia64__) || defined(__x86_64__) || defined(__powerpc__) 926#ifdef CONFIG_64BIT
927#define CAST_U32_TO_PTR(x) ((void *)(u64)x) 927#define CAST_U32_TO_PTR(x) ((void *)(u64)x)
928#define CAST_PTR_TO_U32(x) ((u32)(u64)x) 928#define CAST_PTR_TO_U32(x) ((u32)(u64)x)
929#else 929#else
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index f77b329f6923..78734e25edd5 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1701,6 +1701,11 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
1701 if (error) 1701 if (error)
1702 goto out_free_consistent; 1702 goto out_free_consistent;
1703 1703
1704 if (!buffer->NumPhys) {
1705 error = -ENODEV;
1706 goto out_free_consistent;
1707 }
1708
1704 /* save config data */ 1709 /* save config data */
1705 port_info->num_phys = buffer->NumPhys; 1710 port_info->num_phys = buffer->NumPhys;
1706 port_info->phy_info = kcalloc(port_info->num_phys, 1711 port_info->phy_info = kcalloc(port_info->num_phys,
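The mptsas hunk rejects an expander page that reports zero PHYs before any allocation happens; kcalloc(0, ...) would otherwise hand back an empty table and the later per-PHY setup would quietly do nothing. A small userspace sketch of the same validate-before-allocate shape (function and struct names are invented):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct phy_info { int id; };

/* Sketch only: refuse a zero element count coming from the device
 * instead of carrying an empty table around. */
static int save_port_info(unsigned int num_phys, struct phy_info **tbl)
{
	if (!num_phys)
		return -ENODEV;

	*tbl = calloc(num_phys, sizeof(**tbl));
	if (!*tbl)
		return -ENOMEM;
	return 0;
}

int main(void)
{
	struct phy_info *tbl;
	int err = save_port_info(0, &tbl);

	printf("err=%d\n", err);	/* -ENODEV, nothing allocated */
	return 0;
}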
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index af1de0ccee2f..0c252f60c4c1 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1533,7 +1533,7 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
1533 * 1533 *
1534 * Remark: Currently invoked from a non-interrupt thread (_bh). 1534 * Remark: Currently invoked from a non-interrupt thread (_bh).
1535 * 1535 *
1536 * Remark: With old EH code, at most 1 SCSI TaskMgmt function per IOC 1536 * Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC
1537 * will be active. 1537 * will be active.
1538 * 1538 *
1539 * Returns 0 for SUCCESS, or %FAILED. 1539 * Returns 0 for SUCCESS, or %FAILED.
@@ -2537,14 +2537,12 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
2537 2537
2538/** 2538/**
2539 * mptscsih_get_scsi_lookup 2539 * mptscsih_get_scsi_lookup
2540 *
2541 * retrieves scmd entry from ScsiLookup[] array list
2542 *
2543 * @ioc: Pointer to MPT_ADAPTER structure 2540 * @ioc: Pointer to MPT_ADAPTER structure
2544 * @i: index into the array 2541 * @i: index into the array
2545 * 2542 *
2546 * Returns the scsi_cmd pointer 2543 * retrieves scmd entry from ScsiLookup[] array list
2547 * 2544 *
2545 * Returns the scsi_cmd pointer
2548 **/ 2546 **/
2549static struct scsi_cmnd * 2547static struct scsi_cmnd *
2550mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) 2548mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
@@ -2561,14 +2559,12 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
2561 2559
2562/** 2560/**
2563 * mptscsih_getclear_scsi_lookup 2561 * mptscsih_getclear_scsi_lookup
2564 *
2565 * retrieves and clears scmd entry from ScsiLookup[] array list
2566 *
2567 * @ioc: Pointer to MPT_ADAPTER structure 2562 * @ioc: Pointer to MPT_ADAPTER structure
2568 * @i: index into the array 2563 * @i: index into the array
2569 * 2564 *
2570 * Returns the scsi_cmd pointer 2565 * retrieves and clears scmd entry from ScsiLookup[] array list
2571 * 2566 *
2567 * Returns the scsi_cmd pointer
2572 **/ 2568 **/
2573static struct scsi_cmnd * 2569static struct scsi_cmnd *
2574mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) 2570mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i)
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index afd82966f9a0..13bac53db69a 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -48,31 +48,13 @@ struct sm501_devdata {
48 unsigned int pdev_id; 48 unsigned int pdev_id;
49 unsigned int irq; 49 unsigned int irq;
50 void __iomem *regs; 50 void __iomem *regs;
51 unsigned int rev;
51}; 52};
52 53
53#define MHZ (1000 * 1000) 54#define MHZ (1000 * 1000)
54 55
55#ifdef DEBUG 56#ifdef DEBUG
56static const unsigned int misc_div[] = { 57static const unsigned int div_tab[] = {
57 [0] = 1,
58 [1] = 2,
59 [2] = 4,
60 [3] = 8,
61 [4] = 16,
62 [5] = 32,
63 [6] = 64,
64 [7] = 128,
65 [8] = 3,
66 [9] = 6,
67 [10] = 12,
68 [11] = 24,
69 [12] = 48,
70 [13] = 96,
71 [14] = 192,
72 [15] = 384,
73};
74
75static const unsigned int px_div[] = {
76 [0] = 1, 58 [0] = 1,
77 [1] = 2, 59 [1] = 2,
78 [2] = 4, 60 [2] = 4,
@@ -101,12 +83,12 @@ static const unsigned int px_div[] = {
101 83
102static unsigned long decode_div(unsigned long pll2, unsigned long val, 84static unsigned long decode_div(unsigned long pll2, unsigned long val,
103 unsigned int lshft, unsigned int selbit, 85 unsigned int lshft, unsigned int selbit,
104 unsigned long mask, const unsigned int *dtab) 86 unsigned long mask)
105{ 87{
106 if (val & selbit) 88 if (val & selbit)
107 pll2 = 288 * MHZ; 89 pll2 = 288 * MHZ;
108 90
109 return pll2 / dtab[(val >> lshft) & mask]; 91 return pll2 / div_tab[(val >> lshft) & mask];
110} 92}
111 93
112#define fmt_freq(x) ((x) / MHZ), ((x) % MHZ), (x) 94#define fmt_freq(x) ((x) / MHZ), ((x) % MHZ), (x)
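With the two divisor tables merged, decode_div() always indexes the shared div_tab with a bit-field pulled out of the power-mode register and divides the selected PLL output by it. A standalone sketch of that arithmetic; the table contents mirror the old misc_div values and the register value is made up for illustration:

#include <stdio.h>

#define MHZ (1000 * 1000)

/* 16-entry divisor table in the spirit of the driver's div_tab:
 * entries 0..7 are powers of two, entries 8..15 are 3 times those. */
static const unsigned int div_tab[16] = {
	1, 2, 4, 8, 16, 32, 64, 128,
	3, 6, 12, 24, 48, 96, 192, 384,
};

static unsigned long decode_div(unsigned long pll2, unsigned long val,
				unsigned int lshft, unsigned int selbit,
				unsigned long mask)
{
	if (val & selbit)		/* select bit set: fixed 288MHz source */
		pll2 = 288 * MHZ;

	return pll2 / div_tab[(val >> lshft) & mask];
}

int main(void)
{
	/* Hypothetical MISCT value: bits 11..8 = 9, so divide by div_tab[9] = 6 */
	unsigned long misct = 0x00000900;
	unsigned long sdclk0 = decode_div(336 * MHZ, misct, 8, 1 << 12, 0xf);

	printf("sdclk0 = %lu Hz\n", sdclk0);	/* 336MHz / 6 = 56MHz */
	return 0;
}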
@@ -141,10 +123,10 @@ static void sm501_dump_clk(struct sm501_devdata *sm)
141 } 123 }
142 124
143 sdclk0 = (misct & (1<<12)) ? pll2 : 288 * MHZ; 125 sdclk0 = (misct & (1<<12)) ? pll2 : 288 * MHZ;
144 sdclk0 /= misc_div[((misct >> 8) & 0xf)]; 126 sdclk0 /= div_tab[((misct >> 8) & 0xf)];
145 127
146 sdclk1 = (misct & (1<<20)) ? pll2 : 288 * MHZ; 128 sdclk1 = (misct & (1<<20)) ? pll2 : 288 * MHZ;
147 sdclk1 /= misc_div[((misct >> 16) & 0xf)]; 129 sdclk1 /= div_tab[((misct >> 16) & 0xf)];
148 130
149 dev_dbg(sm->dev, "MISCT=%08lx, PM0=%08lx, PM1=%08lx\n", 131 dev_dbg(sm->dev, "MISCT=%08lx, PM0=%08lx, PM1=%08lx\n",
150 misct, pm0, pm1); 132 misct, pm0, pm1);
@@ -158,19 +140,19 @@ static void sm501_dump_clk(struct sm501_devdata *sm)
158 "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " 140 "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), "
159 "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", 141 "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n",
160 (pmc & 3 ) == 0 ? '*' : '-', 142 (pmc & 3 ) == 0 ? '*' : '-',
161 fmt_freq(decode_div(pll2, pm0, 24, 1<<29, 31, px_div)), 143 fmt_freq(decode_div(pll2, pm0, 24, 1<<29, 31)),
162 fmt_freq(decode_div(pll2, pm0, 16, 1<<20, 15, misc_div)), 144 fmt_freq(decode_div(pll2, pm0, 16, 1<<20, 15)),
163 fmt_freq(decode_div(pll2, pm0, 8, 1<<12, 15, misc_div)), 145 fmt_freq(decode_div(pll2, pm0, 8, 1<<12, 15)),
164 fmt_freq(decode_div(pll2, pm0, 0, 1<<4, 15, misc_div))); 146 fmt_freq(decode_div(pll2, pm0, 0, 1<<4, 15)));
165 147
166 dev_dbg(sm->dev, "PM1[%c]: " 148 dev_dbg(sm->dev, "PM1[%c]: "
167 "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " 149 "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), "
168 "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", 150 "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n",
169 (pmc & 3 ) == 1 ? '*' : '-', 151 (pmc & 3 ) == 1 ? '*' : '-',
170 fmt_freq(decode_div(pll2, pm1, 24, 1<<29, 31, px_div)), 152 fmt_freq(decode_div(pll2, pm1, 24, 1<<29, 31)),
171 fmt_freq(decode_div(pll2, pm1, 16, 1<<20, 15, misc_div)), 153 fmt_freq(decode_div(pll2, pm1, 16, 1<<20, 15)),
172 fmt_freq(decode_div(pll2, pm1, 8, 1<<12, 15, misc_div)), 154 fmt_freq(decode_div(pll2, pm1, 8, 1<<12, 15)),
173 fmt_freq(decode_div(pll2, pm1, 0, 1<<4, 15, misc_div))); 155 fmt_freq(decode_div(pll2, pm1, 0, 1<<4, 15)));
174} 156}
175 157
176static void sm501_dump_regs(struct sm501_devdata *sm) 158static void sm501_dump_regs(struct sm501_devdata *sm)
@@ -436,46 +418,108 @@ struct sm501_clock {
436 unsigned long mclk; 418 unsigned long mclk;
437 int divider; 419 int divider;
438 int shift; 420 int shift;
421 unsigned int m, n, k;
439}; 422};
440 423
424/* sm501_calc_clock
425 *
426 * Calculates the nearest discrete clock frequency that
427 * can be achieved with the specified input clock.
428 * the maximum divisor is 3 or 5
429 */
430
431static int sm501_calc_clock(unsigned long freq,
432 struct sm501_clock *clock,
433 int max_div,
434 unsigned long mclk,
435 long *best_diff)
436{
437 int ret = 0;
438 int divider;
439 int shift;
440 long diff;
441
442 /* try dividers 1 and 3 for CRT and for panel,
443 try divider 5 for panel only.*/
444
445 for (divider = 1; divider <= max_div; divider += 2) {
446 /* try all 8 shift values.*/
447 for (shift = 0; shift < 8; shift++) {
448 /* Calculate difference to requested clock */
449 diff = sm501fb_round_div(mclk, divider << shift) - freq;
450 if (diff < 0)
451 diff = -diff;
452
453 /* If it is less than the current, use it */
454 if (diff < *best_diff) {
455 *best_diff = diff;
456
457 clock->mclk = mclk;
458 clock->divider = divider;
459 clock->shift = shift;
460 ret = 1;
461 }
462 }
463 }
464
465 return ret;
466}
467
468/* sm501_calc_pll
469 *
470 * Calculates the nearest discrete clock frequency that can be
471 * achieved using the programmable PLL.
472 * the maximum divisor is 3 or 5
473 */
474
475static unsigned long sm501_calc_pll(unsigned long freq,
476 struct sm501_clock *clock,
477 int max_div)
478{
479 unsigned long mclk;
480 unsigned int m, n, k;
481 long best_diff = 999999999;
482
483 /*
484 * The SM502 datasheet doesn't specify the min/max values for M and N.
485 * N = 1 at least doesn't work in practice.
486 */
487 for (m = 2; m <= 255; m++) {
488 for (n = 2; n <= 127; n++) {
489 for (k = 0; k <= 1; k++) {
490 mclk = (24000000UL * m / n) >> k;
491
492 if (sm501_calc_clock(freq, clock, max_div,
493 mclk, &best_diff)) {
494 clock->m = m;
495 clock->n = n;
496 clock->k = k;
497 }
498 }
499 }
500 }
501
502 /* Return best clock. */
503 return clock->mclk / (clock->divider << clock->shift);
504}
505
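sm501_calc_pll() sweeps M, N and K and derives each candidate as mclk = (24 MHz * M / N) >> K before handing it to the post-divider search. A trimmed-down sketch of just that frequency sweep (the driver additionally runs every candidate through sm501_calc_clock(); the target value below is arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned long long target = 65000000ULL;	/* example request */
	unsigned long long best = 0, best_diff = ~0ULL;
	unsigned int m, n, k, bm = 0, bn = 0, bk = 0;

	/* Same ranges as sm501_calc_pll(): mclk = (24MHz * M / N) >> K.
	 * 64-bit arithmetic avoids overflow of 24MHz * 255. */
	for (m = 2; m <= 255; m++)
		for (n = 2; n <= 127; n++)
			for (k = 0; k <= 1; k++) {
				unsigned long long mclk =
					(24000000ULL * m / n) >> k;
				unsigned long long diff = mclk > target ?
					mclk - target : target - mclk;

				if (diff < best_diff) {
					best_diff = diff;
					best = mclk;
					bm = m;
					bn = n;
					bk = k;
				}
			}

	printf("M=%u N=%u K=%u -> %llu Hz\n", bm, bn, bk, best);
	return 0;
}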
441/* sm501_select_clock 506/* sm501_select_clock
442 * 507 *
443 * selects nearest discrete clock frequency the SM501 can achive 508 * Calculates the nearest discrete clock frequency that can be
509 * achieved using the 288MHz and 336MHz PLLs.
444 * the maximum divisor is 3 or 5 510 * the maximum divisor is 3 or 5
445 */ 511 */
512
446static unsigned long sm501_select_clock(unsigned long freq, 513static unsigned long sm501_select_clock(unsigned long freq,
447 struct sm501_clock *clock, 514 struct sm501_clock *clock,
448 int max_div) 515 int max_div)
449{ 516{
450 unsigned long mclk; 517 unsigned long mclk;
451 int divider;
452 int shift;
453 long diff;
454 long best_diff = 999999999; 518 long best_diff = 999999999;
455 519
456 /* Try 288MHz and 336MHz clocks. */ 520 /* Try 288MHz and 336MHz clocks. */
457 for (mclk = 288000000; mclk <= 336000000; mclk += 48000000) { 521 for (mclk = 288000000; mclk <= 336000000; mclk += 48000000) {
458 /* try dividers 1 and 3 for CRT and for panel, 522 sm501_calc_clock(freq, clock, max_div, mclk, &best_diff);
459 try divider 5 for panel only.*/
460
461 for (divider = 1; divider <= max_div; divider += 2) {
462 /* try all 8 shift values.*/
463 for (shift = 0; shift < 8; shift++) {
464 /* Calculate difference to requested clock */
465 diff = sm501fb_round_div(mclk, divider << shift) - freq;
466 if (diff < 0)
467 diff = -diff;
468
469 /* If it is less than the current, use it */
470 if (diff < best_diff) {
471 best_diff = diff;
472
473 clock->mclk = mclk;
474 clock->divider = divider;
475 clock->shift = shift;
476 }
477 }
478 }
479 } 523 }
480 524
481 /* Return best clock. */ 525 /* Return best clock. */
@@ -497,6 +541,7 @@ unsigned long sm501_set_clock(struct device *dev,
497 unsigned long gate = readl(sm->regs + SM501_CURRENT_GATE); 541 unsigned long gate = readl(sm->regs + SM501_CURRENT_GATE);
498 unsigned long clock = readl(sm->regs + SM501_CURRENT_CLOCK); 542 unsigned long clock = readl(sm->regs + SM501_CURRENT_CLOCK);
499 unsigned char reg; 543 unsigned char reg;
544 unsigned int pll_reg = 0;
500 unsigned long sm501_freq; /* the actual frequency acheived */ 545 unsigned long sm501_freq; /* the actual frequency acheived */
501 546
502 struct sm501_clock to; 547 struct sm501_clock to;
@@ -511,14 +556,28 @@ unsigned long sm501_set_clock(struct device *dev,
511 * requested frequency the value must be multiplied by 556 * requested frequency the value must be multiplied by
512 * 2. This clock also has an additional pre divisor */ 557 * 2. This clock also has an additional pre divisor */
513 558
514 sm501_freq = (sm501_select_clock(2 * req_freq, &to, 5) / 2); 559 if (sm->rev >= 0xC0) {
515 reg=to.shift & 0x07;/* bottom 3 bits are shift */ 560 /* SM502 -> use the programmable PLL */
516 if (to.divider == 3) 561 sm501_freq = (sm501_calc_pll(2 * req_freq,
517 reg |= 0x08; /* /3 divider required */ 562 &to, 5) / 2);
518 else if (to.divider == 5) 563 reg = to.shift & 0x07;/* bottom 3 bits are shift */
519 reg |= 0x10; /* /5 divider required */ 564 if (to.divider == 3)
520 if (to.mclk != 288000000) 565 reg |= 0x08; /* /3 divider required */
521 reg |= 0x20; /* which mclk pll is source */ 566 else if (to.divider == 5)
567 reg |= 0x10; /* /5 divider required */
568 reg |= 0x40; /* select the programmable PLL */
569 pll_reg = 0x20000 | (to.k << 15) | (to.n << 8) | to.m;
570 } else {
571 sm501_freq = (sm501_select_clock(2 * req_freq,
572 &to, 5) / 2);
573 reg = to.shift & 0x07;/* bottom 3 bits are shift */
574 if (to.divider == 3)
575 reg |= 0x08; /* /3 divider required */
576 else if (to.divider == 5)
577 reg |= 0x10; /* /5 divider required */
578 if (to.mclk != 288000000)
579 reg |= 0x20; /* which mclk pll is source */
580 }
522 break; 581 break;
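On the SM502 branch the chosen M/N/K values are packed into the programmable-PLL control word as 0x20000 | (K << 15) | (N << 8) | M and written out later. A small sketch of that encoding; the example values are arbitrary and the meaning of bit 17 is assumed from the patch, not from a datasheet:

#include <stdio.h>

/* Pack M/N/K the way the SM502 branch above does: K lands in bit 15,
 * N in bits 14..8, M in bits 7..0, with bit 17 set (assumed to be the
 * programmable-PLL enable/select bit). */
static unsigned int sm502_pll_word(unsigned int m, unsigned int n,
				   unsigned int k)
{
	return 0x20000 | (k << 15) | (n << 8) | m;
}

int main(void)
{
	/* Arbitrary example values within the ranges swept by the driver. */
	printf("pll_reg = 0x%05x\n", sm502_pll_word(81, 15, 0));
	return 0;
}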
523 582
524 case SM501_CLOCK_V2XCLK: 583 case SM501_CLOCK_V2XCLK:
@@ -579,6 +638,10 @@ unsigned long sm501_set_clock(struct device *dev,
579 } 638 }
580 639
581 writel(mode, sm->regs + SM501_POWER_MODE_CONTROL); 640 writel(mode, sm->regs + SM501_POWER_MODE_CONTROL);
641
642 if (pll_reg)
643 writel(pll_reg, sm->regs + SM501_PROGRAMMABLE_PLL_CONTROL);
644
582 sm501_sync_regs(sm); 645 sm501_sync_regs(sm);
583 646
584 dev_info(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n", 647 dev_info(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n",
@@ -599,15 +662,24 @@ EXPORT_SYMBOL_GPL(sm501_set_clock);
599 * finds the closest available frequency for a given clock 662 * finds the closest available frequency for a given clock
600*/ 663*/
601 664
602unsigned long sm501_find_clock(int clksrc, 665unsigned long sm501_find_clock(struct device *dev,
666 int clksrc,
603 unsigned long req_freq) 667 unsigned long req_freq)
604{ 668{
669 struct sm501_devdata *sm = dev_get_drvdata(dev);
605 unsigned long sm501_freq; /* the frequency achiveable by the 501 */ 670 unsigned long sm501_freq; /* the frequency achiveable by the 501 */
606 struct sm501_clock to; 671 struct sm501_clock to;
607 672
608 switch (clksrc) { 673 switch (clksrc) {
609 case SM501_CLOCK_P2XCLK: 674 case SM501_CLOCK_P2XCLK:
610 sm501_freq = (sm501_select_clock(2 * req_freq, &to, 5) / 2); 675 if (sm->rev >= 0xC0) {
676 /* SM502 -> use the programmable PLL */
677 sm501_freq = (sm501_calc_pll(2 * req_freq,
678 &to, 5) / 2);
679 } else {
680 sm501_freq = (sm501_select_clock(2 * req_freq,
681 &to, 5) / 2);
682 }
611 break; 683 break;
612 684
613 case SM501_CLOCK_V2XCLK: 685 case SM501_CLOCK_V2XCLK:
@@ -914,6 +986,8 @@ static int sm501_init_dev(struct sm501_devdata *sm)
914 dev_info(sm->dev, "SM501 At %p: Version %08lx, %ld Mb, IRQ %d\n", 986 dev_info(sm->dev, "SM501 At %p: Version %08lx, %ld Mb, IRQ %d\n",
915 sm->regs, devid, (unsigned long)mem_avail >> 20, sm->irq); 987 sm->regs, devid, (unsigned long)mem_avail >> 20, sm->irq);
916 988
989 sm->rev = devid & SM501_DEVICEID_REVMASK;
990
917 sm501_dump_gate(sm); 991 sm501_dump_gate(sm);
918 992
919 ret = device_create_file(sm->dev, &dev_attr_dbg_regs); 993 ret = device_create_file(sm->dev, &dev_attr_dbg_regs);
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
index bb269d0c677e..6cb781262f94 100644
--- a/drivers/misc/thinkpad_acpi.c
+++ b/drivers/misc/thinkpad_acpi.c
@@ -1078,7 +1078,8 @@ static int hotkey_get_tablet_mode(int *status)
1078 if (!acpi_evalf(hkey_handle, &s, "MHKG", "d")) 1078 if (!acpi_evalf(hkey_handle, &s, "MHKG", "d"))
1079 return -EIO; 1079 return -EIO;
1080 1080
1081 return ((s & TP_HOTKEY_TABLET_MASK) != 0); 1081 *status = ((s & TP_HOTKEY_TABLET_MASK) != 0);
1082 return 0;
1082} 1083}
1083 1084
1084/* 1085/*
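The thinkpad_acpi fix makes hotkey_get_tablet_mode() report the boolean through its *status argument and keep the return value for 0/-errno, which is what its callers test for. A hedged sketch of that convention; hw_read() and the 0x40 mask are stand-ins, not the real acpi_evalf()/TP_HOTKEY_TABLET_MASK:

#include <errno.h>
#include <stdio.h>

/* Pretend firmware read; stands in for acpi_evalf(). */
static int hw_read(int *raw)
{
	*raw = 0x40;			/* made-up register value */
	return 1;			/* made-up "call succeeded" */
}

static int get_tablet_mode(int *status)
{
	int s;

	if (!hw_read(&s))
		return -EIO;

	*status = (s & 0x40) != 0;	/* boolean goes to the caller... */
	return 0;			/* ...return value only signals errors */
}

int main(void)
{
	int mode;

	if (!get_tablet_mode(&mode))
		printf("tablet mode: %d\n", mode);
	return 0;
}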
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 63a089b29545..67503ea71d21 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -368,6 +368,8 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
368 goto err_out_irq; 368 goto err_out_irq;
369 369
370 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1), 370 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
371 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
372 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
371 fm->addr + FM_SET_INTERRUPT_ENABLE); 373 fm->addr + FM_SET_INTERRUPT_ENABLE);
372 return 0; 374 return 0;
373 375
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 6ac81e35355c..275960462970 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1000,8 +1000,8 @@ static int __init ubi_init(void)
1000 mutex_unlock(&ubi_devices_mutex); 1000 mutex_unlock(&ubi_devices_mutex);
1001 if (err < 0) { 1001 if (err < 0) {
1002 put_mtd_device(mtd); 1002 put_mtd_device(mtd);
1003 printk(KERN_ERR "UBI error: cannot attach %s\n", 1003 printk(KERN_ERR "UBI error: cannot attach mtd%d\n",
1004 p->name); 1004 mtd->index);
1005 goto out_detach; 1005 goto out_detach;
1006 } 1006 }
1007 } 1007 }
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 457710615261..a548c1d28fa8 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -217,11 +217,11 @@ struct ubi_volume {
217 void *upd_buf; 217 void *upd_buf;
218 218
219 int *eba_tbl; 219 int *eba_tbl;
220 int checked:1; 220 unsigned int checked:1;
221 int corrupted:1; 221 unsigned int corrupted:1;
222 int upd_marker:1; 222 unsigned int upd_marker:1;
223 int updating:1; 223 unsigned int updating:1;
224 int changing_leb:1; 224 unsigned int changing_leb:1;
225 225
226#ifdef CONFIG_MTD_UBI_GLUEBI 226#ifdef CONFIG_MTD_UBI_GLUEBI
227 /* 227 /*
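The ubi.h change is about signedness: on common compilers a plain int one-bit field is signed, so it can only hold 0 and -1, and assigning 1 both warns and makes later "== 1" comparisons fail. A minimal sketch of the difference (struct names invented):

#include <stdio.h>

struct flags_signed   { int checked:1; };
struct flags_unsigned { unsigned int checked:1; };

int main(void)
{
	struct flags_signed s;
	struct flags_unsigned u;

	s.checked = 1;	/* with GCC this is stored as -1: a signed 1-bit
			 * field can only represent 0 and -1 */
	u.checked = 1;	/* stored as 1, as intended */

	printf("signed:   value=%d, (checked == 1) is %d\n",
	       s.checked, s.checked == 1);
	printf("unsigned: value=%d, (checked == 1) is %d\n",
	       u.checked, u.checked == 1);
	return 0;
}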
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index a3ca2257e601..5be58d85c639 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -376,7 +376,9 @@ out_sysfs:
376 get_device(&vol->dev); 376 get_device(&vol->dev);
377 volume_sysfs_close(vol); 377 volume_sysfs_close(vol);
378out_gluebi: 378out_gluebi:
379 ubi_destroy_gluebi(vol); 379 if (ubi_destroy_gluebi(vol))
380 dbg_err("cannot destroy gluebi for volume %d:%d",
381 ubi->ubi_num, vol_id);
380out_cdev: 382out_cdev:
381 cdev_del(&vol->cdev); 383 cdev_del(&vol->cdev);
382out_mapping: 384out_mapping:
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 56fc3fbce838..af36b12be278 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -519,6 +519,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
519 if (ubi->autoresize_vol_id != -1) { 519 if (ubi->autoresize_vol_id != -1) {
520 ubi_err("more then one auto-resize volume (%d " 520 ubi_err("more then one auto-resize volume (%d "
521 "and %d)", ubi->autoresize_vol_id, i); 521 "and %d)", ubi->autoresize_vol_id, i);
522 kfree(vol);
522 return -EINVAL; 523 return -EINVAL;
523 } 524 }
524 525
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f337800076c0..fe7b5ec09708 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -90,6 +90,11 @@ config MACVLAN
90 This allows one to create virtual interfaces that map packets to 90 This allows one to create virtual interfaces that map packets to
91 or from specific MAC addresses to a particular interface. 91 or from specific MAC addresses to a particular interface.
92 92
93 Macvlan devices can be added using the "ip" command from the
94 iproute2 package starting with the iproute2-2.6.23 release:
95
96 "ip link add link <real dev> [ address MAC ] [ NAME ] type macvlan"
97
93 To compile this driver as a module, choose M here: the module 98 To compile this driver as a module, choose M here: the module
94 will be called macvlan. 99 will be called macvlan.
95 100
@@ -2361,14 +2366,15 @@ config GELIC_NET
2361 module will be called ps3_gelic. 2366 module will be called ps3_gelic.
2362 2367
2363config GELIC_WIRELESS 2368config GELIC_WIRELESS
2364 bool "PS3 Wireless support" 2369 bool "PS3 Wireless support"
2365 depends on GELIC_NET 2370 depends on GELIC_NET
2366 help 2371 select WIRELESS_EXT
2367 This option adds the support for the wireless feature of PS3. 2372 help
2368 If you have the wireless-less model of PS3 or have no plan to 2373 This option adds the support for the wireless feature of PS3.
2369 use wireless feature, disabling this option saves memory. As 2374 If you have the wireless-less model of PS3 or have no plan to
2370 the driver automatically distinguishes the models, you can 2375 use wireless feature, disabling this option saves memory. As
2371 safely enable this option even if you have a wireless-less model. 2376 the driver automatically distinguishes the models, you can
2377 safely enable this option even if you have a wireless-less model.
2372 2378
2373config GIANFAR 2379config GIANFAR
2374 tristate "Gianfar Ethernet" 2380 tristate "Gianfar Ethernet"
@@ -2513,7 +2519,7 @@ config CHELSIO_T3
2513 2519
2514config EHEA 2520config EHEA
2515 tristate "eHEA Ethernet support" 2521 tristate "eHEA Ethernet support"
2516 depends on IBMEBUS && INET 2522 depends on IBMEBUS && INET && SPARSEMEM
2517 select INET_LRO 2523 select INET_LRO
2518 ---help--- 2524 ---help---
2519 This driver supports the IBM pSeries eHEA ethernet adapter. 2525 This driver supports the IBM pSeries eHEA ethernet adapter.
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 5136d94923aa..b1448637107f 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -369,7 +369,7 @@ MODULE_PARM_DESC(mem, "Memory base address(es)");
369MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver"); 369MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver");
370MODULE_LICENSE("GPL"); 370MODULE_LICENSE("GPL");
371 371
372int __init init_module(void) 372static int __init ac3200_module_init(void)
373{ 373{
374 struct net_device *dev; 374 struct net_device *dev;
375 int this_dev, found = 0; 375 int this_dev, found = 0;
@@ -404,8 +404,7 @@ static void cleanup_card(struct net_device *dev)
404 iounmap(ei_status.mem); 404 iounmap(ei_status.mem);
405} 405}
406 406
407void __exit 407static void __exit ac3200_module_exit(void)
408cleanup_module(void)
409{ 408{
410 int this_dev; 409 int this_dev;
411 410
@@ -418,4 +417,6 @@ cleanup_module(void)
418 } 417 }
419 } 418 }
420} 419}
420module_init(ac3200_module_init);
421module_exit(ac3200_module_exit);
421#endif /* MODULE */ 422#endif /* MODULE */
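The ac3200 hunk, like the apne, ltpc and capmode ones that follow, converts the legacy global init_module()/cleanup_module() entry points into static functions registered with module_init()/module_exit(). A stripped-down skeleton of the resulting shape (names and messages are placeholders):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

/* Minimal pattern the patches converge on: static, driver-named entry
 * points wired up with module_init()/module_exit() instead of the old
 * globally visible init_module()/cleanup_module(). */
static int __init example_module_init(void)
{
	printk(KERN_INFO "example: loaded\n");
	return 0;	/* a negative return would abort the load */
}

static void __exit example_module_exit(void)
{
	printk(KERN_INFO "example: unloaded\n");
}

module_init(example_module_init);
module_exit(example_module_exit);

MODULE_LICENSE("GPL");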
diff --git a/drivers/net/apne.c b/drivers/net/apne.c
index c12cbdf368b1..47a8275d3962 100644
--- a/drivers/net/apne.c
+++ b/drivers/net/apne.c
@@ -569,7 +569,7 @@ static irqreturn_t apne_interrupt(int irq, void *dev_id)
569#ifdef MODULE 569#ifdef MODULE
570static struct net_device *apne_dev; 570static struct net_device *apne_dev;
571 571
572int __init init_module(void) 572static int __init apne_module_init(void)
573{ 573{
574 apne_dev = apne_probe(-1); 574 apne_dev = apne_probe(-1);
575 if (IS_ERR(apne_dev)) 575 if (IS_ERR(apne_dev))
@@ -577,7 +577,7 @@ int __init init_module(void)
577 return 0; 577 return 0;
578} 578}
579 579
580void __exit cleanup_module(void) 580static void __exit apne_module_exit(void)
581{ 581{
582 unregister_netdev(apne_dev); 582 unregister_netdev(apne_dev);
583 583
@@ -591,7 +591,8 @@ void __exit cleanup_module(void)
591 591
592 free_netdev(apne_dev); 592 free_netdev(apne_dev);
593} 593}
594 594module_init(apne_module_init);
595module_exit(apne_module_exit);
595#endif 596#endif
596 597
597static int init_pcmcia(void) 598static int init_pcmcia(void)
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 6ab2c2d4d673..fef5560bc7a2 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1252,7 +1252,7 @@ module_param(irq, int, 0);
1252module_param(dma, int, 0); 1252module_param(dma, int, 0);
1253 1253
1254 1254
1255int __init init_module(void) 1255static int __init ltpc_module_init(void)
1256{ 1256{
1257 if(io == 0) 1257 if(io == 0)
1258 printk(KERN_NOTICE 1258 printk(KERN_NOTICE
@@ -1263,6 +1263,7 @@ int __init init_module(void)
1263 return PTR_ERR(dev_ltpc); 1263 return PTR_ERR(dev_ltpc);
1264 return 0; 1264 return 0;
1265} 1265}
1266module_init(ltpc_module_init);
1266#endif 1267#endif
1267 1268
1268static void __exit ltpc_cleanup(void) 1269static void __exit ltpc_cleanup(void)
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index cc4610db6395..02cb8f1c1148 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -80,17 +80,19 @@ void arcnet_cap_init(void)
80 80
81#ifdef MODULE 81#ifdef MODULE
82 82
83int __init init_module(void) 83static int __init capmode_module_init(void)
84{ 84{
85 printk(VERSION); 85 printk(VERSION);
86 arcnet_cap_init(); 86 arcnet_cap_init();
87 return 0; 87 return 0;
88} 88}
89 89
90void cleanup_module(void) 90static void __exit capmode_module_exit(void)
91{ 91{
92 arcnet_unregister_proto(&capmode_proto); 92 arcnet_unregister_proto(&capmode_proto);
93} 93}
94module_init(capmode_module_init);
95module_exit(capmode_module_exit);
94 96
95MODULE_LICENSE("GPL"); 97MODULE_LICENSE("GPL");
96#endif /* MODULE */ 98#endif /* MODULE */
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index b74dbeef8050..13c293b286de 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -336,8 +336,6 @@ struct lance_addr {
336 336
337/***************************** Prototypes *****************************/ 337/***************************** Prototypes *****************************/
338 338
339static int addr_accessible( volatile void *regp, int wordflag, int
340 writeflag );
341static unsigned long lance_probe1( struct net_device *dev, struct lance_addr 339static unsigned long lance_probe1( struct net_device *dev, struct lance_addr
342 *init_rec ); 340 *init_rec );
343static int lance_open( struct net_device *dev ); 341static int lance_open( struct net_device *dev );
@@ -406,7 +404,8 @@ struct net_device * __init atarilance_probe(int unit)
406 404
407/* Derived from hwreg_present() in atari/config.c: */ 405/* Derived from hwreg_present() in atari/config.c: */
408 406
409static int __init addr_accessible( volatile void *regp, int wordflag, int writeflag ) 407static noinline int __init addr_accessible(volatile void *regp, int wordflag,
408 int writeflag)
410{ 409{
411 int ret; 410 int ret;
412 long flags; 411 long flags;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 471c7f3e8a4a..15853be4680a 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
56 56
57#define DRV_MODULE_NAME "bnx2" 57#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": " 58#define PFX DRV_MODULE_NAME ": "
59#define DRV_MODULE_VERSION "1.7.3" 59#define DRV_MODULE_VERSION "1.7.4"
60#define DRV_MODULE_RELDATE "January 29, 2008" 60#define DRV_MODULE_RELDATE "February 18, 2008"
61 61
62#define RUN_AT(x) (jiffies + (x)) 62#define RUN_AT(x) (jiffies + (x))
63 63
@@ -1273,14 +1273,20 @@ bnx2_set_link(struct bnx2 *bp)
1273 1273
1274 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && 1274 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1275 (CHIP_NUM(bp) == CHIP_NUM_5706)) { 1275 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1276 u32 val; 1276 u32 val, an_dbg;
1277 1277
1278 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) { 1278 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1279 bnx2_5706s_force_link_dn(bp, 0); 1279 bnx2_5706s_force_link_dn(bp, 0);
1280 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN; 1280 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1281 } 1281 }
1282 val = REG_RD(bp, BNX2_EMAC_STATUS); 1282 val = REG_RD(bp, BNX2_EMAC_STATUS);
1283 if (val & BNX2_EMAC_STATUS_LINK) 1283
1284 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1285 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1286 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1287
1288 if ((val & BNX2_EMAC_STATUS_LINK) &&
1289 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1284 bmsr |= BMSR_LSTATUS; 1290 bmsr |= BMSR_LSTATUS;
1285 else 1291 else
1286 bmsr &= ~BMSR_LSTATUS; 1292 bmsr &= ~BMSR_LSTATUS;
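In bnx2_set_link() the shadow register is now read twice before being trusted, presumably because MISC_SHDW_AN_DBG latches loss-of-sync events, and link is only reported up when the EMAC status bit is set and NOSYNC is clear. A userspace sketch of the double-read idea against a fake latched register (the latching behaviour is an assumption for the example, not taken from a datasheet):

#include <stdio.h>

#define NOSYNC	0x1	/* stand-in for MISC_SHDW_AN_DBG_NOSYNC */

/* Fake latched status: a read returns the event captured since the
 * previous read and refreshes the latch with the current state. */
static unsigned int latched = NOSYNC;	/* stale loss-of-sync from the past */
static unsigned int live;		/* link is currently in sync */

static unsigned int read_an_dbg(void)
{
	unsigned int v = latched;

	latched = live;
	return v;
}

int main(void)
{
	unsigned int an_dbg;
	int emac_link = 1;		/* pretend BNX2_EMAC_STATUS_LINK is set */

	read_an_dbg();			/* first read flushes the stale event */
	an_dbg = read_an_dbg();		/* second read reflects current state */

	if (emac_link && !(an_dbg & NOSYNC))
		printf("report link up\n");
	else
		printf("report link down\n");
	return 0;
}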
@@ -5356,11 +5362,15 @@ bnx2_test_intr(struct bnx2 *bp)
5356 return -ENODEV; 5362 return -ENODEV;
5357} 5363}
5358 5364
5365/* Determining link for parallel detection. */
5359static int 5366static int
5360bnx2_5706_serdes_has_link(struct bnx2 *bp) 5367bnx2_5706_serdes_has_link(struct bnx2 *bp)
5361{ 5368{
5362 u32 mode_ctl, an_dbg, exp; 5369 u32 mode_ctl, an_dbg, exp;
5363 5370
5371 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5372 return 0;
5373
5364 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL); 5374 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5365 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl); 5375 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5366 5376
@@ -5390,13 +5400,6 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
5390 int check_link = 1; 5400 int check_link = 1;
5391 5401
5392 spin_lock(&bp->phy_lock); 5402 spin_lock(&bp->phy_lock);
5393 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
5394 bnx2_5706s_force_link_dn(bp, 0);
5395 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
5396 spin_unlock(&bp->phy_lock);
5397 return;
5398 }
5399
5400 if (bp->serdes_an_pending) { 5403 if (bp->serdes_an_pending) {
5401 bp->serdes_an_pending--; 5404 bp->serdes_an_pending--;
5402 check_link = 0; 5405 check_link = 0;
@@ -5420,7 +5423,6 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
5420 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) { 5423 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5421 u32 phy2; 5424 u32 phy2;
5422 5425
5423 check_link = 0;
5424 bnx2_write_phy(bp, 0x17, 0x0f01); 5426 bnx2_write_phy(bp, 0x17, 0x0f01);
5425 bnx2_read_phy(bp, 0x15, &phy2); 5427 bnx2_read_phy(bp, 0x15, &phy2);
5426 if (phy2 & 0x20) { 5428 if (phy2 & 0x20) {
@@ -5435,17 +5437,21 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
5435 } else 5437 } else
5436 bp->current_interval = bp->timer_interval; 5438 bp->current_interval = bp->timer_interval;
5437 5439
5438 if (bp->link_up && (bp->autoneg & AUTONEG_SPEED) && check_link) { 5440 if (check_link) {
5439 u32 val; 5441 u32 val;
5440 5442
5441 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); 5443 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5442 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val); 5444 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5443 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val); 5445 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5444 5446
5445 if (val & MISC_SHDW_AN_DBG_NOSYNC) { 5447 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5446 bnx2_5706s_force_link_dn(bp, 1); 5448 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5447 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN; 5449 bnx2_5706s_force_link_dn(bp, 1);
5448 } 5450 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5451 } else
5452 bnx2_set_link(bp);
5453 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5454 bnx2_set_link(bp);
5449 } 5455 }
5450 spin_unlock(&bp->phy_lock); 5456 spin_unlock(&bp->phy_lock);
5451} 5457}
@@ -7326,7 +7332,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7326 bp->flags |= BNX2_FLAG_NO_WOL; 7332 bp->flags |= BNX2_FLAG_NO_WOL;
7327 bp->wol = 0; 7333 bp->wol = 0;
7328 } 7334 }
7329 if (CHIP_NUM(bp) != CHIP_NUM_5706) { 7335 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7336 /* Don't do parallel detect on this board because of
7337 * some board problems. The link will not go down
7338 * if we do parallel detect.
7339 */
7340 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7341 pdev->subsystem_device == 0x310c)
7342 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7343 } else {
7330 bp->phy_addr = 2; 7344 bp->phy_addr = 2;
7331 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) 7345 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7332 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE; 7346 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 3aa0364942e2..1eaf5bb3d9c2 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6673,6 +6673,7 @@ struct bnx2 {
6673#define BNX2_PHY_FLAG_DIS_EARLY_DAC 0x00000400 6673#define BNX2_PHY_FLAG_DIS_EARLY_DAC 0x00000400
6674#define BNX2_PHY_FLAG_REMOTE_PHY_CAP 0x00000800 6674#define BNX2_PHY_FLAG_REMOTE_PHY_CAP 0x00000800
6675#define BNX2_PHY_FLAG_FORCED_DOWN 0x00001000 6675#define BNX2_PHY_FLAG_FORCED_DOWN 0x00001000
6676#define BNX2_PHY_FLAG_NO_PARALLEL 0x00002000
6676 6677
6677 u32 mii_bmcr; 6678 u32 mii_bmcr;
6678 u32 mii_bmsr; 6679 u32 mii_bmsr;
diff --git a/drivers/net/bnx2x.c b/drivers/net/bnx2x.c
index afc7f34b1dcf..8af142ccf373 100644
--- a/drivers/net/bnx2x.c
+++ b/drivers/net/bnx2x.c
@@ -1,6 +1,6 @@
1/* bnx2x.c: Broadcom Everest network driver. 1/* bnx2x.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007 Broadcom Corporation 3 * Copyright (c) 2007-2008 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -10,13 +10,13 @@
10 * Based on code from Michael Chan's bnx2 driver 10 * Based on code from Michael Chan's bnx2 driver
11 * UDP CSUM errata workaround by Arik Gendelman 11 * UDP CSUM errata workaround by Arik Gendelman
12 * Slowpath rework by Vladislav Zolotarov 12 * Slowpath rework by Vladislav Zolotarov
13 * Statistics and Link managment by Yitchak Gertner 13 * Statistics and Link management by Yitchak Gertner
14 * 14 *
15 */ 15 */
16 16
17/* define this to make the driver freeze on error 17/* define this to make the driver freeze on error
18 * to allow getting debug info 18 * to allow getting debug info
19 * (you will need to reboot afterwords) 19 * (you will need to reboot afterwards)
20 */ 20 */
21/*#define BNX2X_STOP_ON_ERROR*/ 21/*#define BNX2X_STOP_ON_ERROR*/
22 22
@@ -63,22 +63,21 @@
63#include "bnx2x.h" 63#include "bnx2x.h"
64#include "bnx2x_init.h" 64#include "bnx2x_init.h"
65 65
66#define DRV_MODULE_VERSION "0.40.15" 66#define DRV_MODULE_VERSION "1.40.22"
67#define DRV_MODULE_RELDATE "$DateTime: 2007/11/15 07:28:37 $" 67#define DRV_MODULE_RELDATE "2007/11/27"
68#define BNX2X_BC_VER 0x040009 68#define BNX2X_BC_VER 0x040200
69 69
70/* Time in jiffies before concluding the transmitter is hung. */ 70/* Time in jiffies before concluding the transmitter is hung. */
71#define TX_TIMEOUT (5*HZ) 71#define TX_TIMEOUT (5*HZ)
72 72
73static char version[] __devinitdata = 73static char version[] __devinitdata =
74 "Broadcom NetXtreme II 577xx 10Gigabit Ethernet Driver " 74 "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
75 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 75 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76 76
77MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>"); 77MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
78MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver"); 78MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
79MODULE_LICENSE("GPL"); 79MODULE_LICENSE("GPL");
80MODULE_VERSION(DRV_MODULE_VERSION); 80MODULE_VERSION(DRV_MODULE_VERSION);
81MODULE_INFO(cvs_version, "$Revision: #356 $");
82 81
83static int use_inta; 82static int use_inta;
84static int poll; 83static int poll;
@@ -94,8 +93,8 @@ module_param(debug, int, 0);
94MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X"); 93MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
95MODULE_PARM_DESC(poll, "use polling (for debug)"); 94MODULE_PARM_DESC(poll, "use polling (for debug)");
96MODULE_PARM_DESC(onefunc, "enable only first function"); 95MODULE_PARM_DESC(onefunc, "enable only first function");
97MODULE_PARM_DESC(nomcp, "ignore managment CPU (Implies onefunc)"); 96MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
98MODULE_PARM_DESC(debug, "defualt debug msglevel"); 97MODULE_PARM_DESC(debug, "default debug msglevel");
99 98
100#ifdef BNX2X_MULTI 99#ifdef BNX2X_MULTI
101module_param(use_multi, int, 0); 100module_param(use_multi, int, 0);
@@ -298,8 +297,7 @@ static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
298 297
299static int bnx2x_mc_assert(struct bnx2x *bp) 298static int bnx2x_mc_assert(struct bnx2x *bp)
300{ 299{
301 int i, j; 300 int i, j, rc = 0;
302 int rc = 0;
303 char last_idx; 301 char last_idx;
304 const char storm[] = {"XTCU"}; 302 const char storm[] = {"XTCU"};
305 const u32 intmem_base[] = { 303 const u32 intmem_base[] = {
@@ -313,8 +311,9 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
313 for (i = 0; i < 4; i++) { 311 for (i = 0; i < 4; i++) {
314 last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET + 312 last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
315 intmem_base[i]); 313 intmem_base[i]);
316 BNX2X_ERR("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n", 314 if (last_idx)
317 storm[i], last_idx); 315 BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
316 storm[i], last_idx);
318 317
319 /* print the asserts */ 318 /* print the asserts */
320 for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) { 319 for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
@@ -330,7 +329,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
330 intmem_base[i]); 329 intmem_base[i]);
331 330
332 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { 331 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
333 BNX2X_ERR("DATA %cSTORM_ASSERT_INDEX 0x%x =" 332 BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
334 " 0x%08x 0x%08x 0x%08x 0x%08x\n", 333 " 0x%08x 0x%08x 0x%08x 0x%08x\n",
335 storm[i], j, row3, row2, row1, row0); 334 storm[i], j, row3, row2, row1, row0);
336 rc++; 335 rc++;
@@ -341,6 +340,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
341 } 340 }
342 return rc; 341 return rc;
343} 342}
343
344static void bnx2x_fw_dump(struct bnx2x *bp) 344static void bnx2x_fw_dump(struct bnx2x *bp)
345{ 345{
346 u32 mark, offset; 346 u32 mark, offset;
@@ -348,21 +348,22 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
348 int word; 348 int word;
349 349
350 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104); 350 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
351 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark); 351 mark = ((mark + 0x3) & ~0x3);
352 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
352 353
353 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) { 354 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
354 for (word = 0; word < 8; word++) 355 for (word = 0; word < 8; word++)
355 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH + 356 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
356 offset + 4*word)); 357 offset + 4*word));
357 data[8] = 0x0; 358 data[8] = 0x0;
358 printk(KERN_ERR PFX "%s", (char *)data); 359 printk(KERN_CONT "%s", (char *)data);
359 } 360 }
360 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) { 361 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
361 for (word = 0; word < 8; word++) 362 for (word = 0; word < 8; word++)
362 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH + 363 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
363 offset + 4*word)); 364 offset + 4*word));
364 data[8] = 0x0; 365 data[8] = 0x0;
365 printk(KERN_ERR PFX "%s", (char *)data); 366 printk(KERN_CONT "%s", (char *)data);
366 } 367 }
367 printk("\n" KERN_ERR PFX "end of fw dump\n"); 368 printk("\n" KERN_ERR PFX "end of fw dump\n");
368} 369}
@@ -427,10 +428,10 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
427 } 428 }
428 } 429 }
429 430
430 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_t_idx(%u)" 431 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
431 " def_x_idx(%u) def_att_idx(%u) attn_state(%u)" 432 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
432 " spq_prod_idx(%u)\n", 433 " spq_prod_idx(%u)\n",
433 bp->def_c_idx, bp->def_u_idx, bp->def_t_idx, bp->def_x_idx, 434 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
434 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); 435 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
435 436
436 437
@@ -441,7 +442,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
441 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n"); 442 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
442} 443}
443 444
444static void bnx2x_enable_int(struct bnx2x *bp) 445static void bnx2x_int_enable(struct bnx2x *bp)
445{ 446{
446 int port = bp->port; 447 int port = bp->port;
447 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 448 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -454,18 +455,26 @@ static void bnx2x_enable_int(struct bnx2x *bp)
454 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 455 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
455 } else { 456 } else {
456 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 457 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
458 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
457 HC_CONFIG_0_REG_INT_LINE_EN_0 | 459 HC_CONFIG_0_REG_INT_LINE_EN_0 |
458 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 460 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
461
462 /* Errata A0.158 workaround */
463 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
464 val, port, addr, msix);
465
466 REG_WR(bp, addr, val);
467
459 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; 468 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
460 } 469 }
461 470
462 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) msi %d\n", 471 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
463 val, port, addr, msix); 472 val, port, addr, msix);
464 473
465 REG_WR(bp, addr, val); 474 REG_WR(bp, addr, val);
466} 475}
467 476
468static void bnx2x_disable_int(struct bnx2x *bp) 477static void bnx2x_int_disable(struct bnx2x *bp)
469{ 478{
470 int port = bp->port; 479 int port = bp->port;
471 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 480 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
@@ -484,15 +493,15 @@ static void bnx2x_disable_int(struct bnx2x *bp)
484 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 493 BNX2X_ERR("BUG! proper val not read from IGU!\n");
485} 494}
486 495
487static void bnx2x_disable_int_sync(struct bnx2x *bp) 496static void bnx2x_int_disable_sync(struct bnx2x *bp)
488{ 497{
489 498
490 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 499 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
491 int i; 500 int i;
492 501
493 atomic_inc(&bp->intr_sem); 502 atomic_inc(&bp->intr_sem);
494 /* prevent the HW from sending interrupts*/ 503 /* prevent the HW from sending interrupts */
495 bnx2x_disable_int(bp); 504 bnx2x_int_disable(bp);
496 505
497 /* make sure all ISRs are done */ 506 /* make sure all ISRs are done */
498 if (msix) { 507 if (msix) {
@@ -775,6 +784,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
775 mb(); /* force bnx2x_wait_ramrod to see the change */ 784 mb(); /* force bnx2x_wait_ramrod to see the change */
776 return; 785 return;
777 } 786 }
787
778 switch (command | bp->state) { 788 switch (command | bp->state) {
779 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT): 789 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
780 DP(NETIF_MSG_IFUP, "got setup ramrod\n"); 790 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
@@ -787,20 +797,20 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
787 fp->state = BNX2X_FP_STATE_HALTED; 797 fp->state = BNX2X_FP_STATE_HALTED;
788 break; 798 break;
789 799
790 case (RAMROD_CMD_ID_ETH_PORT_DEL | BNX2X_STATE_CLOSING_WAIT4_DELETE):
791 DP(NETIF_MSG_IFDOWN, "got delete ramrod\n");
792 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
793 break;
794
795 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT): 800 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
796 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid); 801 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
797 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_DELETED; 802 cid);
803 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
798 break; 804 break;
799 805
800 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): 806 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
801 DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); 807 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
802 break; 808 break;
803 809
810 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
811 DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
812 break;
813
804 default: 814 default:
805 BNX2X_ERR("unexpected ramrod (%d) state is %x\n", 815 BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
806 command, bp->state); 816 command, bp->state);
@@ -1179,12 +1189,175 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
1179 return val; 1189 return val;
1180} 1190}
1181 1191
1192static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1193{
1194 u32 cnt;
1195 u32 lock_status;
1196 u32 resource_bit = (1 << resource);
1197 u8 func = bp->port;
1198
1199 /* Validating that the resource is within range */
1200 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1201 DP(NETIF_MSG_HW,
1202 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1203 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1204 return -EINVAL;
1205 }
1206
1207 /* Validating that the resource is not already taken */
1208 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1209 if (lock_status & resource_bit) {
1210 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1211 lock_status, resource_bit);
1212 return -EEXIST;
1213 }
1214
1215 /* Try for 1 second every 5ms */
1216 for (cnt = 0; cnt < 200; cnt++) {
1217 /* Try to acquire the lock */
1218 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4,
1219 resource_bit);
1220 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1221 if (lock_status & resource_bit)
1222 return 0;
1223
1224 msleep(5);
1225 }
1226 DP(NETIF_MSG_HW, "Timeout\n");
1227 return -EAGAIN;
1228}
1229
1230static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
1231{
1232 u32 lock_status;
1233 u32 resource_bit = (1 << resource);
1234 u8 func = bp->port;
1235
1236 /* Validating that the resource is within range */
1237 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1238 DP(NETIF_MSG_HW,
1239 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1240 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1241 return -EINVAL;
1242 }
1243
1244 /* Validating that the resource is currently taken */
1245 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
1246 if (!(lock_status & resource_bit)) {
1247 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1248 lock_status, resource_bit);
1249 return -EFAULT;
1250 }
1251
1252 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit);
1253 return 0;
1254}
1255
1256static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1257{
1258 /* The GPIO should be swapped if swap register is set and active */
1259 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1260 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
1261 int gpio_shift = gpio_num +
1262 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1263 u32 gpio_mask = (1 << gpio_shift);
1264 u32 gpio_reg;
1265
1266 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1267 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1268 return -EINVAL;
1269 }
1270
1271 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1272 /* read GPIO and mask except the float bits */
1273 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1274
1275 switch (mode) {
1276 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1277 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1278 gpio_num, gpio_shift);
1279 /* clear FLOAT and set CLR */
1280 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1281 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1282 break;
1283
1284 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1285 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1286 gpio_num, gpio_shift);
1287 /* clear FLOAT and set SET */
1288 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1289 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1290 break;
1291
1292 case MISC_REGISTERS_GPIO_INPUT_HI_Z :
1293 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1294 gpio_num, gpio_shift);
1295 /* set FLOAT */
1296 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1297 break;
1298
1299 default:
1300 break;
1301 }
1302
1303 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1304 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
1305
1306 return 0;
1307}
1308
1309static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1310{
1311 u32 spio_mask = (1 << spio_num);
1312 u32 spio_reg;
1313
1314 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1315 (spio_num > MISC_REGISTERS_SPIO_7)) {
1316 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1317 return -EINVAL;
1318 }
1319
1320 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1321 /* read SPIO and mask except the float bits */
1322 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1323
1324 switch (mode) {
1325 case MISC_REGISTERS_SPIO_OUTPUT_LOW :
1326 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1327 /* clear FLOAT and set CLR */
1328 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1329 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1330 break;
1331
1332 case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
1333 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1334 /* clear FLOAT and set SET */
1335 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1336 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1337 break;
1338
1339 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1340 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1341 /* set FLOAT */
1342 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1343 break;
1344
1345 default:
1346 break;
1347 }
1348
1349 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1350 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
1351
1352 return 0;
1353}
1354
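The bnx2x_hw_lock()/bnx2x_hw_unlock() pair added above spins on a hardware semaphore: write the resource bit, read it back to see whether the claim stuck, and retry every 5 ms for up to a second. A userspace sketch of the same acquire/release loop, with a plain variable standing in for MISC_REG_DRIVER_CONTROL_1:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_RESOURCE	31

static unsigned int lock_reg;	/* stand-in for the driver-control register */

/* Pretend hardware: the write only sticks if the bit is currently free. */
static void reg_wr(unsigned int bit)
{
	if (!(lock_reg & bit))
		lock_reg |= bit;
}

static unsigned int reg_rd(void)
{
	return lock_reg;
}

static int hw_lock(unsigned int resource)
{
	unsigned int bit = 1u << resource;
	int cnt;

	if (resource > MAX_RESOURCE)
		return -EINVAL;
	if (reg_rd() & bit)		/* already held */
		return -EEXIST;

	/* Try for 1 second, every 5ms, matching the driver's loop. */
	for (cnt = 0; cnt < 200; cnt++) {
		reg_wr(bit);
		if (reg_rd() & bit)
			return 0;	/* the claim stuck, lock acquired */
		usleep(5000);
	}
	return -EAGAIN;
}

static int hw_unlock(unsigned int resource)
{
	unsigned int bit = 1u << resource;

	if (!(reg_rd() & bit))
		return -EFAULT;		/* releasing a lock we do not hold */
	lock_reg &= ~bit;
	return 0;
}

int main(void)
{
	printf("lock: %d\n", hw_lock(3));
	printf("unlock: %d\n", hw_unlock(3));
	return 0;
}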
1182static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val) 1355static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
1183{ 1356{
1184 int rc;
1185 u32 tmp, i;
1186 int port = bp->port; 1357 int port = bp->port;
1187 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 1358 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1359 u32 tmp;
1360 int i, rc;
1188 1361
1189/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n", 1362/* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n",
1190 bp->phy_addr, reg, val); */ 1363 bp->phy_addr, reg, val); */
@@ -1236,8 +1409,8 @@ static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
1236{ 1409{
1237 int port = bp->port; 1410 int port = bp->port;
1238 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 1411 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1239 u32 val, i; 1412 u32 val;
1240 int rc; 1413 int i, rc;
1241 1414
1242 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { 1415 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1243 1416
@@ -1286,58 +1459,54 @@ static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
1286 return rc; 1459 return rc;
1287} 1460}
1288 1461
1289static int bnx2x_mdio45_write(struct bnx2x *bp, u32 reg, u32 addr, u32 val) 1462static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl,
1463 u32 phy_addr, u32 reg, u32 addr, u32 val)
1290{ 1464{
1291 int rc = 0; 1465 u32 tmp;
1292 u32 tmp, i; 1466 int i, rc = 0;
1293 int port = bp->port;
1294 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1295
1296 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1297
1298 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1299 tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
1300 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1301 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1302 udelay(40);
1303 }
1304 1467
1305 /* set clause 45 mode */ 1468 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
1306 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); 1469 * (a value of 49==0x31) and make sure that the AUTO poll is off
1307 tmp |= EMAC_MDIO_MODE_CLAUSE_45; 1470 */
1308 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp); 1471 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1472 tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
1473 tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
1474 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1475 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
1476 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1477 udelay(40);
1309 1478
1310 /* address */ 1479 /* address */
1311 tmp = ((bp->phy_addr << 21) | (reg << 16) | addr | 1480 tmp = ((phy_addr << 21) | (reg << 16) | addr |
1312 EMAC_MDIO_COMM_COMMAND_ADDRESS | 1481 EMAC_MDIO_COMM_COMMAND_ADDRESS |
1313 EMAC_MDIO_COMM_START_BUSY); 1482 EMAC_MDIO_COMM_START_BUSY);
1314 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp); 1483 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
1315 1484
1316 for (i = 0; i < 50; i++) { 1485 for (i = 0; i < 50; i++) {
1317 udelay(10); 1486 udelay(10);
1318 1487
1319 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM); 1488 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1320 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 1489 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1321 udelay(5); 1490 udelay(5);
1322 break; 1491 break;
1323 } 1492 }
1324 } 1493 }
1325
1326 if (tmp & EMAC_MDIO_COMM_START_BUSY) { 1494 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1327 BNX2X_ERR("write phy register failed\n"); 1495 BNX2X_ERR("write phy register failed\n");
1328 1496
1329 rc = -EBUSY; 1497 rc = -EBUSY;
1498
1330 } else { 1499 } else {
1331 /* data */ 1500 /* data */
1332 tmp = ((bp->phy_addr << 21) | (reg << 16) | val | 1501 tmp = ((phy_addr << 21) | (reg << 16) | val |
1333 EMAC_MDIO_COMM_COMMAND_WRITE_45 | 1502 EMAC_MDIO_COMM_COMMAND_WRITE_45 |
1334 EMAC_MDIO_COMM_START_BUSY); 1503 EMAC_MDIO_COMM_START_BUSY);
1335 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp); 1504 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
1336 1505
1337 for (i = 0; i < 50; i++) { 1506 for (i = 0; i < 50; i++) {
1338 udelay(10); 1507 udelay(10);
1339 1508
1340 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM); 1509 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1341 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { 1510 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1342 udelay(5); 1511 udelay(5);
1343 break; 1512 break;
@@ -1351,75 +1520,78 @@ static int bnx2x_mdio45_write(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
1351 } 1520 }
1352 } 1521 }
1353 1522
1354 /* unset clause 45 mode */ 1523 /* unset clause 45 mode, set the MDIO clock to a faster value
1355 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); 1524 * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
1356 tmp &= ~EMAC_MDIO_MODE_CLAUSE_45; 1525 */
1357 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp); 1526 tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1358 1527 tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
1359 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { 1528 tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1360 1529 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
1361 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1362 tmp |= EMAC_MDIO_MODE_AUTO_POLL; 1530 tmp |= EMAC_MDIO_MODE_AUTO_POLL;
1363 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp); 1531 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
1364 }
1365 1532
1366 return rc; 1533 return rc;
1367} 1534}
1368 1535
1369static int bnx2x_mdio45_read(struct bnx2x *bp, u32 reg, u32 addr, 1536static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg,
1370 u32 *ret_val) 1537 u32 addr, u32 val)
1371{ 1538{
1372 int port = bp->port; 1539 u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1373 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1374 u32 val, i;
1375 int rc = 0;
1376 1540
1377 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { 1541 return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr,
1542 reg, addr, val);
1543}
1378 1544
1379 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); 1545static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl,
1380 val &= ~EMAC_MDIO_MODE_AUTO_POLL; 1546 u32 phy_addr, u32 reg, u32 addr,
1381 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val); 1547 u32 *ret_val)
1382 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); 1548{
1383 udelay(40); 1549 u32 val;
1384 } 1550 int i, rc = 0;
1385 1551
1386 /* set clause 45 mode */ 1552 /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
1387 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); 1553 * (a value of 49==0x31) and make sure that the AUTO poll is off
1388 val |= EMAC_MDIO_MODE_CLAUSE_45; 1554 */
1389 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val); 1555 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1556 val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
1557 val |= (EMAC_MDIO_MODE_CLAUSE_45 |
1558 (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
1559 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
1560 REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1561 udelay(40);
1390 1562
1391 /* address */ 1563 /* address */
1392 val = ((bp->phy_addr << 21) | (reg << 16) | addr | 1564 val = ((phy_addr << 21) | (reg << 16) | addr |
1393 EMAC_MDIO_COMM_COMMAND_ADDRESS | 1565 EMAC_MDIO_COMM_COMMAND_ADDRESS |
1394 EMAC_MDIO_COMM_START_BUSY); 1566 EMAC_MDIO_COMM_START_BUSY);
1395 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val); 1567 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
1396 1568
1397 for (i = 0; i < 50; i++) { 1569 for (i = 0; i < 50; i++) {
1398 udelay(10); 1570 udelay(10);
1399 1571
1400 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM); 1572 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1401 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 1573 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1402 udelay(5); 1574 udelay(5);
1403 break; 1575 break;
1404 } 1576 }
1405 } 1577 }
1406
1407 if (val & EMAC_MDIO_COMM_START_BUSY) { 1578 if (val & EMAC_MDIO_COMM_START_BUSY) {
1408 BNX2X_ERR("read phy register failed\n"); 1579 BNX2X_ERR("read phy register failed\n");
1409 1580
1410 *ret_val = 0; 1581 *ret_val = 0;
1411 rc = -EBUSY; 1582 rc = -EBUSY;
1583
1412 } else { 1584 } else {
1413 /* data */ 1585 /* data */
1414 val = ((bp->phy_addr << 21) | (reg << 16) | 1586 val = ((phy_addr << 21) | (reg << 16) |
1415 EMAC_MDIO_COMM_COMMAND_READ_45 | 1587 EMAC_MDIO_COMM_COMMAND_READ_45 |
1416 EMAC_MDIO_COMM_START_BUSY); 1588 EMAC_MDIO_COMM_START_BUSY);
1417 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val); 1589 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
1418 1590
1419 for (i = 0; i < 50; i++) { 1591 for (i = 0; i < 50; i++) {
1420 udelay(10); 1592 udelay(10);
1421 1593
1422 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM); 1594 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
1423 if (!(val & EMAC_MDIO_COMM_START_BUSY)) { 1595 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1424 val &= EMAC_MDIO_COMM_DATA; 1596 val &= EMAC_MDIO_COMM_DATA;
1425 break; 1597 break;
@@ -1436,31 +1608,39 @@ static int bnx2x_mdio45_read(struct bnx2x *bp, u32 reg, u32 addr,
1436 *ret_val = val; 1608 *ret_val = val;
1437 } 1609 }
1438 1610
1439 /* unset clause 45 mode */ 1611 /* unset clause 45 mode, set the MDIO clock to a faster value
1440 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); 1612 * (0x13 => 6.25MHz) and restore the AUTO poll if needed
1441 val &= ~EMAC_MDIO_MODE_CLAUSE_45; 1613 */
1442 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val); 1614 val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
1443 1615 val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
1444 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { 1616 val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
1445 1617 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
1446 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1447 val |= EMAC_MDIO_MODE_AUTO_POLL; 1618 val |= EMAC_MDIO_MODE_AUTO_POLL;
1448 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val); 1619 REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
1449 }
1450 1620
1451 return rc; 1621 return rc;
1452} 1622}
1453 1623
1454static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 reg, u32 addr, u32 val) 1624static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg,
1625 u32 addr, u32 *ret_val)
1626{
1627 u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1628
1629 return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr,
1630 reg, addr, ret_val);
1631}
1632
1633static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg,
1634 u32 addr, u32 val)
1455{ 1635{
1456 int i; 1636 int i;
1457 u32 rd_val; 1637 u32 rd_val;
1458 1638
1459 might_sleep(); 1639 might_sleep();
1460 for (i = 0; i < 10; i++) { 1640 for (i = 0; i < 10; i++) {
1461 bnx2x_mdio45_write(bp, reg, addr, val); 1641 bnx2x_mdio45_write(bp, phy_addr, reg, addr, val);
1462 msleep(5); 1642 msleep(5);
1463 bnx2x_mdio45_read(bp, reg, addr, &rd_val); 1643 bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val);
1464 /* if the read value is not the same as the value we wrote, 1644 /* if the read value is not the same as the value we wrote,
1465 we should write it again */ 1645 we should write it again */
1466 if (rd_val == val) 1646 if (rd_val == val)
@@ -1471,18 +1651,81 @@ static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
1471} 1651}
1472 1652
1473/* 1653/*
1474 * link managment 1654 * link management
1475 */ 1655 */
1476 1656
1657static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result)
1658{
1659 switch (pause_result) { /* ASYM P ASYM P */
1660 case 0xb: /* 1 0 1 1 */
1661 bp->flow_ctrl = FLOW_CTRL_TX;
1662 break;
1663
1664 case 0xe: /* 1 1 1 0 */
1665 bp->flow_ctrl = FLOW_CTRL_RX;
1666 break;
1667
1668 case 0x5: /* 0 1 0 1 */
1669 case 0x7: /* 0 1 1 1 */
1670 case 0xd: /* 1 1 0 1 */
1671 case 0xf: /* 1 1 1 1 */
1672 bp->flow_ctrl = FLOW_CTRL_BOTH;
1673 break;
1674
1675 default:
1676 break;
1677 }
1678}
1679
1680static u8 bnx2x_ext_phy_resove_fc(struct bnx2x *bp)
1681{
1682 u32 ext_phy_addr;
1683 u32 ld_pause; /* local */
1684 u32 lp_pause; /* link partner */
1685 u32 an_complete; /* AN complete */
1686 u32 pause_result;
1687 u8 ret = 0;
1688
1689 ext_phy_addr = ((bp->ext_phy_config &
1690 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1691 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
1692
1693 /* read twice */
1694 bnx2x_mdio45_read(bp, ext_phy_addr,
1695 EXT_PHY_KR_AUTO_NEG_DEVAD,
1696 EXT_PHY_KR_STATUS, &an_complete);
1697 bnx2x_mdio45_read(bp, ext_phy_addr,
1698 EXT_PHY_KR_AUTO_NEG_DEVAD,
1699 EXT_PHY_KR_STATUS, &an_complete);
1700
1701 if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) {
1702 ret = 1;
1703 bnx2x_mdio45_read(bp, ext_phy_addr,
1704 EXT_PHY_KR_AUTO_NEG_DEVAD,
1705 EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause);
1706 bnx2x_mdio45_read(bp, ext_phy_addr,
1707 EXT_PHY_KR_AUTO_NEG_DEVAD,
1708 EXT_PHY_KR_LP_AUTO_NEG, &lp_pause);
1709 pause_result = (ld_pause &
1710 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8;
1711 pause_result |= (lp_pause &
1712 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10;
1713 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
1714 pause_result);
1715 bnx2x_pause_resolve(bp, pause_result);
1716 }
1717 return ret;
1718}
1719
1477static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status) 1720static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
1478{ 1721{
1479 u32 ld_pause; /* local driver */ 1722 u32 ld_pause; /* local driver */
1480 u32 lp_pause; /* link partner */ 1723 u32 lp_pause; /* link partner */
1481 u32 pause_result; 1724 u32 pause_result;
1482 1725
1483 bp->flow_ctrl = 0; 1726 bp->flow_ctrl = 0;
1484 1727
1485 /* reolve from gp_status in case of AN complete and not sgmii */ 1728 /* resolve from gp_status in case of AN complete and not sgmii */
1486 if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) && 1729 if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
1487 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) && 1730 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
1488 (!(bp->phy_flags & PHY_SGMII_FLAG)) && 1731 (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
@@ -1499,45 +1742,57 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
1499 pause_result |= (lp_pause & 1742 pause_result |= (lp_pause &
1500 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; 1743 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
1501 DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result); 1744 DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
1745 bnx2x_pause_resolve(bp, pause_result);
1746 } else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) ||
1747 !(bnx2x_ext_phy_resove_fc(bp))) {
1748 /* forced speed */
1749 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
1750 switch (bp->req_flow_ctrl) {
1751 case FLOW_CTRL_AUTO:
1752 if (bp->dev->mtu <= 4500)
1753 bp->flow_ctrl = FLOW_CTRL_BOTH;
1754 else
1755 bp->flow_ctrl = FLOW_CTRL_TX;
1756 break;
1502 1757
1503 switch (pause_result) { /* ASYM P ASYM P */ 1758 case FLOW_CTRL_TX:
1504 case 0xb: /* 1 0 1 1 */ 1759 bp->flow_ctrl = FLOW_CTRL_TX;
1505 bp->flow_ctrl = FLOW_CTRL_TX; 1760 break;
1506 break;
1507
1508 case 0xe: /* 1 1 1 0 */
1509 bp->flow_ctrl = FLOW_CTRL_RX;
1510 break;
1511 1761
1512 case 0x5: /* 0 1 0 1 */ 1762 case FLOW_CTRL_RX:
1513 case 0x7: /* 0 1 1 1 */ 1763 if (bp->dev->mtu <= 4500)
1514 case 0xd: /* 1 1 0 1 */ 1764 bp->flow_ctrl = FLOW_CTRL_RX;
1515 case 0xf: /* 1 1 1 1 */ 1765 break;
1516 bp->flow_ctrl = FLOW_CTRL_BOTH;
1517 break;
1518 1766
1519 default: 1767 case FLOW_CTRL_BOTH:
1520 break; 1768 if (bp->dev->mtu <= 4500)
1521 } 1769 bp->flow_ctrl = FLOW_CTRL_BOTH;
1770 else
1771 bp->flow_ctrl = FLOW_CTRL_TX;
1772 break;
1522 1773
1523 } else { /* forced mode */ 1774 case FLOW_CTRL_NONE:
1524 switch (bp->req_flow_ctrl) { 1775 default:
1525 case FLOW_CTRL_AUTO: 1776 break;
1526 if (bp->dev->mtu <= 4500) 1777 }
1527 bp->flow_ctrl = FLOW_CTRL_BOTH; 1778 } else { /* forced mode */
1528 else 1779 switch (bp->req_flow_ctrl) {
1529 bp->flow_ctrl = FLOW_CTRL_TX; 1780 case FLOW_CTRL_AUTO:
1530 break; 1781 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
1782 " req_autoneg 0x%x\n",
1783 bp->req_flow_ctrl, bp->req_autoneg);
1784 break;
1531 1785
1532 case FLOW_CTRL_TX: 1786 case FLOW_CTRL_TX:
1533 case FLOW_CTRL_RX: 1787 case FLOW_CTRL_RX:
1534 case FLOW_CTRL_BOTH: 1788 case FLOW_CTRL_BOTH:
1535 bp->flow_ctrl = bp->req_flow_ctrl; 1789 bp->flow_ctrl = bp->req_flow_ctrl;
1536 break; 1790 break;
1537 1791
1538 case FLOW_CTRL_NONE: 1792 case FLOW_CTRL_NONE:
1539 default: 1793 default:
1540 break; 1794 break;
1795 }
1541 } 1796 }
1542 } 1797 }
1543 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl); 1798 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
@@ -1548,9 +1803,9 @@ static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
1548 bp->link_status = 0; 1803 bp->link_status = 0;
1549 1804
1550 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { 1805 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
1551 DP(NETIF_MSG_LINK, "link up\n"); 1806 DP(NETIF_MSG_LINK, "phy link up\n");
1552 1807
1553 bp->link_up = 1; 1808 bp->phy_link_up = 1;
1554 bp->link_status |= LINK_STATUS_LINK_UP; 1809 bp->link_status |= LINK_STATUS_LINK_UP;
1555 1810
1556 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) 1811 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
@@ -1659,20 +1914,20 @@ static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
1659 bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED; 1914 bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1660 1915
1661 } else { /* link_down */ 1916 } else { /* link_down */
1662 DP(NETIF_MSG_LINK, "link down\n"); 1917 DP(NETIF_MSG_LINK, "phy link down\n");
1663 1918
1664 bp->link_up = 0; 1919 bp->phy_link_up = 0;
1665 1920
1666 bp->line_speed = 0; 1921 bp->line_speed = 0;
1667 bp->duplex = DUPLEX_FULL; 1922 bp->duplex = DUPLEX_FULL;
1668 bp->flow_ctrl = 0; 1923 bp->flow_ctrl = 0;
1669 } 1924 }
1670 1925
1671 DP(NETIF_MSG_LINK, "gp_status 0x%x link_up %d\n" 1926 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %d\n"
1672 DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x" 1927 DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x"
1673 " link_status 0x%x\n", 1928 " link_status 0x%x\n",
1674 gp_status, bp->link_up, bp->line_speed, bp->duplex, bp->flow_ctrl, 1929 gp_status, bp->phy_link_up, bp->line_speed, bp->duplex,
1675 bp->link_status); 1930 bp->flow_ctrl, bp->link_status);
1676} 1931}
1677 1932
1678static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g) 1933static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
@@ -1680,40 +1935,40 @@ static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
1680 int port = bp->port; 1935 int port = bp->port;
1681 1936
1682 /* first reset all status 1937 /* first reset all status
1683 * we asume only one line will be change at a time */ 1938 * we assume only one line will be changed at a time */
1684 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 1939 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1685 (NIG_XGXS0_LINK_STATUS | 1940 (NIG_STATUS_XGXS0_LINK10G |
1686 NIG_SERDES0_LINK_STATUS | 1941 NIG_STATUS_XGXS0_LINK_STATUS |
1687 NIG_STATUS_INTERRUPT_XGXS0_LINK10G)); 1942 NIG_STATUS_SERDES0_LINK_STATUS));
1688 if (bp->link_up) { 1943 if (bp->phy_link_up) {
1689 if (is_10g) { 1944 if (is_10g) {
1690 /* Disable the 10G link interrupt 1945 /* Disable the 10G link interrupt
1691 * by writing 1 to the status register 1946 * by writing 1 to the status register
1692 */ 1947 */
1693 DP(NETIF_MSG_LINK, "10G XGXS link up\n"); 1948 DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
1694 bnx2x_bits_en(bp, 1949 bnx2x_bits_en(bp,
1695 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 1950 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1696 NIG_STATUS_INTERRUPT_XGXS0_LINK10G); 1951 NIG_STATUS_XGXS0_LINK10G);
1697 1952
1698 } else if (bp->phy_flags & PHY_XGXS_FLAG) { 1953 } else if (bp->phy_flags & PHY_XGXS_FLAG) {
1699 /* Disable the link interrupt 1954 /* Disable the link interrupt
1700 * by writing 1 to the relevant lane 1955 * by writing 1 to the relevant lane
1701 * in the status register 1956 * in the status register
1702 */ 1957 */
1703 DP(NETIF_MSG_LINK, "1G XGXS link up\n"); 1958 DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
1704 bnx2x_bits_en(bp, 1959 bnx2x_bits_en(bp,
1705 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 1960 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1706 ((1 << bp->ser_lane) << 1961 ((1 << bp->ser_lane) <<
1707 NIG_XGXS0_LINK_STATUS_SIZE)); 1962 NIG_STATUS_XGXS0_LINK_STATUS_SIZE));
1708 1963
1709 } else { /* SerDes */ 1964 } else { /* SerDes */
1710 DP(NETIF_MSG_LINK, "SerDes link up\n"); 1965 DP(NETIF_MSG_LINK, "SerDes phy link up\n");
1711 /* Disable the link interrupt 1966 /* Disable the link interrupt
1712 * by writing 1 to the status register 1967 * by writing 1 to the status register
1713 */ 1968 */
1714 bnx2x_bits_en(bp, 1969 bnx2x_bits_en(bp,
1715 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 1970 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1716 NIG_SERDES0_LINK_STATUS); 1971 NIG_STATUS_SERDES0_LINK_STATUS);
1717 } 1972 }
1718 1973
1719 } else { /* link_down */ 1974 } else { /* link_down */
@@ -1724,91 +1979,182 @@ static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
1724{ 1979{
1725 u32 ext_phy_type; 1980 u32 ext_phy_type;
1726 u32 ext_phy_addr; 1981 u32 ext_phy_addr;
1727 u32 local_phy; 1982 u32 val1 = 0, val2;
1728 u32 val = 0;
1729 u32 rx_sd, pcs_status; 1983 u32 rx_sd, pcs_status;
1730 1984
1731 if (bp->phy_flags & PHY_XGXS_FLAG) { 1985 if (bp->phy_flags & PHY_XGXS_FLAG) {
1732 local_phy = bp->phy_addr;
1733 ext_phy_addr = ((bp->ext_phy_config & 1986 ext_phy_addr = ((bp->ext_phy_config &
1734 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> 1987 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1735 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); 1988 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
1736 bp->phy_addr = (u8)ext_phy_addr;
1737 1989
1738 ext_phy_type = XGXS_EXT_PHY_TYPE(bp); 1990 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
1739 switch (ext_phy_type) { 1991 switch (ext_phy_type) {
1740 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 1992 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
1741 DP(NETIF_MSG_LINK, "XGXS Direct\n"); 1993 DP(NETIF_MSG_LINK, "XGXS Direct\n");
1742 val = 1; 1994 val1 = 1;
1743 break; 1995 break;
1744 1996
1745 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 1997 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
1746 DP(NETIF_MSG_LINK, "XGXS 8705\n"); 1998 DP(NETIF_MSG_LINK, "XGXS 8705\n");
1747 bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD, 1999 bnx2x_mdio45_read(bp, ext_phy_addr,
1748 EXT_PHY_OPT_LASI_STATUS, &val); 2000 EXT_PHY_OPT_WIS_DEVAD,
1749 DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val); 2001 EXT_PHY_OPT_LASI_STATUS, &val1);
1750 2002 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
1751 bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD, 2003
1752 EXT_PHY_OPT_LASI_STATUS, &val); 2004 bnx2x_mdio45_read(bp, ext_phy_addr,
1753 DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val); 2005 EXT_PHY_OPT_WIS_DEVAD,
1754 2006 EXT_PHY_OPT_LASI_STATUS, &val1);
1755 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 2007 DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
2008
2009 bnx2x_mdio45_read(bp, ext_phy_addr,
2010 EXT_PHY_OPT_PMA_PMD_DEVAD,
1756 EXT_PHY_OPT_PMD_RX_SD, &rx_sd); 2011 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
1757 val = (rx_sd & 0x1); 2012 DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
2013 val1 = (rx_sd & 0x1);
1758 break; 2014 break;
1759 2015
1760 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 2016 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
1761 DP(NETIF_MSG_LINK, "XGXS 8706\n"); 2017 DP(NETIF_MSG_LINK, "XGXS 8706\n");
1762 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 2018 bnx2x_mdio45_read(bp, ext_phy_addr,
1763 EXT_PHY_OPT_LASI_STATUS, &val); 2019 EXT_PHY_OPT_PMA_PMD_DEVAD,
1764 DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val); 2020 EXT_PHY_OPT_LASI_STATUS, &val1);
1765 2021 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
1766 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 2022
1767 EXT_PHY_OPT_LASI_STATUS, &val); 2023 bnx2x_mdio45_read(bp, ext_phy_addr,
1768 DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val); 2024 EXT_PHY_OPT_PMA_PMD_DEVAD,
1769 2025 EXT_PHY_OPT_LASI_STATUS, &val1);
1770 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 2026 DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);
2027
2028 bnx2x_mdio45_read(bp, ext_phy_addr,
2029 EXT_PHY_OPT_PMA_PMD_DEVAD,
1771 EXT_PHY_OPT_PMD_RX_SD, &rx_sd); 2030 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
1772 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PCS_DEVAD, 2031 bnx2x_mdio45_read(bp, ext_phy_addr,
1773 EXT_PHY_OPT_PCS_STATUS, &pcs_status); 2032 EXT_PHY_OPT_PCS_DEVAD,
2033 EXT_PHY_OPT_PCS_STATUS, &pcs_status);
2034 bnx2x_mdio45_read(bp, ext_phy_addr,
2035 EXT_PHY_AUTO_NEG_DEVAD,
2036 EXT_PHY_OPT_AN_LINK_STATUS, &val2);
2037
1774 DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x" 2038 DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
1775 " pcs_status 0x%x\n", rx_sd, pcs_status); 2039 " pcs_status 0x%x 1Gbps link_status 0x%x 0x%x\n",
1776 /* link is up if both bit 0 of pmd_rx and 2040 rx_sd, pcs_status, val2, (val2 & (1<<1)));
1777 * bit 0 of pcs_status are set 2041 /* link is up if both bit 0 of pmd_rx_sd and
2042 * bit 0 of pcs_status are set, or if the autoneg bit
2043 1 is set
1778 */ 2044 */
1779 val = (rx_sd & pcs_status); 2045 val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
2046 break;
2047
2048 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
2049 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2050
2051 /* clear the interrupt LASI status register */
2052 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2053 ext_phy_addr,
2054 EXT_PHY_KR_PCS_DEVAD,
2055 EXT_PHY_KR_LASI_STATUS, &val2);
2056 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2057 ext_phy_addr,
2058 EXT_PHY_KR_PCS_DEVAD,
2059 EXT_PHY_KR_LASI_STATUS, &val1);
2060 DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n",
2061 val2, val1);
2062 /* Check the LASI */
2063 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2064 ext_phy_addr,
2065 EXT_PHY_KR_PMA_PMD_DEVAD,
2066 0x9003, &val2);
2067 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2068 ext_phy_addr,
2069 EXT_PHY_KR_PMA_PMD_DEVAD,
2070 0x9003, &val1);
2071 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
2072 val2, val1);
2073 /* Check the link status */
2074 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2075 ext_phy_addr,
2076 EXT_PHY_KR_PCS_DEVAD,
2077 EXT_PHY_KR_PCS_STATUS, &val2);
2078 DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
2079 /* Check the link status on 1.1.2 */
2080 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2081 ext_phy_addr,
2082 EXT_PHY_OPT_PMA_PMD_DEVAD,
2083 EXT_PHY_KR_STATUS, &val2);
2084 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
2085 ext_phy_addr,
2086 EXT_PHY_OPT_PMA_PMD_DEVAD,
2087 EXT_PHY_KR_STATUS, &val1);
2088 DP(NETIF_MSG_LINK,
2089 "KR PMA status 0x%x->0x%x\n", val2, val1);
2090 val1 = ((val1 & 4) == 4);
2091 /* If 1G was requested assume the link is up */
2092 if (!(bp->req_autoneg & AUTONEG_SPEED) &&
2093 (bp->req_line_speed == SPEED_1000))
2094 val1 = 1;
2095 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
2096 break;
2097
2098 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2099 bnx2x_mdio45_read(bp, ext_phy_addr,
2100 EXT_PHY_OPT_PMA_PMD_DEVAD,
2101 EXT_PHY_OPT_LASI_STATUS, &val2);
2102 bnx2x_mdio45_read(bp, ext_phy_addr,
2103 EXT_PHY_OPT_PMA_PMD_DEVAD,
2104 EXT_PHY_OPT_LASI_STATUS, &val1);
2105 DP(NETIF_MSG_LINK,
2106 "10G-base-T LASI status 0x%x->0x%x\n", val2, val1);
2107 bnx2x_mdio45_read(bp, ext_phy_addr,
2108 EXT_PHY_OPT_PMA_PMD_DEVAD,
2109 EXT_PHY_KR_STATUS, &val2);
2110 bnx2x_mdio45_read(bp, ext_phy_addr,
2111 EXT_PHY_OPT_PMA_PMD_DEVAD,
2112 EXT_PHY_KR_STATUS, &val1);
2113 DP(NETIF_MSG_LINK,
2114 "10G-base-T PMA status 0x%x->0x%x\n", val2, val1);
2115 val1 = ((val1 & 4) == 4);
2116 /* if link is up
2117 * print the AN outcome of the SFX7101 PHY
2118 */
2119 if (val1) {
2120 bnx2x_mdio45_read(bp, ext_phy_addr,
2121 EXT_PHY_KR_AUTO_NEG_DEVAD,
2122 0x21, &val2);
2123 DP(NETIF_MSG_LINK,
2124 "SFX7101 AN status 0x%x->%s\n", val2,
2125 (val2 & (1<<14)) ? "Master" : "Slave");
2126 }
1780 break; 2127 break;
1781 2128
1782 default: 2129 default:
1783 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", 2130 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
1784 bp->ext_phy_config); 2131 bp->ext_phy_config);
1785 val = 0; 2132 val1 = 0;
1786 break; 2133 break;
1787 } 2134 }
1788 bp->phy_addr = local_phy;
1789 2135
1790 } else { /* SerDes */ 2136 } else { /* SerDes */
1791 ext_phy_type = SERDES_EXT_PHY_TYPE(bp); 2137 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
1792 switch (ext_phy_type) { 2138 switch (ext_phy_type) {
1793 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: 2139 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
1794 DP(NETIF_MSG_LINK, "SerDes Direct\n"); 2140 DP(NETIF_MSG_LINK, "SerDes Direct\n");
1795 val = 1; 2141 val1 = 1;
1796 break; 2142 break;
1797 2143
1798 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: 2144 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
1799 DP(NETIF_MSG_LINK, "SerDes 5482\n"); 2145 DP(NETIF_MSG_LINK, "SerDes 5482\n");
1800 val = 1; 2146 val1 = 1;
1801 break; 2147 break;
1802 2148
1803 default: 2149 default:
1804 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n", 2150 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
1805 bp->ext_phy_config); 2151 bp->ext_phy_config);
1806 val = 0; 2152 val1 = 0;
1807 break; 2153 break;
1808 } 2154 }
1809 } 2155 }
1810 2156
1811 return val; 2157 return val1;
1812} 2158}
1813 2159
1814static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb) 2160static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
@@ -1819,7 +2165,7 @@ static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
1819 u32 wb_write[2]; 2165 u32 wb_write[2];
1820 u32 val; 2166 u32 val;
1821 2167
1822 DP(NETIF_MSG_LINK, "enableing BigMAC\n"); 2168 DP(NETIF_MSG_LINK, "enabling BigMAC\n");
1823 /* reset and unreset the BigMac */ 2169 /* reset and unreset the BigMac */
1824 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 2170 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1825 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 2171 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -1933,6 +2279,35 @@ static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
1933 bp->stats_state = STATS_STATE_ENABLE; 2279 bp->stats_state = STATS_STATE_ENABLE;
1934} 2280}
1935 2281
2282static void bnx2x_bmac_rx_disable(struct bnx2x *bp)
2283{
2284 int port = bp->port;
2285 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
2286 NIG_REG_INGRESS_BMAC0_MEM;
2287 u32 wb_write[2];
2288
2289 /* Only if the bmac is out of reset */
2290 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
2291 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) {
2292 /* Clear Rx Enable bit in BMAC_CONTROL register */
2293#ifdef BNX2X_DMAE_RD
2294 bnx2x_read_dmae(bp, bmac_addr +
2295 BIGMAC_REGISTER_BMAC_CONTROL, 2);
2296 wb_write[0] = *bnx2x_sp(bp, wb_data[0]);
2297 wb_write[1] = *bnx2x_sp(bp, wb_data[1]);
2298#else
2299 wb_write[0] = REG_RD(bp,
2300 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL);
2301 wb_write[1] = REG_RD(bp,
2302 bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4);
2303#endif
2304 wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE;
2305 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
2306 wb_write, 2);
2307 msleep(1);
2308 }
2309}
2310
1936static void bnx2x_emac_enable(struct bnx2x *bp) 2311static void bnx2x_emac_enable(struct bnx2x *bp)
1937{ 2312{
1938 int port = bp->port; 2313 int port = bp->port;
@@ -1940,7 +2315,7 @@ static void bnx2x_emac_enable(struct bnx2x *bp)
1940 u32 val; 2315 u32 val;
1941 int timeout; 2316 int timeout;
1942 2317
1943 DP(NETIF_MSG_LINK, "enableing EMAC\n"); 2318 DP(NETIF_MSG_LINK, "enabling EMAC\n");
1944 /* reset and unreset the emac core */ 2319 /* reset and unreset the emac core */
1945 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 2320 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1946 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); 2321 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
@@ -2033,7 +2408,7 @@ static void bnx2x_emac_enable(struct bnx2x *bp)
2033 EMAC_TX_MODE_EXT_PAUSE_EN); 2408 EMAC_TX_MODE_EXT_PAUSE_EN);
2034 } 2409 }
2035 2410
2036 /* KEEP_VLAN_TAG, promiscous */ 2411 /* KEEP_VLAN_TAG, promiscuous */
2037 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); 2412 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2038 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; 2413 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2039 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val); 2414 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
@@ -2161,7 +2536,6 @@ static void bnx2x_pbf_update(struct bnx2x *bp)
2161 u32 count = 1000; 2536 u32 count = 1000;
2162 u32 pause = 0; 2537 u32 pause = 0;
2163 2538
2164
2165 /* disable port */ 2539 /* disable port */
2166 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); 2540 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2167 2541
@@ -2232,7 +2606,7 @@ static void bnx2x_pbf_update(struct bnx2x *bp)
2232static void bnx2x_update_mng(struct bnx2x *bp) 2606static void bnx2x_update_mng(struct bnx2x *bp)
2233{ 2607{
2234 if (!nomcp) 2608 if (!nomcp)
2235 SHMEM_WR(bp, drv_fw_mb[bp->port].link_status, 2609 SHMEM_WR(bp, port_mb[bp->port].link_status,
2236 bp->link_status); 2610 bp->link_status);
2237} 2611}
2238 2612
@@ -2294,19 +2668,19 @@ static void bnx2x_link_down(struct bnx2x *bp)
2294 DP(BNX2X_MSG_STATS, "stats_state - STOP\n"); 2668 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2295 } 2669 }
2296 2670
2297 /* indicate link down */ 2671 /* indicate no mac active */
2298 bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG); 2672 bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
2299 2673
2300 /* reset BigMac */ 2674 /* update shared memory */
2301 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 2675 bnx2x_update_mng(bp);
2302 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2303 2676
2304 /* ignore drain flag interrupt */
2305 /* activate nig drain */ 2677 /* activate nig drain */
2306 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 2678 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2307 2679
2308 /* update shared memory */ 2680 /* reset BigMac */
2309 bnx2x_update_mng(bp); 2681 bnx2x_bmac_rx_disable(bp);
2682 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2683 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2310 2684
2311 /* indicate link down */ 2685 /* indicate link down */
2312 bnx2x_link_report(bp); 2686 bnx2x_link_report(bp);
@@ -2317,14 +2691,15 @@ static void bnx2x_init_mac_stats(struct bnx2x *bp);
2317/* This function is called upon link interrupt */ 2691/* This function is called upon link interrupt */
2318static void bnx2x_link_update(struct bnx2x *bp) 2692static void bnx2x_link_update(struct bnx2x *bp)
2319{ 2693{
2320 u32 gp_status;
2321 int port = bp->port; 2694 int port = bp->port;
2322 int i; 2695 int i;
2696 u32 gp_status;
2323 int link_10g; 2697 int link_10g;
2324 2698
2325 DP(NETIF_MSG_LINK, "port %x, is xgxs %x, stat_mask 0x%x," 2699 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
2326 " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x," 2700 " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
2327 " 10G %x, XGXS_LINK %x\n", port, (bp->phy_flags & PHY_XGXS_FLAG), 2701 " 10G %x, XGXS_LINK %x\n", port,
2702 (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
2328 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4), 2703 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
2329 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask, 2704 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
2330 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18), 2705 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
@@ -2336,7 +2711,7 @@ static void bnx2x_link_update(struct bnx2x *bp)
2336 might_sleep(); 2711 might_sleep();
2337 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS); 2712 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
2338 /* avoid fast toggling */ 2713 /* avoid fast toggling */
2339 for (i = 0 ; i < 10 ; i++) { 2714 for (i = 0; i < 10; i++) {
2340 msleep(10); 2715 msleep(10);
2341 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1, 2716 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
2342 &gp_status); 2717 &gp_status);
@@ -2351,7 +2726,8 @@ static void bnx2x_link_update(struct bnx2x *bp)
2351 bnx2x_link_int_ack(bp, link_10g); 2726 bnx2x_link_int_ack(bp, link_10g);
2352 2727
2353 /* link is up only if both local phy and external phy are up */ 2728 /* link is up only if both local phy and external phy are up */
2354 if (bp->link_up && bnx2x_ext_phy_is_link_up(bp)) { 2729 bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp));
2730 if (bp->link_up) {
2355 if (link_10g) { 2731 if (link_10g) {
2356 bnx2x_bmac_enable(bp, 0); 2732 bnx2x_bmac_enable(bp, 0);
2357 bnx2x_leds_set(bp, SPEED_10000); 2733 bnx2x_leds_set(bp, SPEED_10000);
@@ -2427,7 +2803,9 @@ static void bnx2x_reset_unicore(struct bnx2x *bp)
2427 } 2803 }
2428 } 2804 }
2429 2805
2430 BNX2X_ERR("BUG! unicore is still in reset!\n"); 2806 BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n",
2807 (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
2808 bp->phy_addr);
2431} 2809}
2432 2810
2433static void bnx2x_set_swap_lanes(struct bnx2x *bp) 2811static void bnx2x_set_swap_lanes(struct bnx2x *bp)
@@ -2475,12 +2853,12 @@ static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2475 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT); 2853 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2476 2854
2477 bnx2x_mdio22_write(bp, 2855 bnx2x_mdio22_write(bp,
2478 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, 2856 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
2479 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); 2857 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2480 2858
2481 bnx2x_mdio22_read(bp, 2859 bnx2x_mdio22_read(bp,
2482 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 2860 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2483 &control2); 2861 &control2);
2484 2862
2485 if (bp->autoneg & AUTONEG_PARALLEL) { 2863 if (bp->autoneg & AUTONEG_PARALLEL) {
2486 control2 |= 2864 control2 |=
@@ -2490,8 +2868,14 @@ static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2490 ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; 2868 ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2491 } 2869 }
2492 bnx2x_mdio22_write(bp, 2870 bnx2x_mdio22_write(bp,
2493 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, 2871 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2494 control2); 2872 control2);
2873
2874 /* Disable parallel detection of HiG */
2875 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2876 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
2877 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
2878 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
2495 } 2879 }
2496} 2880}
2497 2881
@@ -2625,7 +3009,7 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
2625 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G); 3009 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2626 3010
2627 /* set extended capabilities */ 3011 /* set extended capabilities */
2628 if (bp->advertising & ADVERTISED_2500baseT_Full) 3012 if (bp->advertising & ADVERTISED_2500baseX_Full)
2629 val |= MDIO_OVER_1G_UP1_2_5G; 3013 val |= MDIO_OVER_1G_UP1_2_5G;
2630 if (bp->advertising & ADVERTISED_10000baseT_Full) 3014 if (bp->advertising & ADVERTISED_10000baseT_Full)
2631 val |= MDIO_OVER_1G_UP1_10G; 3015 val |= MDIO_OVER_1G_UP1_10G;
@@ -2641,20 +3025,91 @@ static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
2641 /* for AN, we are always publishing full duplex */ 3025 /* for AN, we are always publishing full duplex */
2642 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 3026 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
2643 3027
2644 /* set pause */ 3028 /* resolve pause mode and advertisement
2645 switch (bp->pause_mode) { 3029 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
2646 case PAUSE_SYMMETRIC: 3030 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
2647 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC; 3031 switch (bp->req_flow_ctrl) {
2648 break; 3032 case FLOW_CTRL_AUTO:
2649 case PAUSE_ASYMMETRIC: 3033 if (bp->dev->mtu <= 4500) {
2650 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 3034 an_adv |=
2651 break; 3035 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2652 case PAUSE_BOTH: 3036 bp->advertising |= (ADVERTISED_Pause |
2653 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 3037 ADVERTISED_Asym_Pause);
2654 break; 3038 } else {
2655 case PAUSE_NONE: 3039 an_adv |=
2656 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; 3040 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2657 break; 3041 bp->advertising |= ADVERTISED_Asym_Pause;
3042 }
3043 break;
3044
3045 case FLOW_CTRL_TX:
3046 an_adv |=
3047 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3048 bp->advertising |= ADVERTISED_Asym_Pause;
3049 break;
3050
3051 case FLOW_CTRL_RX:
3052 if (bp->dev->mtu <= 4500) {
3053 an_adv |=
3054 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3055 bp->advertising |= (ADVERTISED_Pause |
3056 ADVERTISED_Asym_Pause);
3057 } else {
3058 an_adv |=
3059 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3060 bp->advertising &= ~(ADVERTISED_Pause |
3061 ADVERTISED_Asym_Pause);
3062 }
3063 break;
3064
3065 case FLOW_CTRL_BOTH:
3066 if (bp->dev->mtu <= 4500) {
3067 an_adv |=
3068 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3069 bp->advertising |= (ADVERTISED_Pause |
3070 ADVERTISED_Asym_Pause);
3071 } else {
3072 an_adv |=
3073 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3074 bp->advertising |= ADVERTISED_Asym_Pause;
3075 }
3076 break;
3077
3078 case FLOW_CTRL_NONE:
3079 default:
3080 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3081 bp->advertising &= ~(ADVERTISED_Pause |
3082 ADVERTISED_Asym_Pause);
3083 break;
3084 }
3085 } else { /* forced mode */
3086 switch (bp->req_flow_ctrl) {
3087 case FLOW_CTRL_AUTO:
3088 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
3089 " req_autoneg 0x%x\n",
3090 bp->req_flow_ctrl, bp->req_autoneg);
3091 break;
3092
3093 case FLOW_CTRL_TX:
3094 an_adv |=
3095 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3096 bp->advertising |= ADVERTISED_Asym_Pause;
3097 break;
3098
3099 case FLOW_CTRL_RX:
3100 case FLOW_CTRL_BOTH:
3101 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3102 bp->advertising |= (ADVERTISED_Pause |
3103 ADVERTISED_Asym_Pause);
3104 break;
3105
3106 case FLOW_CTRL_NONE:
3107 default:
3108 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3109 bp->advertising &= ~(ADVERTISED_Pause |
3110 ADVERTISED_Asym_Pause);
3111 break;
3112 }
2658 } 3113 }
2659 3114
2660 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0); 3115 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
@@ -2752,47 +3207,162 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
2752static void bnx2x_link_int_enable(struct bnx2x *bp) 3207static void bnx2x_link_int_enable(struct bnx2x *bp)
2753{ 3208{
2754 int port = bp->port; 3209 int port = bp->port;
3210 u32 ext_phy_type;
3211 u32 mask;
2755 3212
2756 /* setting the status to report on link up 3213 /* setting the status to report on link up
2757 for either XGXS or SerDes */ 3214 for either XGXS or SerDes */
2758 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 3215 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2759 (NIG_XGXS0_LINK_STATUS | 3216 (NIG_STATUS_XGXS0_LINK10G |
2760 NIG_STATUS_INTERRUPT_XGXS0_LINK10G | 3217 NIG_STATUS_XGXS0_LINK_STATUS |
2761 NIG_SERDES0_LINK_STATUS)); 3218 NIG_STATUS_SERDES0_LINK_STATUS));
2762 3219
2763 if (bp->phy_flags & PHY_XGXS_FLAG) { 3220 if (bp->phy_flags & PHY_XGXS_FLAG) {
2764 /* TBD - 3221 mask = (NIG_MASK_XGXS0_LINK10G |
2765 * in force mode (not AN) we can enable just the relevant 3222 NIG_MASK_XGXS0_LINK_STATUS);
2766 * interrupt 3223 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
2767 * Even in AN we might enable only one according to the AN 3224 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
2768 * speed mask 3225 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
2769 */ 3226 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
2770 bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 3227 (ext_phy_type !=
2771 (NIG_MASK_XGXS0_LINK_STATUS | 3228 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
2772 NIG_MASK_XGXS0_LINK10G)); 3229 mask |= NIG_MASK_MI_INT;
2773 DP(NETIF_MSG_LINK, "enable XGXS interrupt\n"); 3230 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3231 }
2774 3232
2775 } else { /* SerDes */ 3233 } else { /* SerDes */
2776 bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 3234 mask = NIG_MASK_SERDES0_LINK_STATUS;
2777 NIG_MASK_SERDES0_LINK_STATUS); 3235 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
2778 DP(NETIF_MSG_LINK, "enable SerDes interrupt\n"); 3236 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3237 if ((ext_phy_type !=
3238 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
3239 (ext_phy_type !=
3240 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
3241 mask |= NIG_MASK_MI_INT;
3242 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3243 }
2779 } 3244 }
3245 bnx2x_bits_en(bp,
3246 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3247 mask);
3248 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
3249 " int_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
3250 " 10G %x, XGXS_LINK %x\n", port,
3251 (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes",
3252 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
3253 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3254 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
3255 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
3256 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
3257 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
3258 );
3259}
3260
3261static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp)
3262{
3263 u32 ext_phy_addr = ((bp->ext_phy_config &
3264 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3265 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3266 u32 fw_ver1, fw_ver2;
3267
3268 /* Need to wait 200ms after reset */
3269 msleep(200);
3270 /* Boot port from external ROM
3271 * Set ser_boot_ctl bit in the MISC_CTRL1 register
3272 */
3273 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3274 EXT_PHY_KR_PMA_PMD_DEVAD,
3275 EXT_PHY_KR_MISC_CTRL1, 0x0001);
3276
3277 /* Reset internal microprocessor */
3278 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3279 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3280 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3281 /* set micro reset = 0 */
3282 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3283 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3284 EXT_PHY_KR_ROM_MICRO_RESET);
3285 /* Reset internal microprocessor */
3286 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3287 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3288 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3289 /* wait for 100ms for code download via SPI port */
3290 msleep(100);
3291
3292 /* Clear ser_boot_ctl bit */
3293 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3294 EXT_PHY_KR_PMA_PMD_DEVAD,
3295 EXT_PHY_KR_MISC_CTRL1, 0x0000);
3296 /* Wait 100ms */
3297 msleep(100);
3298
3299 /* Print the PHY FW version */
3300 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3301 EXT_PHY_KR_PMA_PMD_DEVAD,
3302 0xca19, &fw_ver1);
3303 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3304 EXT_PHY_KR_PMA_PMD_DEVAD,
3305 0xca1a, &fw_ver2);
3306 DP(NETIF_MSG_LINK,
3307 "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
3308}
3309
3310static void bnx2x_bcm8072_force_10G(struct bnx2x *bp)
3311{
3312 u32 ext_phy_addr = ((bp->ext_phy_config &
3313 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3314 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3315
3316 /* Force KR or KX */
3317 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3318 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL,
3319 0x2040);
3320 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3321 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2,
3322 0x000b);
3323 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3324 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL,
3325 0x0000);
3326 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3327 EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL,
3328 0x0000);
2780} 3329}
2781 3330
2782static void bnx2x_ext_phy_init(struct bnx2x *bp) 3331static void bnx2x_ext_phy_init(struct bnx2x *bp)
2783{ 3332{
2784 int port = bp->port;
2785 u32 ext_phy_type; 3333 u32 ext_phy_type;
2786 u32 ext_phy_addr; 3334 u32 ext_phy_addr;
2787 u32 local_phy; 3335 u32 cnt;
3336 u32 ctrl;
3337 u32 val = 0;
2788 3338
2789 if (bp->phy_flags & PHY_XGXS_FLAG) { 3339 if (bp->phy_flags & PHY_XGXS_FLAG) {
2790 local_phy = bp->phy_addr;
2791 ext_phy_addr = ((bp->ext_phy_config & 3340 ext_phy_addr = ((bp->ext_phy_config &
2792 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> 3341 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2793 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); 3342 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2794 3343
2795 ext_phy_type = XGXS_EXT_PHY_TYPE(bp); 3344 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3345 /* Make sure that the soft reset is off (except for the 8072:
3346 * due to the lock, it will be done inside the specific
3347 * handling)
3348 */
3349 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3350 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3351 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3352 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) {
3353 /* Wait for soft reset to get cleared up to 1 sec */
3354 for (cnt = 0; cnt < 1000; cnt++) {
3355 bnx2x_mdio45_read(bp, ext_phy_addr,
3356 EXT_PHY_OPT_PMA_PMD_DEVAD,
3357 EXT_PHY_OPT_CNTL, &ctrl);
3358 if (!(ctrl & (1<<15)))
3359 break;
3360 msleep(1);
3361 }
3362 DP(NETIF_MSG_LINK,
3363 "control reg 0x%x (after %d ms)\n", ctrl, cnt);
3364 }
3365
2796 switch (ext_phy_type) { 3366 switch (ext_phy_type) {
2797 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 3367 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2798 DP(NETIF_MSG_LINK, "XGXS Direct\n"); 3368 DP(NETIF_MSG_LINK, "XGXS Direct\n");
@@ -2800,49 +3370,235 @@ static void bnx2x_ext_phy_init(struct bnx2x *bp)
2800 3370
2801 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 3371 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2802 DP(NETIF_MSG_LINK, "XGXS 8705\n"); 3372 DP(NETIF_MSG_LINK, "XGXS 8705\n");
2803 bnx2x_bits_en(bp,
2804 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2805 NIG_MASK_MI_INT);
2806 DP(NETIF_MSG_LINK, "enabled extenal phy int\n");
2807 3373
2808 bp->phy_addr = ext_phy_type; 3374 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
2809 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 3375 EXT_PHY_OPT_PMA_PMD_DEVAD,
2810 EXT_PHY_OPT_PMD_MISC_CNTL, 3376 EXT_PHY_OPT_PMD_MISC_CNTL,
2811 0x8288); 3377 0x8288);
2812 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 3378 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3379 EXT_PHY_OPT_PMA_PMD_DEVAD,
2813 EXT_PHY_OPT_PHY_IDENTIFIER, 3380 EXT_PHY_OPT_PHY_IDENTIFIER,
2814 0x7fbf); 3381 0x7fbf);
2815 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 3382 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3383 EXT_PHY_OPT_PMA_PMD_DEVAD,
2816 EXT_PHY_OPT_CMU_PLL_BYPASS, 3384 EXT_PHY_OPT_CMU_PLL_BYPASS,
2817 0x0100); 3385 0x0100);
2818 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_WIS_DEVAD, 3386 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3387 EXT_PHY_OPT_WIS_DEVAD,
2819 EXT_PHY_OPT_LASI_CNTL, 0x1); 3388 EXT_PHY_OPT_LASI_CNTL, 0x1);
2820 break; 3389 break;
2821 3390
2822 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 3391 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2823 DP(NETIF_MSG_LINK, "XGXS 8706\n"); 3392 DP(NETIF_MSG_LINK, "XGXS 8706\n");
2824 bnx2x_bits_en(bp, 3393
2825 NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 3394 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
2826 NIG_MASK_MI_INT); 3395 /* Force speed */
2827 DP(NETIF_MSG_LINK, "enabled extenal phy int\n"); 3396 if (bp->req_line_speed == SPEED_10000) {
2828 3397 DP(NETIF_MSG_LINK,
2829 bp->phy_addr = ext_phy_type; 3398 "XGXS 8706 force 10Gbps\n");
2830 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 3399 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
2831 EXT_PHY_OPT_PMD_DIGITAL_CNT, 3400 EXT_PHY_OPT_PMA_PMD_DEVAD,
2832 0x400); 3401 EXT_PHY_OPT_PMD_DIGITAL_CNT,
2833 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, 3402 0x400);
3403 } else {
3404 /* Force 1Gbps */
3405 DP(NETIF_MSG_LINK,
3406 "XGXS 8706 force 1Gbps\n");
3407
3408 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3409 EXT_PHY_OPT_PMA_PMD_DEVAD,
3410 EXT_PHY_OPT_CNTL,
3411 0x0040);
3412
3413 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3414 EXT_PHY_OPT_PMA_PMD_DEVAD,
3415 EXT_PHY_OPT_CNTL2,
3416 0x000D);
3417 }
3418
3419 /* Enable LASI */
3420 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3421 EXT_PHY_OPT_PMA_PMD_DEVAD,
3422 EXT_PHY_OPT_LASI_CNTL,
3423 0x1);
3424 } else {
3425 /* AUTONEG */
3426 /* Allow CL37 through CL73 */
3427 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3428 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3429 EXT_PHY_AUTO_NEG_DEVAD,
3430 EXT_PHY_OPT_AN_CL37_CL73,
3431 0x040c);
3432
3433 /* Enable Full-Duplex advertisement on CL37 */
3434 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3435 EXT_PHY_AUTO_NEG_DEVAD,
3436 EXT_PHY_OPT_AN_CL37_FD,
3437 0x0020);
3438 /* Enable CL37 AN */
3439 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3440 EXT_PHY_AUTO_NEG_DEVAD,
3441 EXT_PHY_OPT_AN_CL37_AN,
3442 0x1000);
3443 /* Advertise 10G/1G support */
3444 if (bp->advertising &
3445 ADVERTISED_1000baseT_Full)
3446 val = (1<<5);
3447 if (bp->advertising &
3448 ADVERTISED_10000baseT_Full)
3449 val |= (1<<7);
3450
3451 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3452 EXT_PHY_AUTO_NEG_DEVAD,
3453 EXT_PHY_OPT_AN_ADV, val);
3454 /* Enable LASI */
3455 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3456 EXT_PHY_OPT_PMA_PMD_DEVAD,
3457 EXT_PHY_OPT_LASI_CNTL,
3458 0x1);
3459
3460 /* Enable clause 73 AN */
3461 bnx2x_mdio45_write(bp, ext_phy_addr,
3462 EXT_PHY_AUTO_NEG_DEVAD,
3463 EXT_PHY_OPT_CNTL,
3464 0x1200);
3465 }
3466 break;
3467
3468 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3469 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3470 /* Wait for soft reset to get cleared up to 1 sec */
3471 for (cnt = 0; cnt < 1000; cnt++) {
3472 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
3473 ext_phy_addr,
3474 EXT_PHY_OPT_PMA_PMD_DEVAD,
3475 EXT_PHY_OPT_CNTL, &ctrl);
3476 if (!(ctrl & (1<<15)))
3477 break;
3478 msleep(1);
3479 }
3480 DP(NETIF_MSG_LINK,
3481 "8072 control reg 0x%x (after %d ms)\n",
3482 ctrl, cnt);
3483
3484 bnx2x_bcm8072_external_rom_boot(bp);
3485 DP(NETIF_MSG_LINK, "Finished loading 8072 KR ROM\n");
3486
3487 /* enable LASI */
3488 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3489 ext_phy_addr,
3490 EXT_PHY_KR_PMA_PMD_DEVAD,
3491 0x9000, 0x0400);
3492 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3493 ext_phy_addr,
3494 EXT_PHY_KR_PMA_PMD_DEVAD,
3495 EXT_PHY_KR_LASI_CNTL, 0x0004);
3496
3497 /* If this is forced speed, set to KR or KX
3498 * (all others are not supported)
3499 */
3500 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3501 if (bp->req_line_speed == SPEED_10000) {
3502 bnx2x_bcm8072_force_10G(bp);
3503 DP(NETIF_MSG_LINK,
3504 "Forced speed 10G on 8072\n");
3505 /* unlock */
3506 bnx2x_hw_unlock(bp,
3507 HW_LOCK_RESOURCE_8072_MDIO);
3508 break;
3509 } else
3510 val = (1<<5);
3511 } else {
3512
3513 /* Advertise 10G/1G support */
3514 if (bp->advertising &
3515 ADVERTISED_1000baseT_Full)
3516 val = (1<<5);
3517 if (bp->advertising &
3518 ADVERTISED_10000baseT_Full)
3519 val |= (1<<7);
3520 }
3521 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3522 ext_phy_addr,
3523 EXT_PHY_KR_AUTO_NEG_DEVAD,
3524 0x11, val);
3525 /* Add support for CL37 ( passive mode ) I */
3526 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3527 ext_phy_addr,
3528 EXT_PHY_KR_AUTO_NEG_DEVAD,
3529 0x8370, 0x040c);
3530 /* Add support for CL37 ( passive mode ) II */
3531 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3532 ext_phy_addr,
3533 EXT_PHY_KR_AUTO_NEG_DEVAD,
3534 0xffe4, 0x20);
3535 /* Add support for CL37 ( passive mode ) III */
3536 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3537 ext_phy_addr,
3538 EXT_PHY_KR_AUTO_NEG_DEVAD,
3539 0xffe0, 0x1000);
3540 /* Restart autoneg */
3541 msleep(500);
3542 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3543 ext_phy_addr,
3544 EXT_PHY_KR_AUTO_NEG_DEVAD,
3545 EXT_PHY_KR_CTRL, 0x1200);
3546 DP(NETIF_MSG_LINK, "8072 Autoneg Restart: "
3547 "1G %ssupported 10G %ssupported\n",
3548 (val & (1<<5)) ? "" : "not ",
3549 (val & (1<<7)) ? "" : "not ");
3550
3551 /* unlock */
3552 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3553 break;
3554
3555 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3556 DP(NETIF_MSG_LINK,
3557 "Setting the SFX7101 LASI indication\n");
3558 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3559 EXT_PHY_OPT_PMA_PMD_DEVAD,
2834 EXT_PHY_OPT_LASI_CNTL, 0x1); 3560 EXT_PHY_OPT_LASI_CNTL, 0x1);
3561 DP(NETIF_MSG_LINK,
3562 "Setting the SFX7101 LED to blink on traffic\n");
3563 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3564 EXT_PHY_OPT_PMA_PMD_DEVAD,
3565 0xC007, (1<<3));
3566
3567 /* read modify write pause advertising */
3568 bnx2x_mdio45_read(bp, ext_phy_addr,
3569 EXT_PHY_KR_AUTO_NEG_DEVAD,
3570 EXT_PHY_KR_AUTO_NEG_ADVERT, &val);
3571 val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH;
3572 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3573 if (bp->advertising & ADVERTISED_Pause)
3574 val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE;
3575
3576 if (bp->advertising & ADVERTISED_Asym_Pause) {
3577 val |=
3578 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC;
3579 }
3580 DP(NETIF_MSG_LINK, "SFX7101 AN advertise 0x%x\n", val);
3581 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3582 EXT_PHY_KR_AUTO_NEG_DEVAD,
3583 EXT_PHY_KR_AUTO_NEG_ADVERT, val);
3584 /* Restart autoneg */
3585 bnx2x_mdio45_read(bp, ext_phy_addr,
3586 EXT_PHY_KR_AUTO_NEG_DEVAD,
3587 EXT_PHY_KR_CTRL, &val);
3588 val |= 0x200;
3589 bnx2x_mdio45_write(bp, ext_phy_addr,
3590 EXT_PHY_KR_AUTO_NEG_DEVAD,
3591 EXT_PHY_KR_CTRL, val);
2835 break; 3592 break;
2836 3593
2837 default: 3594 default:
2838 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", 3595 BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n",
2839 bp->ext_phy_config); 3596 bp->ext_phy_config);
2840 break; 3597 break;
2841 } 3598 }
2842 bp->phy_addr = local_phy;
2843 3599
2844 } else { /* SerDes */ 3600 } else { /* SerDes */
2845/* ext_phy_addr = ((bp->ext_phy_config & 3601/* ext_phy_addr = ((bp->ext_phy_config &
2846 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >> 3602 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
2847 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT); 3603 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
2848*/ 3604*/
@@ -2854,10 +3610,6 @@ static void bnx2x_ext_phy_init(struct bnx2x *bp)
2854 3610
2855 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: 3611 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2856 DP(NETIF_MSG_LINK, "SerDes 5482\n"); 3612 DP(NETIF_MSG_LINK, "SerDes 5482\n");
2857 bnx2x_bits_en(bp,
2858 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2859 NIG_MASK_MI_INT);
2860 DP(NETIF_MSG_LINK, "enabled extenal phy int\n");
2861 break; 3613 break;
2862 3614
2863 default: 3615 default:
@@ -2871,8 +3623,22 @@ static void bnx2x_ext_phy_init(struct bnx2x *bp)
2871static void bnx2x_ext_phy_reset(struct bnx2x *bp) 3623static void bnx2x_ext_phy_reset(struct bnx2x *bp)
2872{ 3624{
2873 u32 ext_phy_type; 3625 u32 ext_phy_type;
2874 u32 ext_phy_addr; 3626 u32 ext_phy_addr = ((bp->ext_phy_config &
2875 u32 local_phy; 3627 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3628 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3629 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3630
3631 /* The PHY reset is controlled by GPIO 1
3632 * Give it 1ms of reset pulse
3633 */
3634 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3635 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3636 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3637 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3638 msleep(1);
3639 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3640 MISC_REGISTERS_GPIO_OUTPUT_HIGH);
3641 }
2876 3642
2877 if (bp->phy_flags & PHY_XGXS_FLAG) { 3643 if (bp->phy_flags & PHY_XGXS_FLAG) {
2878 ext_phy_type = XGXS_EXT_PHY_TYPE(bp); 3644 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
@@ -2883,15 +3649,24 @@ static void bnx2x_ext_phy_reset(struct bnx2x *bp)
2883 3649
2884 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 3650 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2885 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 3651 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2886 DP(NETIF_MSG_LINK, "XGXS 8705/6\n"); 3652 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
2887 local_phy = bp->phy_addr; 3653 bnx2x_mdio45_write(bp, ext_phy_addr,
2888 ext_phy_addr = ((bp->ext_phy_config & 3654 EXT_PHY_OPT_PMA_PMD_DEVAD,
2889 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2890 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2891 bp->phy_addr = (u8)ext_phy_addr;
2892 bnx2x_mdio45_write(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2893 EXT_PHY_OPT_CNTL, 0xa040); 3655 EXT_PHY_OPT_CNTL, 0xa040);
2894 bp->phy_addr = local_phy; 3656 break;
3657
3658 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3659 DP(NETIF_MSG_LINK, "XGXS 8072\n");
3660 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3661 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3662 ext_phy_addr,
3663 EXT_PHY_KR_PMA_PMD_DEVAD,
3664 0, 1<<15);
3665 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3666 break;
3667
3668 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3669 DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
2895 break; 3670 break;
2896 3671
2897 default: 3672 default:
@@ -2930,6 +3705,7 @@ static void bnx2x_link_initialize(struct bnx2x *bp)
2930 NIG_MASK_SERDES0_LINK_STATUS | 3705 NIG_MASK_SERDES0_LINK_STATUS |
2931 NIG_MASK_MI_INT)); 3706 NIG_MASK_MI_INT));
2932 3707
3708 /* Activate the external PHY */
2933 bnx2x_ext_phy_reset(bp); 3709 bnx2x_ext_phy_reset(bp);
2934 3710
2935 bnx2x_set_aer_mmd(bp); 3711 bnx2x_set_aer_mmd(bp);
@@ -2994,13 +3770,13 @@ static void bnx2x_link_initialize(struct bnx2x *bp)
2994 /* AN enabled */ 3770 /* AN enabled */
2995 bnx2x_set_brcm_cl37_advertisment(bp); 3771 bnx2x_set_brcm_cl37_advertisment(bp);
2996 3772
2997 /* program duplex & pause advertisment (for aneg) */ 3773 /* program duplex & pause advertisement (for aneg) */
2998 bnx2x_set_ieee_aneg_advertisment(bp); 3774 bnx2x_set_ieee_aneg_advertisment(bp);
2999 3775
3000 /* enable autoneg */ 3776 /* enable autoneg */
3001 bnx2x_set_autoneg(bp); 3777 bnx2x_set_autoneg(bp);
3002 3778
3003 /* enalbe and restart AN */ 3779 /* enable and restart AN */
3004 bnx2x_restart_autoneg(bp); 3780 bnx2x_restart_autoneg(bp);
3005 } 3781 }
3006 3782
@@ -3010,11 +3786,11 @@ static void bnx2x_link_initialize(struct bnx2x *bp)
3010 bnx2x_initialize_sgmii_process(bp); 3786 bnx2x_initialize_sgmii_process(bp);
3011 } 3787 }
3012 3788
3013 /* enable the interrupt */
3014 bnx2x_link_int_enable(bp);
3015
3016 /* init ext phy and enable link state int */ 3789 /* init ext phy and enable link state int */
3017 bnx2x_ext_phy_init(bp); 3790 bnx2x_ext_phy_init(bp);
3791
3792 /* enable the interrupt */
3793 bnx2x_link_int_enable(bp);
3018} 3794}
3019 3795
3020static void bnx2x_phy_deassert(struct bnx2x *bp) 3796static void bnx2x_phy_deassert(struct bnx2x *bp)
@@ -3073,6 +3849,11 @@ static int bnx2x_phy_init(struct bnx2x *bp)
3073static void bnx2x_link_reset(struct bnx2x *bp) 3849static void bnx2x_link_reset(struct bnx2x *bp)
3074{ 3850{
3075 int port = bp->port; 3851 int port = bp->port;
3852 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3853
3854 /* update shared memory */
3855 bp->link_status = 0;
3856 bnx2x_update_mng(bp);
3076 3857
3077 /* disable attentions */ 3858 /* disable attentions */
3078 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 3859 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
@@ -3081,21 +3862,45 @@ static void bnx2x_link_reset(struct bnx2x *bp)
3081 NIG_MASK_SERDES0_LINK_STATUS | 3862 NIG_MASK_SERDES0_LINK_STATUS |
3082 NIG_MASK_MI_INT)); 3863 NIG_MASK_MI_INT));
3083 3864
3084 bnx2x_ext_phy_reset(bp); 3865 /* activate nig drain */
3866 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
3867
3868 /* disable nig egress interface */
3869 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
3870 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3871
3872 /* Stop BigMac rx */
3873 bnx2x_bmac_rx_disable(bp);
3874
3875 /* disable emac */
3876 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
3877
3878 msleep(10);
3879
3880 /* The PHY reset is controlled by GPIO 1
3881 * Hold it as output low
3882 */
3883 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3884 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3885 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3886 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3887 DP(NETIF_MSG_LINK, "reset external PHY\n");
3888 }
3085 3889
3086 /* reset the SerDes/XGXS */ 3890 /* reset the SerDes/XGXS */
3087 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, 3891 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3088 (0x1ff << (port*16))); 3892 (0x1ff << (port*16)));
3089 3893
3090 /* reset EMAC / BMAC and disable NIG interfaces */ 3894 /* reset BigMac */
3091 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0); 3895 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
3092 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0); 3896 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3093 3897
3094 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0); 3898 /* disable nig ingress interface */
3899 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
3095 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0); 3900 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
3096 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3097 3901
3098 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 3902 /* set link down */
3903 bp->link_up = 0;
3099} 3904}
3100 3905
3101#ifdef BNX2X_XGXS_LB 3906#ifdef BNX2X_XGXS_LB
@@ -3158,7 +3963,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3158 int port = bp->port; 3963 int port = bp->port;
3159 3964
3160 DP(NETIF_MSG_TIMER, 3965 DP(NETIF_MSG_TIMER,
3161 "spe (%x:%x) command %x hw_cid %x data (%x:%x) left %x\n", 3966 "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
3162 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) + 3967 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
3163 (void *)bp->spq_prod_bd - (void *)bp->spq), command, 3968 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
3164 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left); 3969 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
@@ -3176,6 +3981,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3176 bnx2x_panic(); 3981 bnx2x_panic();
3177 return -EBUSY; 3982 return -EBUSY;
3178 } 3983 }
3984
3179	/* CID needs port number to be encoded in it */	3985	/* CID needs port number to be encoded in it */
3180 bp->spq_prod_bd->hdr.conn_and_cmd_data = 3986 bp->spq_prod_bd->hdr.conn_and_cmd_data =
3181 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) | 3987 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
@@ -3282,8 +4088,8 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3282 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8; 4088 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
3283 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 4089 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3284 MISC_REG_AEU_MASK_ATTN_FUNC_0; 4090 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3285 u32 nig_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 4091 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3286 NIG_REG_MASK_INTERRUPT_PORT0; 4092 NIG_REG_MASK_INTERRUPT_PORT0;
3287 4093
3288 if (~bp->aeu_mask & (asserted & 0xff)) 4094 if (~bp->aeu_mask & (asserted & 0xff))
3289 BNX2X_ERR("IGU ERROR\n"); 4095 BNX2X_ERR("IGU ERROR\n");
@@ -3301,15 +4107,11 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3301 4107
3302 if (asserted & ATTN_HARD_WIRED_MASK) { 4108 if (asserted & ATTN_HARD_WIRED_MASK) {
3303 if (asserted & ATTN_NIG_FOR_FUNC) { 4109 if (asserted & ATTN_NIG_FOR_FUNC) {
3304 u32 nig_status_port;
3305 u32 nig_int_addr = port ?
3306 NIG_REG_STATUS_INTERRUPT_PORT1 :
3307 NIG_REG_STATUS_INTERRUPT_PORT0;
3308 4110
3309 bp->nig_mask = REG_RD(bp, nig_mask_addr); 4111 /* save nig interrupt mask */
3310 REG_WR(bp, nig_mask_addr, 0); 4112 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
4113 REG_WR(bp, nig_int_mask_addr, 0);
3311 4114
3312 nig_status_port = REG_RD(bp, nig_int_addr);
3313 bnx2x_link_update(bp); 4115 bnx2x_link_update(bp);
3314 4116
3315 /* handle unicore attn? */ 4117 /* handle unicore attn? */
@@ -3362,15 +4164,132 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3362 4164
3363 /* now set back the mask */ 4165 /* now set back the mask */
3364 if (asserted & ATTN_NIG_FOR_FUNC) 4166 if (asserted & ATTN_NIG_FOR_FUNC)
3365 REG_WR(bp, nig_mask_addr, bp->nig_mask); 4167 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
3366} 4168}
3367 4169
3368static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 4170static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
3369{ 4171{
3370 int port = bp->port; 4172 int port = bp->port;
3371 int index; 4173 int reg_offset;
4174 u32 val;
4175
4176 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4177
4178 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4179 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4180
4181 val = REG_RD(bp, reg_offset);
4182 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4183 REG_WR(bp, reg_offset, val);
4184
4185 BNX2X_ERR("SPIO5 hw attention\n");
4186
4187 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
4188 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
4189 /* Fan failure attention */
4190
4191 /* The PHY reset is controled by GPIO 1 */
4192 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
4193 MISC_REGISTERS_GPIO_OUTPUT_LOW);
4194 /* Low power mode is controled by GPIO 2 */
4195 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4196 MISC_REGISTERS_GPIO_OUTPUT_LOW);
4197 /* mark the failure */
4198 bp->ext_phy_config &=
4199 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4200 bp->ext_phy_config |=
4201 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4202 SHMEM_WR(bp,
4203 dev_info.port_hw_config[port].
4204 external_phy_config,
4205 bp->ext_phy_config);
4206 /* log the failure */
4207 printk(KERN_ERR PFX "Fan Failure on Network"
4208 " Controller %s has caused the driver to"
4209 " shutdown the card to prevent permanent"
4210 " damage. Please contact Dell Support for"
4211 " assistance\n", bp->dev->name);
4212 break;
4213
4214 default:
4215 break;
4216 }
4217 }
4218}
4219
4220static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4221{
4222 u32 val;
4223
4224 if (attn & BNX2X_DOORQ_ASSERT) {
4225
4226 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4227 BNX2X_ERR("DB hw attention 0x%x\n", val);
4228 /* DORQ discard attention */
4229 if (val & 0x2)
4230 BNX2X_ERR("FATAL error from DORQ\n");
4231 }
4232}
4233
4234static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4235{
4236 u32 val;
4237
4238 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4239
4240 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4241 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4242 /* CFC error attention */
4243 if (val & 0x2)
4244 BNX2X_ERR("FATAL error from CFC\n");
4245 }
4246
4247 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4248
4249 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4250 BNX2X_ERR("PXP hw attention 0x%x\n", val);
4251 /* RQ_USDMDP_FIFO_OVERFLOW */
4252 if (val & 0x18000)
4253 BNX2X_ERR("FATAL error from PXP\n");
4254 }
4255}
4256
4257static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4258{
4259 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4260
4261 if (attn & BNX2X_MC_ASSERT_BITS) {
4262
4263 BNX2X_ERR("MC assert!\n");
4264 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4265 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4266 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4267 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4268 bnx2x_panic();
4269
4270 } else if (attn & BNX2X_MCP_ASSERT) {
4271
4272 BNX2X_ERR("MCP assert!\n");
4273 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4274 bnx2x_mc_assert(bp);
4275
4276 } else
4277 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4278 }
4279
4280 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4281
4282 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4283 BNX2X_ERR("LATCHED attention 0x%x (masked)\n", attn);
4284 }
4285}
4286
4287static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4288{
3372 struct attn_route attn; 4289 struct attn_route attn;
3373 struct attn_route group_mask; 4290 struct attn_route group_mask;
4291 int port = bp->port;
4292 int index;
3374 u32 reg_addr; 4293 u32 reg_addr;
3375 u32 val; 4294 u32 val;
3376 4295
@@ -3391,64 +4310,14 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3391 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index, 4310 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
3392 (unsigned long long)group_mask.sig[0]); 4311 (unsigned long long)group_mask.sig[0]);
3393 4312
3394 if (attn.sig[3] & group_mask.sig[3] & 4313 bnx2x_attn_int_deasserted3(bp,
3395 EVEREST_GEN_ATTN_IN_USE_MASK) { 4314 attn.sig[3] & group_mask.sig[3]);
3396 4315 bnx2x_attn_int_deasserted1(bp,
3397 if (attn.sig[3] & BNX2X_MC_ASSERT_BITS) { 4316 attn.sig[1] & group_mask.sig[1]);
3398 4317 bnx2x_attn_int_deasserted2(bp,
3399 BNX2X_ERR("MC assert!\n"); 4318 attn.sig[2] & group_mask.sig[2]);
3400 bnx2x_panic(); 4319 bnx2x_attn_int_deasserted0(bp,
3401 4320 attn.sig[0] & group_mask.sig[0]);
3402 } else if (attn.sig[3] & BNX2X_MCP_ASSERT) {
3403
3404 BNX2X_ERR("MCP assert!\n");
3405 REG_WR(bp,
3406 MISC_REG_AEU_GENERAL_ATTN_11, 0);
3407 bnx2x_mc_assert(bp);
3408
3409 } else {
3410 BNX2X_ERR("UNKOWEN HW ASSERT!\n");
3411 }
3412 }
3413
3414 if (attn.sig[1] & group_mask.sig[1] &
3415 BNX2X_DOORQ_ASSERT) {
3416
3417 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3418 BNX2X_ERR("DB hw attention 0x%x\n", val);
3419 /* DORQ discard attention */
3420 if (val & 0x2)
3421 BNX2X_ERR("FATAL error from DORQ\n");
3422 }
3423
3424 if (attn.sig[2] & group_mask.sig[2] &
3425 AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3426
3427 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3428 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3429 /* CFC error attention */
3430 if (val & 0x2)
3431 BNX2X_ERR("FATAL error from CFC\n");
3432 }
3433
3434 if (attn.sig[2] & group_mask.sig[2] &
3435 AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3436
3437 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3438 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3439 /* RQ_USDMDP_FIFO_OVERFLOW */
3440 if (val & 0x18000)
3441 BNX2X_ERR("FATAL error from PXP\n");
3442 }
3443
3444 if (attn.sig[3] & group_mask.sig[3] &
3445 EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3446
3447 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
3448 0x7ff);
3449 DP(NETIF_MSG_HW, "got latched bits 0x%x\n",
3450 attn.sig[3]);
3451 }
3452 4321
3453 if ((attn.sig[0] & group_mask.sig[0] & 4322 if ((attn.sig[0] & group_mask.sig[0] &
3454 HW_INTERRUT_ASSERT_SET_0) || 4323 HW_INTERRUT_ASSERT_SET_0) ||
@@ -3456,7 +4325,15 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3456 HW_INTERRUT_ASSERT_SET_1) || 4325 HW_INTERRUT_ASSERT_SET_1) ||
3457 (attn.sig[2] & group_mask.sig[2] & 4326 (attn.sig[2] & group_mask.sig[2] &
3458 HW_INTERRUT_ASSERT_SET_2)) 4327 HW_INTERRUT_ASSERT_SET_2))
3459 BNX2X_ERR("FATAL HW block attention\n"); 4328 BNX2X_ERR("FATAL HW block attention"
4329 " set0 0x%x set1 0x%x"
4330 " set2 0x%x\n",
4331 (attn.sig[0] & group_mask.sig[0] &
4332 HW_INTERRUT_ASSERT_SET_0),
4333 (attn.sig[1] & group_mask.sig[1] &
4334 HW_INTERRUT_ASSERT_SET_1),
4335 (attn.sig[2] & group_mask.sig[2] &
4336 HW_INTERRUT_ASSERT_SET_2));
3460 4337
3461 if ((attn.sig[0] & group_mask.sig[0] & 4338 if ((attn.sig[0] & group_mask.sig[0] &
3462 HW_PRTY_ASSERT_SET_0) || 4339 HW_PRTY_ASSERT_SET_0) ||
@@ -3464,7 +4341,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3464 HW_PRTY_ASSERT_SET_1) || 4341 HW_PRTY_ASSERT_SET_1) ||
3465 (attn.sig[2] & group_mask.sig[2] & 4342 (attn.sig[2] & group_mask.sig[2] &
3466 HW_PRTY_ASSERT_SET_2)) 4343 HW_PRTY_ASSERT_SET_2))
3467 BNX2X_ERR("FATAL HW block parity atention\n"); 4344 BNX2X_ERR("FATAL HW block parity attention\n");
3468 } 4345 }
3469 } 4346 }
3470 4347
@@ -3529,7 +4406,7 @@ static void bnx2x_sp_task(struct work_struct *work)
3529 4406
3530 /* Return here if interrupt is disabled */ 4407 /* Return here if interrupt is disabled */
3531 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 4408 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3532 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); 4409 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
3533 return; 4410 return;
3534 } 4411 }
3535 4412
@@ -3539,12 +4416,11 @@ static void bnx2x_sp_task(struct work_struct *work)
3539 4416
3540 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status); 4417 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3541 4418
3542 if (status & 0x1) { 4419 /* HW attentions */
3543 /* HW attentions */ 4420 if (status & 0x1)
3544 bnx2x_attn_int(bp); 4421 bnx2x_attn_int(bp);
3545 }
3546 4422
3547 /* CStorm events: query_stats, cfc delete ramrods */ 4423 /* CStorm events: query_stats, port delete ramrod */
3548 if (status & 0x2) 4424 if (status & 0x2)
3549 bp->stat_pending = 0; 4425 bp->stat_pending = 0;
3550 4426
@@ -3558,6 +4434,7 @@ static void bnx2x_sp_task(struct work_struct *work)
3558 IGU_INT_NOP, 1); 4434 IGU_INT_NOP, 1);
3559 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx), 4435 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3560 IGU_INT_ENABLE, 1); 4436 IGU_INT_ENABLE, 1);
4437
3561} 4438}
3562 4439
3563static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 4440static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -3567,11 +4444,11 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3567 4444
3568 /* Return here if interrupt is disabled */ 4445 /* Return here if interrupt is disabled */
3569 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 4446 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3570 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); 4447 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
3571 return IRQ_HANDLED; 4448 return IRQ_HANDLED;
3572 } 4449 }
3573 4450
3574 bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0); 4451 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
3575 4452
3576#ifdef BNX2X_STOP_ON_ERROR 4453#ifdef BNX2X_STOP_ON_ERROR
3577 if (unlikely(bp->panic)) 4454 if (unlikely(bp->panic))
@@ -3906,7 +4783,7 @@ static void bnx2x_stop_stats(struct bnx2x *bp)
3906 4783
3907 while (bp->stats_state != STATS_STATE_DISABLE) { 4784 while (bp->stats_state != STATS_STATE_DISABLE) {
3908 if (!timeout) { 4785 if (!timeout) {
3909 BNX2X_ERR("timeout wating for stats stop\n"); 4786 BNX2X_ERR("timeout waiting for stats stop\n");
3910 break; 4787 break;
3911 } 4788 }
3912 timeout--; 4789 timeout--;
@@ -4173,39 +5050,37 @@ static void bnx2x_update_net_stats(struct bnx2x *bp)
4173 5050
4174 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi); 5051 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4175 5052
4176 nstats->tx_bytes = 5053 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4177 bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4178 5054
4179 nstats->rx_dropped = estats->checksum_discard + 5055 nstats->rx_dropped = estats->checksum_discard + estats->mac_discard;
4180 estats->mac_discard;
4181 nstats->tx_dropped = 0; 5056 nstats->tx_dropped = 0;
4182 5057
4183 nstats->multicast = 5058 nstats->multicast =
4184 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi); 5059 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
4185 5060
4186 nstats->collisions = 5061 nstats->collisions = estats->single_collision_transmit_frames +
4187 estats->single_collision_transmit_frames + 5062 estats->multiple_collision_transmit_frames +
4188 estats->multiple_collision_transmit_frames + 5063 estats->late_collision_frames +
4189 estats->late_collision_frames + 5064 estats->excessive_collision_frames;
4190 estats->excessive_collision_frames;
4191 5065
4192 nstats->rx_length_errors = estats->runt_packets_received + 5066 nstats->rx_length_errors = estats->runt_packets_received +
4193 estats->jabber_packets_received; 5067 estats->jabber_packets_received;
4194 nstats->rx_over_errors = estats->no_buff_discard; 5068 nstats->rx_over_errors = estats->brb_discard +
5069 estats->brb_truncate_discard;
4195 nstats->rx_crc_errors = estats->crc_receive_errors; 5070 nstats->rx_crc_errors = estats->crc_receive_errors;
4196 nstats->rx_frame_errors = estats->alignment_errors; 5071 nstats->rx_frame_errors = estats->alignment_errors;
4197 nstats->rx_fifo_errors = estats->brb_discard + 5072 nstats->rx_fifo_errors = estats->no_buff_discard;
4198 estats->brb_truncate_discard;
4199 nstats->rx_missed_errors = estats->xxoverflow_discard; 5073 nstats->rx_missed_errors = estats->xxoverflow_discard;
4200 5074
4201 nstats->rx_errors = nstats->rx_length_errors + 5075 nstats->rx_errors = nstats->rx_length_errors +
4202 nstats->rx_over_errors + 5076 nstats->rx_over_errors +
4203 nstats->rx_crc_errors + 5077 nstats->rx_crc_errors +
4204 nstats->rx_frame_errors + 5078 nstats->rx_frame_errors +
4205 nstats->rx_fifo_errors; 5079 nstats->rx_fifo_errors +
5080 nstats->rx_missed_errors;
4206 5081
4207 nstats->tx_aborted_errors = estats->late_collision_frames + 5082 nstats->tx_aborted_errors = estats->late_collision_frames +
4208 estats->excessive_collision_frames; 5083 estats->excessive_collision_frames;
4209 nstats->tx_carrier_errors = estats->false_carrier_detections; 5084 nstats->tx_carrier_errors = estats->false_carrier_detections;
4210 nstats->tx_fifo_errors = 0; 5085 nstats->tx_fifo_errors = 0;
4211 nstats->tx_heartbeat_errors = 0; 5086 nstats->tx_heartbeat_errors = 0;
@@ -4334,7 +5209,7 @@ static void bnx2x_timer(unsigned long data)
4334 return; 5209 return;
4335 5210
4336 if (atomic_read(&bp->intr_sem) != 0) 5211 if (atomic_read(&bp->intr_sem) != 0)
4337 goto bnx2x_restart_timer; 5212 goto timer_restart;
4338 5213
4339 if (poll) { 5214 if (poll) {
4340 struct bnx2x_fastpath *fp = &bp->fp[0]; 5215 struct bnx2x_fastpath *fp = &bp->fp[0];
@@ -4344,7 +5219,7 @@ static void bnx2x_timer(unsigned long data)
4344 rc = bnx2x_rx_int(fp, 1000); 5219 rc = bnx2x_rx_int(fp, 1000);
4345 } 5220 }
4346 5221
4347 if (!nomcp && (bp->bc_ver >= 0x040003)) { 5222 if (!nomcp) {
4348 int port = bp->port; 5223 int port = bp->port;
4349 u32 drv_pulse; 5224 u32 drv_pulse;
4350 u32 mcp_pulse; 5225 u32 mcp_pulse;
@@ -4353,9 +5228,9 @@ static void bnx2x_timer(unsigned long data)
4353 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; 5228 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4354 /* TBD - add SYSTEM_TIME */ 5229 /* TBD - add SYSTEM_TIME */
4355 drv_pulse = bp->fw_drv_pulse_wr_seq; 5230 drv_pulse = bp->fw_drv_pulse_wr_seq;
4356 SHMEM_WR(bp, drv_fw_mb[port].drv_pulse_mb, drv_pulse); 5231 SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
4357 5232
4358 mcp_pulse = (SHMEM_RD(bp, drv_fw_mb[port].mcp_pulse_mb) & 5233 mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
4359 MCP_PULSE_SEQ_MASK); 5234 MCP_PULSE_SEQ_MASK);
4360 /* The delta between driver pulse and mcp response 5235 /* The delta between driver pulse and mcp response
4361 * should be 1 (before mcp response) or 0 (after mcp response) 5236 * should be 1 (before mcp response) or 0 (after mcp response)
@@ -4369,11 +5244,11 @@ static void bnx2x_timer(unsigned long data)
4369 } 5244 }
4370 5245
4371 if (bp->stats_state == STATS_STATE_DISABLE) 5246 if (bp->stats_state == STATS_STATE_DISABLE)
4372 goto bnx2x_restart_timer; 5247 goto timer_restart;
4373 5248
4374 bnx2x_update_stats(bp); 5249 bnx2x_update_stats(bp);
4375 5250
4376bnx2x_restart_timer: 5251timer_restart:
4377 mod_timer(&bp->timer, jiffies + bp->current_interval); 5252 mod_timer(&bp->timer, jiffies + bp->current_interval);
4378} 5253}
4379 5254
@@ -4438,6 +5313,9 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4438 atten_status_block); 5313 atten_status_block);
4439 def_sb->atten_status_block.status_block_id = id; 5314 def_sb->atten_status_block.status_block_id = id;
4440 5315
5316 bp->def_att_idx = 0;
5317 bp->attn_state = 0;
5318
4441 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 5319 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4442 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 5320 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4443 5321
@@ -4472,6 +5350,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4472 u_def_status_block); 5350 u_def_status_block);
4473 def_sb->u_def_status_block.status_block_id = id; 5351 def_sb->u_def_status_block.status_block_id = id;
4474 5352
5353 bp->def_u_idx = 0;
5354
4475 REG_WR(bp, BAR_USTRORM_INTMEM + 5355 REG_WR(bp, BAR_USTRORM_INTMEM +
4476 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); 5356 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4477 REG_WR(bp, BAR_USTRORM_INTMEM + 5357 REG_WR(bp, BAR_USTRORM_INTMEM +
@@ -4489,6 +5369,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4489 c_def_status_block); 5369 c_def_status_block);
4490 def_sb->c_def_status_block.status_block_id = id; 5370 def_sb->c_def_status_block.status_block_id = id;
4491 5371
5372 bp->def_c_idx = 0;
5373
4492 REG_WR(bp, BAR_CSTRORM_INTMEM + 5374 REG_WR(bp, BAR_CSTRORM_INTMEM +
4493 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); 5375 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4494 REG_WR(bp, BAR_CSTRORM_INTMEM + 5376 REG_WR(bp, BAR_CSTRORM_INTMEM +
@@ -4506,6 +5388,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4506 t_def_status_block); 5388 t_def_status_block);
4507 def_sb->t_def_status_block.status_block_id = id; 5389 def_sb->t_def_status_block.status_block_id = id;
4508 5390
5391 bp->def_t_idx = 0;
5392
4509 REG_WR(bp, BAR_TSTRORM_INTMEM + 5393 REG_WR(bp, BAR_TSTRORM_INTMEM +
4510 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); 5394 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4511 REG_WR(bp, BAR_TSTRORM_INTMEM + 5395 REG_WR(bp, BAR_TSTRORM_INTMEM +
@@ -4523,6 +5407,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4523 x_def_status_block); 5407 x_def_status_block);
4524 def_sb->x_def_status_block.status_block_id = id; 5408 def_sb->x_def_status_block.status_block_id = id;
4525 5409
5410 bp->def_x_idx = 0;
5411
4526 REG_WR(bp, BAR_XSTRORM_INTMEM + 5412 REG_WR(bp, BAR_XSTRORM_INTMEM +
4527 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); 5413 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4528 REG_WR(bp, BAR_XSTRORM_INTMEM + 5414 REG_WR(bp, BAR_XSTRORM_INTMEM +
@@ -4535,6 +5421,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4535 REG_WR16(bp, BAR_XSTRORM_INTMEM + 5421 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4536 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1); 5422 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
4537 5423
5424 bp->stat_pending = 0;
5425
4538 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 5426 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4539} 5427}
4540 5428
@@ -4626,7 +5514,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4626 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod; 5514 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
4627 fp->rx_pkt = fp->rx_calls = 0; 5515 fp->rx_pkt = fp->rx_calls = 0;
4628 5516
4629 /* Warning! this will genrate an interrupt (to the TSTORM) */ 5517 /* Warning! this will generate an interrupt (to the TSTORM) */
4630 /* must only be done when chip is initialized */ 5518 /* must only be done when chip is initialized */
4631 REG_WR(bp, BAR_TSTRORM_INTMEM + 5519 REG_WR(bp, BAR_TSTRORM_INTMEM +
4632 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod); 5520 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
@@ -4678,7 +5566,6 @@ static void bnx2x_init_sp_ring(struct bnx2x *bp)
4678 5566
4679 bp->spq_left = MAX_SPQ_PENDING; 5567 bp->spq_left = MAX_SPQ_PENDING;
4680 bp->spq_prod_idx = 0; 5568 bp->spq_prod_idx = 0;
4681 bp->dsb_sp_prod_idx = 0;
4682 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; 5569 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4683 bp->spq_prod_bd = bp->spq; 5570 bp->spq_prod_bd = bp->spq;
4684 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; 5571 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
@@ -4755,6 +5642,42 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
4755 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); 5642 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4756} 5643}
4757 5644
5645static void bnx2x_set_client_config(struct bnx2x *bp)
5646{
5647#ifdef BCM_VLAN
5648 int mode = bp->rx_mode;
5649#endif
5650 int i, port = bp->port;
5651 struct tstorm_eth_client_config tstorm_client = {0};
5652
5653 tstorm_client.mtu = bp->dev->mtu;
5654 tstorm_client.statistics_counter_id = 0;
5655 tstorm_client.config_flags =
5656 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
5657#ifdef BCM_VLAN
5658 if (mode && bp->vlgrp) {
5659 tstorm_client.config_flags |=
5660 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
5661 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5662 }
5663#endif
5664 if (mode != BNX2X_RX_MODE_PROMISC)
5665 tstorm_client.drop_flags =
5666 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR;
5667
5668 for_each_queue(bp, i) {
5669 REG_WR(bp, BAR_TSTRORM_INTMEM +
5670 TSTORM_CLIENT_CONFIG_OFFSET(port, i),
5671 ((u32 *)&tstorm_client)[0]);
5672 REG_WR(bp, BAR_TSTRORM_INTMEM +
5673 TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4,
5674 ((u32 *)&tstorm_client)[1]);
5675 }
5676
5677/* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
5678 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
5679}
5680
4758static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) 5681static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4759{ 5682{
4760 int mode = bp->rx_mode; 5683 int mode = bp->rx_mode;
@@ -4794,41 +5717,9 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4794/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i, 5717/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4795 ((u32 *)&tstorm_mac_filter)[i]); */ 5718 ((u32 *)&tstorm_mac_filter)[i]); */
4796 } 5719 }
4797}
4798 5720
4799static void bnx2x_set_client_config(struct bnx2x *bp, int client_id) 5721 if (mode != BNX2X_RX_MODE_NONE)
4800{ 5722 bnx2x_set_client_config(bp);
4801#ifdef BCM_VLAN
4802 int mode = bp->rx_mode;
4803#endif
4804 int port = bp->port;
4805 struct tstorm_eth_client_config tstorm_client = {0};
4806
4807 tstorm_client.mtu = bp->dev->mtu;
4808 tstorm_client.statistics_counter_id = 0;
4809 tstorm_client.config_flags =
4810 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4811#ifdef BCM_VLAN
4812 if (mode && bp->vlgrp) {
4813 tstorm_client.config_flags |=
4814 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4815 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4816 }
4817#endif
4818 tstorm_client.drop_flags = (TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR |
4819 TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR |
4820 TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR |
4821 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR);
4822
4823 REG_WR(bp, BAR_TSTRORM_INTMEM +
4824 TSTORM_CLIENT_CONFIG_OFFSET(port, client_id),
4825 ((u32 *)&tstorm_client)[0]);
4826 REG_WR(bp, BAR_TSTRORM_INTMEM +
4827 TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) + 4,
4828 ((u32 *)&tstorm_client)[1]);
4829
4830/* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
4831 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
4832} 5723}
4833 5724
4834static void bnx2x_init_internal(struct bnx2x *bp) 5725static void bnx2x_init_internal(struct bnx2x *bp)
@@ -4836,7 +5727,6 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4836 int port = bp->port; 5727 int port = bp->port;
4837 struct tstorm_eth_function_common_config tstorm_config = {0}; 5728 struct tstorm_eth_function_common_config tstorm_config = {0};
4838 struct stats_indication_flags stats_flags = {0}; 5729 struct stats_indication_flags stats_flags = {0};
4839 int i;
4840 5730
4841 if (is_multi(bp)) { 5731 if (is_multi(bp)) {
4842 tstorm_config.config_flags = MULTI_FLAGS; 5732 tstorm_config.config_flags = MULTI_FLAGS;
@@ -4850,13 +5740,9 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4850/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n", 5740/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4851 (*(u32 *)&tstorm_config)); */ 5741 (*(u32 *)&tstorm_config)); */
4852 5742
4853 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx untill link is up */ 5743 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4854 bnx2x_set_storm_rx_mode(bp); 5744 bnx2x_set_storm_rx_mode(bp);
4855 5745
4856 for_each_queue(bp, i)
4857 bnx2x_set_client_config(bp, i);
4858
4859
4860 stats_flags.collect_eth = cpu_to_le32(1); 5746 stats_flags.collect_eth = cpu_to_le32(1);
4861 5747
4862 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 5748 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
@@ -4902,7 +5788,7 @@ static void bnx2x_nic_init(struct bnx2x *bp)
4902 bnx2x_init_internal(bp); 5788 bnx2x_init_internal(bp);
4903 bnx2x_init_stats(bp); 5789 bnx2x_init_stats(bp);
4904 bnx2x_init_ind_table(bp); 5790 bnx2x_init_ind_table(bp);
4905 bnx2x_enable_int(bp); 5791 bnx2x_int_enable(bp);
4906 5792
4907} 5793}
4908 5794
@@ -5265,8 +6151,10 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
5265 if (mode & 0x1) { /* init common */ 6151 if (mode & 0x1) { /* init common */
5266 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n", 6152 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
5267 func, mode); 6153 func, mode);
5268 REG_WR(bp, MISC_REG_RESET_REG_1, 0xffffffff); 6154 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5269 REG_WR(bp, MISC_REG_RESET_REG_2, 0xfffc); 6155 0xffffffff);
6156 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6157 0xfffc);
5270 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END); 6158 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5271 6159
5272 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100); 6160 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
@@ -5359,7 +6247,7 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
5359 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8); 6247 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
5360#endif 6248#endif
5361 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END); 6249 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5362 /* softrest pulse */ 6250 /* soft reset pulse */
5363 REG_WR(bp, QM_REG_SOFT_RESET, 1); 6251 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5364 REG_WR(bp, QM_REG_SOFT_RESET, 0); 6252 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5365 6253
@@ -5413,7 +6301,7 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
5413 REG_WR(bp, SRC_REG_SOFT_RST, 1); 6301 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5414 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) { 6302 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5415 REG_WR(bp, i, 0xc0cac01a); 6303 REG_WR(bp, i, 0xc0cac01a);
5416 /* TODO: repleace with something meaningfull */ 6304 /* TODO: replace with something meaningful */
5417 } 6305 }
5418 /* SRCH COMMON comes here */ 6306 /* SRCH COMMON comes here */
5419 REG_WR(bp, SRC_REG_SOFT_RST, 0); 6307 REG_WR(bp, SRC_REG_SOFT_RST, 0);
@@ -5486,6 +6374,28 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
5486 enable_blocks_attention(bp); 6374 enable_blocks_attention(bp);
5487 /* enable_blocks_parity(bp); */ 6375 /* enable_blocks_parity(bp); */
5488 6376
6377 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6378 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6379 /* Fan failure is indicated by SPIO 5 */
6380 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6381 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6382
6383 /* set to active low mode */
6384 val = REG_RD(bp, MISC_REG_SPIO_INT);
6385 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6386 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6387 REG_WR(bp, MISC_REG_SPIO_INT, val);
6388
6389 /* enable interrupt to signal the IGU */
6390 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6391 val |= (1 << MISC_REGISTERS_SPIO_5);
6392 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6393 break;
6394
6395 default:
6396 break;
6397 }
6398
5489 } /* end of common init */ 6399 } /* end of common init */
5490 6400
5491 /* per port init */ 6401 /* per port init */
@@ -5645,9 +6555,21 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
5645 /* Port MCP comes here */ 6555 /* Port MCP comes here */
5646 /* Port DMAE comes here */ 6556 /* Port DMAE comes here */
5647 6557
6558 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6559 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6560 /* add SPIO 5 to group 0 */
6561 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6562 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6563 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6564 break;
6565
6566 default:
6567 break;
6568 }
6569
5648 bnx2x_link_reset(bp); 6570 bnx2x_link_reset(bp);
5649 6571
5650 /* Reset pciex errors for debug */ 6572 /* Reset PCIE errors for debug */
5651 REG_WR(bp, 0x2114, 0xffffffff); 6573 REG_WR(bp, 0x2114, 0xffffffff);
5652 REG_WR(bp, 0x2120, 0xffffffff); 6574 REG_WR(bp, 0x2120, 0xffffffff);
5653 REG_WR(bp, 0x2814, 0xffffffff); 6575 REG_WR(bp, 0x2814, 0xffffffff);
@@ -5669,9 +6591,9 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
5669 port = bp->port; 6591 port = bp->port;
5670 6592
5671 bp->fw_drv_pulse_wr_seq = 6593 bp->fw_drv_pulse_wr_seq =
5672 (SHMEM_RD(bp, drv_fw_mb[port].drv_pulse_mb) & 6594 (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
5673 DRV_PULSE_SEQ_MASK); 6595 DRV_PULSE_SEQ_MASK);
5674 bp->fw_mb = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_param); 6596 bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
5675 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n", 6597 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
5676 bp->fw_drv_pulse_wr_seq, bp->fw_mb); 6598 bp->fw_drv_pulse_wr_seq, bp->fw_mb);
5677 } else { 6599 } else {
@@ -5681,16 +6603,15 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode)
5681 return 0; 6603 return 0;
5682} 6604}
5683 6605
5684 6606/* send the MCP a request, block until there is a reply */
5685/* send the MCP a request, block untill there is a reply */
5686static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) 6607static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5687{ 6608{
5688 u32 rc = 0;
5689 u32 seq = ++bp->fw_seq;
5690 int port = bp->port; 6609 int port = bp->port;
6610 u32 seq = ++bp->fw_seq;
6611 u32 rc = 0;
5691 6612
5692 SHMEM_WR(bp, drv_fw_mb[port].drv_mb_header, command|seq); 6613 SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
5693 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", command|seq); 6614 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5694 6615
5695	/* let the FW do its magic ... */	6616	/* let the FW do its magic ... */
5696 msleep(100); /* TBD */ 6617 msleep(100); /* TBD */
@@ -5698,19 +6619,20 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5698 if (CHIP_REV_IS_SLOW(bp)) 6619 if (CHIP_REV_IS_SLOW(bp))
5699 msleep(900); 6620 msleep(900);
5700 6621
5701 rc = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_header); 6622 rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
5702
5703 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq); 6623 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
5704 6624
5705 /* is this a reply to our command? */ 6625 /* is this a reply to our command? */
5706 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { 6626 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5707 rc &= FW_MSG_CODE_MASK; 6627 rc &= FW_MSG_CODE_MASK;
6628
5708 } else { 6629 } else {
5709 /* FW BUG! */ 6630 /* FW BUG! */
5710 BNX2X_ERR("FW failed to respond!\n"); 6631 BNX2X_ERR("FW failed to respond!\n");
5711 bnx2x_fw_dump(bp); 6632 bnx2x_fw_dump(bp);
5712 rc = 0; 6633 rc = 0;
5713 } 6634 }
6635
5714 return rc; 6636 return rc;
5715} 6637}
5716 6638
@@ -5869,7 +6791,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
5869 for (i = 0; i < 16*1024; i += 64) 6791 for (i = 0; i < 16*1024; i += 64)
5870 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64; 6792 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5871 6793
5872 /* now sixup the last line in the block to point to the next block */ 6794 /* now fixup the last line in the block to point to the next block */
5873 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping; 6795 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5874 6796
5875 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */ 6797 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
@@ -5950,22 +6872,19 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5950 int i; 6872 int i;
5951 6873
5952 free_irq(bp->msix_table[0].vector, bp->dev); 6874 free_irq(bp->msix_table[0].vector, bp->dev);
5953 DP(NETIF_MSG_IFDOWN, "rleased sp irq (%d)\n", 6875 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5954 bp->msix_table[0].vector); 6876 bp->msix_table[0].vector);
5955 6877
5956 for_each_queue(bp, i) { 6878 for_each_queue(bp, i) {
5957 DP(NETIF_MSG_IFDOWN, "about to rlease fp #%d->%d irq " 6879 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
5958 "state(%x)\n", i, bp->msix_table[i + 1].vector, 6880 "state(%x)\n", i, bp->msix_table[i + 1].vector,
5959 bnx2x_fp(bp, i, state)); 6881 bnx2x_fp(bp, i, state));
5960 6882
5961 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) { 6883 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5962 6884 BNX2X_ERR("IRQ of fp #%d being freed while "
5963 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]); 6885 "state != closed\n", i);
5964 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_CLOSED;
5965
5966 } else
5967 DP(NETIF_MSG_IFDOWN, "irq not freed\n");
5968 6886
6887 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
5969 } 6888 }
5970 6889
5971} 6890}
@@ -5995,7 +6914,7 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
5995 6914
5996 if (pci_enable_msix(bp->pdev, &bp->msix_table[0], 6915 if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
5997 bp->num_queues + 1)){ 6916 bp->num_queues + 1)){
5998 BNX2X_ERR("failed to enable msix\n"); 6917 BNX2X_LOG("failed to enable MSI-X\n");
5999 return -1; 6918 return -1;
6000 6919
6001 } 6920 }
@@ -6010,11 +6929,8 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
6010static int bnx2x_req_msix_irqs(struct bnx2x *bp) 6929static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6011{ 6930{
6012 6931
6013
6014 int i, rc; 6932 int i, rc;
6015 6933
6016 DP(NETIF_MSG_IFUP, "about to request sp irq\n");
6017
6018 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0, 6934 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6019 bp->dev->name, bp->dev); 6935 bp->dev->name, bp->dev);
6020 6936
@@ -6029,7 +6945,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6029 bp->dev->name, &bp->fp[i]); 6945 bp->dev->name, &bp->fp[i]);
6030 6946
6031 if (rc) { 6947 if (rc) {
6032 BNX2X_ERR("request fp #%d irq failed\n", i); 6948 BNX2X_ERR("request fp #%d irq failed "
6949 "rc %d\n", i, rc);
6033 bnx2x_free_msix_irqs(bp); 6950 bnx2x_free_msix_irqs(bp);
6034 return -EBUSY; 6951 return -EBUSY;
6035 } 6952 }
@@ -6109,8 +7026,8 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6109 /* can take a while if any port is running */ 7026 /* can take a while if any port is running */
6110 int timeout = 500; 7027 int timeout = 500;
6111 7028
6112 /* DP("waiting for state to become %d on IDX [%d]\n", 7029 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6113 state, sb_idx); */ 7030 poll ? "polling" : "waiting", state, idx);
6114 7031
6115 might_sleep(); 7032 might_sleep();
6116 7033
@@ -6128,7 +7045,7 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6128 7045
6129 mb(); /* state is changed by bnx2x_sp_event()*/ 7046 mb(); /* state is changed by bnx2x_sp_event()*/
6130 7047
6131 if (*state_p != state) 7048 if (*state_p == state)
6132 return 0; 7049 return 0;
6133 7050
6134 timeout--; 7051 timeout--;
@@ -6136,17 +7053,17 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6136 7053
6137 } 7054 }
6138 7055
6139
6140 /* timeout! */ 7056 /* timeout! */
6141 BNX2X_ERR("timeout waiting for ramrod %d on %d\n", state, idx); 7057 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6142 return -EBUSY; 7058 poll ? "polling" : "waiting", state, idx);
6143 7059
7060 return -EBUSY;
6144} 7061}
6145 7062
6146static int bnx2x_setup_leading(struct bnx2x *bp) 7063static int bnx2x_setup_leading(struct bnx2x *bp)
6147{ 7064{
6148 7065
6149 /* reset IGU staae */ 7066 /* reset IGU state */
6150 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 7067 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6151 7068
6152 /* SETUP ramrod */ 7069 /* SETUP ramrod */
@@ -6162,12 +7079,13 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6162 /* reset IGU state */ 7079 /* reset IGU state */
6163 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 7080 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6164 7081
7082 /* SETUP ramrod */
6165 bp->fp[index].state = BNX2X_FP_STATE_OPENING; 7083 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6166 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0); 7084 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6167 7085
6168 /* Wait for completion */ 7086 /* Wait for completion */
6169 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index, 7087 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6170 &(bp->fp[index].state), 1); 7088 &(bp->fp[index].state), 0);
6171 7089
6172} 7090}
6173 7091
@@ -6177,8 +7095,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev);
6177 7095
6178static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) 7096static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6179{ 7097{
6180 int rc; 7098 u32 load_code;
6181 int i = 0; 7099 int i;
6182 7100
6183 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 7101 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6184 7102
@@ -6188,26 +7106,28 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6188 initialized, otherwise - not. 7106 initialized, otherwise - not.
6189 */ 7107 */
6190 if (!nomcp) { 7108 if (!nomcp) {
6191 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); 7109 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6192 if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED) { 7110 if (!load_code) {
7111 BNX2X_ERR("MCP response failure, unloading\n");
7112 return -EBUSY;
7113 }
7114 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7115 BNX2X_ERR("MCP refused load request, unloading\n");
6193 return -EBUSY; /* other port in diagnostic mode */ 7116 return -EBUSY; /* other port in diagnostic mode */
6194 } 7117 }
6195 } else { 7118 } else {
6196 rc = FW_MSG_CODE_DRV_LOAD_COMMON; 7119 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6197 } 7120 }
6198 7121
6199 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
6200
6201 /* if we can't use msix we only need one fp, 7122 /* if we can't use msix we only need one fp,
6202 * so try to enable msix with the requested number of fp's 7123 * so try to enable msix with the requested number of fp's
6203 * and fallback to inta with one fp 7124 * and fallback to inta with one fp
6204 */ 7125 */
6205 if (req_irq) { 7126 if (req_irq) {
6206
6207 if (use_inta) { 7127 if (use_inta) {
6208 bp->num_queues = 1; 7128 bp->num_queues = 1;
6209 } else { 7129 } else {
6210 if (use_multi > 1 && use_multi <= 16) 7130 if ((use_multi > 1) && (use_multi <= 16))
6211 /* user requested number */ 7131 /* user requested number */
6212 bp->num_queues = use_multi; 7132 bp->num_queues = use_multi;
6213 else if (use_multi == 1) 7133 else if (use_multi == 1)
@@ -6216,15 +7136,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6216 bp->num_queues = 1; 7136 bp->num_queues = 1;
6217 7137
6218 if (bnx2x_enable_msix(bp)) { 7138 if (bnx2x_enable_msix(bp)) {
6219 /* faild to enable msix */ 7139 /* failed to enable msix */
6220 bp->num_queues = 1; 7140 bp->num_queues = 1;
6221 if (use_multi) 7141 if (use_multi)
6222 BNX2X_ERR("Muti requested but failed" 7142 BNX2X_ERR("Multi requested but failed"
6223 " to enable MSI-X\n"); 7143 " to enable MSI-X\n");
6224 } 7144 }
6225 } 7145 }
6226 } 7146 }
6227 7147
7148 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
7149
6228 if (bnx2x_alloc_mem(bp)) 7150 if (bnx2x_alloc_mem(bp))
6229 return -ENOMEM; 7151 return -ENOMEM;
6230 7152
@@ -6232,13 +7154,13 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6232 if (bp->flags & USING_MSIX_FLAG) { 7154 if (bp->flags & USING_MSIX_FLAG) {
6233 if (bnx2x_req_msix_irqs(bp)) { 7155 if (bnx2x_req_msix_irqs(bp)) {
6234 pci_disable_msix(bp->pdev); 7156 pci_disable_msix(bp->pdev);
6235 goto out_error; 7157 goto load_error;
6236 } 7158 }
6237 7159
6238 } else { 7160 } else {
6239 if (bnx2x_req_irq(bp)) { 7161 if (bnx2x_req_irq(bp)) {
6240 BNX2X_ERR("IRQ request failed, aborting\n"); 7162 BNX2X_ERR("IRQ request failed, aborting\n");
6241 goto out_error; 7163 goto load_error;
6242 } 7164 }
6243 } 7165 }
6244 } 7166 }
@@ -6249,31 +7171,25 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6249 7171
6250 7172
6251 /* Initialize HW */ 7173 /* Initialize HW */
6252 if (bnx2x_function_init(bp, (rc == FW_MSG_CODE_DRV_LOAD_COMMON))) { 7174 if (bnx2x_function_init(bp,
7175 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON))) {
6253 BNX2X_ERR("HW init failed, aborting\n"); 7176 BNX2X_ERR("HW init failed, aborting\n");
6254 goto out_error; 7177 goto load_error;
6255 } 7178 }
6256 7179
6257 7180
6258 atomic_set(&bp->intr_sem, 0); 7181 atomic_set(&bp->intr_sem, 0);
6259 7182
6260 /* Reenable SP tasklet */
6261 /*if (bp->sp_task_en) { */
6262 /* tasklet_enable(&bp->sp_task);*/
6263 /*} else { */
6264 /* bp->sp_task_en = 1; */
6265 /*} */
6266 7183
6267 /* Setup NIC internals and enable interrupts */ 7184 /* Setup NIC internals and enable interrupts */
6268 bnx2x_nic_init(bp); 7185 bnx2x_nic_init(bp);
6269 7186
6270 /* Send LOAD_DONE command to MCP */ 7187 /* Send LOAD_DONE command to MCP */
6271 if (!nomcp) { 7188 if (!nomcp) {
6272 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); 7189 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6273 DP(NETIF_MSG_IFUP, "rc = 0x%x\n", rc); 7190 if (!load_code) {
6274 if (!rc) {
6275 BNX2X_ERR("MCP response failure, unloading\n"); 7191 BNX2X_ERR("MCP response failure, unloading\n");
6276 goto int_disable; 7192 goto load_int_disable;
6277 } 7193 }
6278 } 7194 }
6279 7195
@@ -6285,11 +7201,11 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6285 napi_enable(&bnx2x_fp(bp, i, napi)); 7201 napi_enable(&bnx2x_fp(bp, i, napi));
6286 7202
6287 if (bnx2x_setup_leading(bp)) 7203 if (bnx2x_setup_leading(bp))
6288 goto stop_netif; 7204 goto load_stop_netif;
6289 7205
6290 for_each_nondefault_queue(bp, i) 7206 for_each_nondefault_queue(bp, i)
6291 if (bnx2x_setup_multi(bp, i)) 7207 if (bnx2x_setup_multi(bp, i))
6292 goto stop_netif; 7208 goto load_stop_netif;
6293 7209
6294 bnx2x_set_mac_addr(bp); 7210 bnx2x_set_mac_addr(bp);
6295 7211
@@ -6313,42 +7229,24 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6313 7229
6314 return 0; 7230 return 0;
6315 7231
6316stop_netif: 7232load_stop_netif:
6317 for_each_queue(bp, i) 7233 for_each_queue(bp, i)
6318 napi_disable(&bnx2x_fp(bp, i, napi)); 7234 napi_disable(&bnx2x_fp(bp, i, napi));
6319 7235
6320int_disable: 7236load_int_disable:
6321 bnx2x_disable_int_sync(bp); 7237 bnx2x_int_disable_sync(bp);
6322 7238
6323 bnx2x_free_skbs(bp); 7239 bnx2x_free_skbs(bp);
6324 bnx2x_free_irq(bp); 7240 bnx2x_free_irq(bp);
6325 7241
6326out_error: 7242load_error:
6327 bnx2x_free_mem(bp); 7243 bnx2x_free_mem(bp);
6328 7244
6329 /* TBD we really need to reset the chip 7245 /* TBD we really need to reset the chip
6330 if we want to recover from this */ 7246 if we want to recover from this */
6331 return rc; 7247 return -EBUSY;
6332} 7248}
6333 7249
6334static void bnx2x_netif_stop(struct bnx2x *bp)
6335{
6336 int i;
6337
6338 bp->rx_mode = BNX2X_RX_MODE_NONE;
6339 bnx2x_set_storm_rx_mode(bp);
6340
6341 bnx2x_disable_int_sync(bp);
6342 bnx2x_link_reset(bp);
6343
6344 for_each_queue(bp, i)
6345 napi_disable(&bnx2x_fp(bp, i, napi));
6346
6347 if (netif_running(bp->dev)) {
6348 netif_tx_disable(bp->dev);
6349 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6350 }
6351}
6352 7250
6353static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) 7251static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6354{ 7252{
@@ -6401,20 +7299,20 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6401 7299
6402 int rc; 7300 int rc;
6403 7301
6404 /* halt the connnection */ 7302 /* halt the connection */
6405 bp->fp[index].state = BNX2X_FP_STATE_HALTING; 7303 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6406 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0); 7304 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6407 7305
6408 7306
6409 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, 7307 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6410 &(bp->fp[index].state), 1); 7308 &(bp->fp[index].state), 1);
6411 if (rc) /* timout */ 7309 if (rc) /* timeout */
6412 return rc; 7310 return rc;
6413 7311
6414 /* delete cfc entry */ 7312 /* delete cfc entry */
6415 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1); 7313 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6416 7314
6417 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_DELETED, index, 7315 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6418 &(bp->fp[index].state), 1); 7316 &(bp->fp[index].state), 1);
6419 7317
6420} 7318}
@@ -6422,8 +7320,8 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6422 7320
6423static void bnx2x_stop_leading(struct bnx2x *bp) 7321static void bnx2x_stop_leading(struct bnx2x *bp)
6424{ 7322{
6425 7323 u16 dsb_sp_prod_idx;
6426 /* if the other port is hadling traffic, 7324 /* if the other port is handling traffic,
6427 this can take a lot of time */ 7325 this can take a lot of time */
6428 int timeout = 500; 7326 int timeout = 500;
6429 7327
@@ -6437,52 +7335,71 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
6437 &(bp->fp[0].state), 1)) 7335 &(bp->fp[0].state), 1))
6438 return; 7336 return;
6439 7337
6440 bp->dsb_sp_prod_idx = *bp->dsb_sp_prod; 7338 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6441 7339
6442 /* Send CFC_DELETE ramrod */ 7340 /* Send PORT_DELETE ramrod */
6443 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1); 7341 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6444 7342
6445 /* 7343 /* Wait for completion to arrive on default status block
6446 Wait for completion.
6447 we are going to reset the chip anyway 7344 we are going to reset the chip anyway
6448 so there is not much to do if this times out 7345 so there is not much to do if this times out
6449 */ 7346 */
6450 while (bp->dsb_sp_prod_idx == *bp->dsb_sp_prod && timeout) { 7347 while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) {
6451 timeout--; 7348 timeout--;
6452 msleep(1); 7349 msleep(1);
6453 } 7350 }
6454 7351 if (!timeout) {
7352 DP(NETIF_MSG_IFDOWN, "timeout polling for completion "
7353 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7354 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7355 }
7356 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7357 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6455} 7358}
6456 7359
6457static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq) 7360
7361static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq)
6458{ 7362{
6459 u32 reset_code = 0; 7363 u32 reset_code = 0;
6460 int rc; 7364 int i, timeout;
6461 int i;
6462 7365
6463 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 7366 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6464 7367
6465 /* Calling flush_scheduled_work() may deadlock because 7368 del_timer_sync(&bp->timer);
6466 * linkwatch_event() may be on the workqueue and it will try to get
6467 * the rtnl_lock which we are holding.
6468 */
6469 7369
6470 while (bp->in_reset_task) 7370 bp->rx_mode = BNX2X_RX_MODE_NONE;
6471 msleep(1); 7371 bnx2x_set_storm_rx_mode(bp);
6472 7372
6473 /* Delete the timer: do it before disabling interrupts, as it 7373 if (netif_running(bp->dev)) {
6474 may be stil STAT_QUERY ramrod pending after stopping the timer */ 7374 netif_tx_disable(bp->dev);
6475 del_timer_sync(&bp->timer); 7375 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7376 }
7377
7378 /* Wait until all fast path tasks complete */
7379 for_each_queue(bp, i) {
7380 struct bnx2x_fastpath *fp = &bp->fp[i];
7381
7382 timeout = 1000;
7383 while (bnx2x_has_work(fp) && (timeout--))
7384 msleep(1);
7385 if (!timeout)
7386 BNX2X_ERR("timeout waiting for queue[%d]\n", i);
7387 }
6476 7388
6477 /* Wait until stat ramrod returns and all SP tasks complete */ 7389 /* Wait until stat ramrod returns and all SP tasks complete */
6478 while (bp->stat_pending && (bp->spq_left != MAX_SPQ_PENDING)) 7390 timeout = 1000;
7391 while ((bp->stat_pending || (bp->spq_left != MAX_SPQ_PENDING)) &&
7392 (timeout--))
6479 msleep(1); 7393 msleep(1);
6480 7394
6481 /* Stop fast path, disable MAC, disable interrupts, disable napi */ 7395 for_each_queue(bp, i)
6482 bnx2x_netif_stop(bp); 7396 napi_disable(&bnx2x_fp(bp, i, napi));
7397 /* Disable interrupts after Tx and Rx are disabled on stack level */
7398 bnx2x_int_disable_sync(bp);
6483 7399
6484 if (bp->flags & NO_WOL_FLAG) 7400 if (bp->flags & NO_WOL_FLAG)
6485 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 7401 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7402
6486 else if (bp->wol) { 7403 else if (bp->wol) {
6487 u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1; 7404 u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
6488 u8 *mac_addr = bp->dev->dev_addr; 7405 u8 *mac_addr = bp->dev->dev_addr;
@@ -6499,28 +7416,37 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq)
6499 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val); 7416 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
6500 7417
6501 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 7418 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7419
6502 } else 7420 } else
6503 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 7421 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6504 7422
7423 /* Close multi and leading connections */
6505 for_each_nondefault_queue(bp, i) 7424 for_each_nondefault_queue(bp, i)
6506 if (bnx2x_stop_multi(bp, i)) 7425 if (bnx2x_stop_multi(bp, i))
6507 goto error; 7426 goto unload_error;
6508
6509 7427
6510 bnx2x_stop_leading(bp); 7428 bnx2x_stop_leading(bp);
7429 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
7430 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
7431 DP(NETIF_MSG_IFDOWN, "failed to close leading properly!"
7432 "state 0x%x fp[0].state 0x%x",
7433 bp->state, bp->fp[0].state);
7434 }
7435
7436unload_error:
7437 bnx2x_link_reset(bp);
6511 7438
6512error:
6513 if (!nomcp) 7439 if (!nomcp)
6514 rc = bnx2x_fw_command(bp, reset_code); 7440 reset_code = bnx2x_fw_command(bp, reset_code);
6515 else 7441 else
6516 rc = FW_MSG_CODE_DRV_UNLOAD_COMMON; 7442 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6517 7443
6518 /* Release IRQs */ 7444 /* Release IRQs */
6519 if (fre_irq) 7445 if (free_irq)
6520 bnx2x_free_irq(bp); 7446 bnx2x_free_irq(bp);
6521 7447
6522 /* Reset the chip */ 7448 /* Reset the chip */
6523 bnx2x_reset_chip(bp, rc); 7449 bnx2x_reset_chip(bp, reset_code);
6524 7450
6525 /* Report UNLOAD_DONE to MCP */ 7451 /* Report UNLOAD_DONE to MCP */
6526 if (!nomcp) 7452 if (!nomcp)
@@ -6531,8 +7457,7 @@ error:
6531 bnx2x_free_mem(bp); 7457 bnx2x_free_mem(bp);
6532 7458
6533 bp->state = BNX2X_STATE_CLOSED; 7459 bp->state = BNX2X_STATE_CLOSED;
6534 /* Set link down */ 7460
6535 bp->link_up = 0;
6536 netif_carrier_off(bp->dev); 7461 netif_carrier_off(bp->dev);
6537 7462
6538 return 0; 7463 return 0;
@@ -6568,7 +7493,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
6568 SUPPORTED_100baseT_Half | 7493 SUPPORTED_100baseT_Half |
6569 SUPPORTED_100baseT_Full | 7494 SUPPORTED_100baseT_Full |
6570 SUPPORTED_1000baseT_Full | 7495 SUPPORTED_1000baseT_Full |
6571 SUPPORTED_2500baseT_Full | 7496 SUPPORTED_2500baseX_Full |
6572 SUPPORTED_TP | SUPPORTED_FIBRE | 7497 SUPPORTED_TP | SUPPORTED_FIBRE |
6573 SUPPORTED_Autoneg | 7498 SUPPORTED_Autoneg |
6574 SUPPORTED_Pause | 7499 SUPPORTED_Pause |
@@ -6581,10 +7506,10 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
6581 7506
6582 bp->phy_flags |= PHY_SGMII_FLAG; 7507 bp->phy_flags |= PHY_SGMII_FLAG;
6583 7508
6584 bp->supported |= (/* SUPPORTED_10baseT_Half | 7509 bp->supported |= (SUPPORTED_10baseT_Half |
6585 SUPPORTED_10baseT_Full | 7510 SUPPORTED_10baseT_Full |
6586 SUPPORTED_100baseT_Half | 7511 SUPPORTED_100baseT_Half |
6587 SUPPORTED_100baseT_Full |*/ 7512 SUPPORTED_100baseT_Full |
6588 SUPPORTED_1000baseT_Full | 7513 SUPPORTED_1000baseT_Full |
6589 SUPPORTED_TP | SUPPORTED_FIBRE | 7514 SUPPORTED_TP | SUPPORTED_FIBRE |
6590 SUPPORTED_Autoneg | 7515 SUPPORTED_Autoneg |
@@ -6620,7 +7545,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
6620 SUPPORTED_100baseT_Half | 7545 SUPPORTED_100baseT_Half |
6621 SUPPORTED_100baseT_Full | 7546 SUPPORTED_100baseT_Full |
6622 SUPPORTED_1000baseT_Full | 7547 SUPPORTED_1000baseT_Full |
6623 SUPPORTED_2500baseT_Full | 7548 SUPPORTED_2500baseX_Full |
6624 SUPPORTED_10000baseT_Full | 7549 SUPPORTED_10000baseT_Full |
6625 SUPPORTED_TP | SUPPORTED_FIBRE | 7550 SUPPORTED_TP | SUPPORTED_FIBRE |
6626 SUPPORTED_Autoneg | 7551 SUPPORTED_Autoneg |
@@ -6629,12 +7554,46 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
6629 break; 7554 break;
6630 7555
6631 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 7556 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7557 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7558 ext_phy_type);
7559
7560 bp->supported |= (SUPPORTED_10000baseT_Full |
7561 SUPPORTED_FIBRE |
7562 SUPPORTED_Pause |
7563 SUPPORTED_Asym_Pause);
7564 break;
7565
6632 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 7566 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6633 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705/6)\n", 7567 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7568 ext_phy_type);
7569
7570 bp->supported |= (SUPPORTED_10000baseT_Full |
7571 SUPPORTED_1000baseT_Full |
7572 SUPPORTED_Autoneg |
7573 SUPPORTED_FIBRE |
7574 SUPPORTED_Pause |
7575 SUPPORTED_Asym_Pause);
7576 break;
7577
7578 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7579 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
6634 ext_phy_type); 7580 ext_phy_type);
6635 7581
6636 bp->supported |= (SUPPORTED_10000baseT_Full | 7582 bp->supported |= (SUPPORTED_10000baseT_Full |
7583 SUPPORTED_1000baseT_Full |
6637 SUPPORTED_FIBRE | 7584 SUPPORTED_FIBRE |
7585 SUPPORTED_Autoneg |
7586 SUPPORTED_Pause |
7587 SUPPORTED_Asym_Pause);
7588 break;
7589
7590 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7591 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7592 ext_phy_type);
7593
7594 bp->supported |= (SUPPORTED_10000baseT_Full |
7595 SUPPORTED_TP |
7596 SUPPORTED_Autoneg |
6638 SUPPORTED_Pause | 7597 SUPPORTED_Pause |
6639 SUPPORTED_Asym_Pause); 7598 SUPPORTED_Asym_Pause);
6640 break; 7599 break;
@@ -6691,7 +7650,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
6691 SUPPORTED_1000baseT_Full); 7650 SUPPORTED_1000baseT_Full);
6692 7651
6693 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) 7652 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6694 bp->supported &= ~SUPPORTED_2500baseT_Full; 7653 bp->supported &= ~SUPPORTED_2500baseX_Full;
6695 7654
6696 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) 7655 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6697 bp->supported &= ~SUPPORTED_10000baseT_Full; 7656 bp->supported &= ~SUPPORTED_10000baseT_Full;
@@ -6711,13 +7670,8 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6711 bp->req_line_speed = 0; 7670 bp->req_line_speed = 0;
6712 bp->advertising = bp->supported; 7671 bp->advertising = bp->supported;
6713 } else { 7672 } else {
6714 u32 ext_phy_type; 7673 if (XGXS_EXT_PHY_TYPE(bp) ==
6715 7674 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) {
6716 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
6717 if ((ext_phy_type ==
6718 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6719 (ext_phy_type ==
6720 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
6721 /* force 10G, no AN */ 7675 /* force 10G, no AN */
6722 bp->req_line_speed = SPEED_10000; 7676 bp->req_line_speed = SPEED_10000;
6723 bp->advertising = 7677 bp->advertising =
@@ -6734,8 +7688,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6734 break; 7688 break;
6735 7689
6736 case PORT_FEATURE_LINK_SPEED_10M_FULL: 7690 case PORT_FEATURE_LINK_SPEED_10M_FULL:
6737 if (bp->speed_cap_mask & 7691 if (bp->supported & SUPPORTED_10baseT_Full) {
6738 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
6739 bp->req_line_speed = SPEED_10; 7692 bp->req_line_speed = SPEED_10;
6740 bp->advertising = (ADVERTISED_10baseT_Full | 7693 bp->advertising = (ADVERTISED_10baseT_Full |
6741 ADVERTISED_TP); 7694 ADVERTISED_TP);
@@ -6749,8 +7702,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6749 break; 7702 break;
6750 7703
6751 case PORT_FEATURE_LINK_SPEED_10M_HALF: 7704 case PORT_FEATURE_LINK_SPEED_10M_HALF:
6752 if (bp->speed_cap_mask & 7705 if (bp->supported & SUPPORTED_10baseT_Half) {
6753 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
6754 bp->req_line_speed = SPEED_10; 7706 bp->req_line_speed = SPEED_10;
6755 bp->req_duplex = DUPLEX_HALF; 7707 bp->req_duplex = DUPLEX_HALF;
6756 bp->advertising = (ADVERTISED_10baseT_Half | 7708 bp->advertising = (ADVERTISED_10baseT_Half |
@@ -6765,8 +7717,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6765 break; 7717 break;
6766 7718
6767 case PORT_FEATURE_LINK_SPEED_100M_FULL: 7719 case PORT_FEATURE_LINK_SPEED_100M_FULL:
6768 if (bp->speed_cap_mask & 7720 if (bp->supported & SUPPORTED_100baseT_Full) {
6769 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
6770 bp->req_line_speed = SPEED_100; 7721 bp->req_line_speed = SPEED_100;
6771 bp->advertising = (ADVERTISED_100baseT_Full | 7722 bp->advertising = (ADVERTISED_100baseT_Full |
6772 ADVERTISED_TP); 7723 ADVERTISED_TP);
@@ -6780,8 +7731,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6780 break; 7731 break;
6781 7732
6782 case PORT_FEATURE_LINK_SPEED_100M_HALF: 7733 case PORT_FEATURE_LINK_SPEED_100M_HALF:
6783 if (bp->speed_cap_mask & 7734 if (bp->supported & SUPPORTED_100baseT_Half) {
6784 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
6785 bp->req_line_speed = SPEED_100; 7735 bp->req_line_speed = SPEED_100;
6786 bp->req_duplex = DUPLEX_HALF; 7736 bp->req_duplex = DUPLEX_HALF;
6787 bp->advertising = (ADVERTISED_100baseT_Half | 7737 bp->advertising = (ADVERTISED_100baseT_Half |
@@ -6796,8 +7746,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6796 break; 7746 break;
6797 7747
6798 case PORT_FEATURE_LINK_SPEED_1G: 7748 case PORT_FEATURE_LINK_SPEED_1G:
6799 if (bp->speed_cap_mask & 7749 if (bp->supported & SUPPORTED_1000baseT_Full) {
6800 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) {
6801 bp->req_line_speed = SPEED_1000; 7750 bp->req_line_speed = SPEED_1000;
6802 bp->advertising = (ADVERTISED_1000baseT_Full | 7751 bp->advertising = (ADVERTISED_1000baseT_Full |
6803 ADVERTISED_TP); 7752 ADVERTISED_TP);
@@ -6811,10 +7760,9 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6811 break; 7760 break;
6812 7761
6813 case PORT_FEATURE_LINK_SPEED_2_5G: 7762 case PORT_FEATURE_LINK_SPEED_2_5G:
6814 if (bp->speed_cap_mask & 7763 if (bp->supported & SUPPORTED_2500baseX_Full) {
6815 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) {
6816 bp->req_line_speed = SPEED_2500; 7764 bp->req_line_speed = SPEED_2500;
6817 bp->advertising = (ADVERTISED_2500baseT_Full | 7765 bp->advertising = (ADVERTISED_2500baseX_Full |
6818 ADVERTISED_TP); 7766 ADVERTISED_TP);
6819 } else { 7767 } else {
6820 BNX2X_ERR("NVRAM config error. " 7768 BNX2X_ERR("NVRAM config error. "
@@ -6828,15 +7776,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6828 case PORT_FEATURE_LINK_SPEED_10G_CX4: 7776 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6829 case PORT_FEATURE_LINK_SPEED_10G_KX4: 7777 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6830 case PORT_FEATURE_LINK_SPEED_10G_KR: 7778 case PORT_FEATURE_LINK_SPEED_10G_KR:
6831 if (!(bp->phy_flags & PHY_XGXS_FLAG)) { 7779 if (bp->supported & SUPPORTED_10000baseT_Full) {
6832 BNX2X_ERR("NVRAM config error. "
6833 "Invalid link_config 0x%x"
6834 " phy_flags 0x%x\n",
6835 bp->link_config, bp->phy_flags);
6836 return;
6837 }
6838 if (bp->speed_cap_mask &
6839 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
6840 bp->req_line_speed = SPEED_10000; 7780 bp->req_line_speed = SPEED_10000;
6841 bp->advertising = (ADVERTISED_10000baseT_Full | 7781 bp->advertising = (ADVERTISED_10000baseT_Full |
6842 ADVERTISED_FIBRE); 7782 ADVERTISED_FIBRE);
@@ -6863,43 +7803,13 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
6863 7803
6864 bp->req_flow_ctrl = (bp->link_config & 7804 bp->req_flow_ctrl = (bp->link_config &
6865 PORT_FEATURE_FLOW_CONTROL_MASK); 7805 PORT_FEATURE_FLOW_CONTROL_MASK);
6866 /* Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 7806 if ((bp->req_flow_ctrl == FLOW_CTRL_AUTO) &&
6867 switch (bp->req_flow_ctrl) { 7807 (bp->supported & SUPPORTED_Autoneg))
6868 case FLOW_CTRL_AUTO:
6869 bp->req_autoneg |= AUTONEG_FLOW_CTRL; 7808 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
6870 if (bp->dev->mtu <= 4500) {
6871 bp->pause_mode = PAUSE_BOTH;
6872 bp->advertising |= (ADVERTISED_Pause |
6873 ADVERTISED_Asym_Pause);
6874 } else {
6875 bp->pause_mode = PAUSE_ASYMMETRIC;
6876 bp->advertising |= ADVERTISED_Asym_Pause;
6877 }
6878 break;
6879
6880 case FLOW_CTRL_TX:
6881 bp->pause_mode = PAUSE_ASYMMETRIC;
6882 bp->advertising |= ADVERTISED_Asym_Pause;
6883 break;
6884
6885 case FLOW_CTRL_RX:
6886 case FLOW_CTRL_BOTH:
6887 bp->pause_mode = PAUSE_BOTH;
6888 bp->advertising |= (ADVERTISED_Pause |
6889 ADVERTISED_Asym_Pause);
6890 break;
6891 7809
6892 case FLOW_CTRL_NONE: 7810 BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x"
6893 default: 7811 " advertising 0x%x\n",
6894 bp->pause_mode = PAUSE_NONE; 7812 bp->req_autoneg, bp->req_flow_ctrl, bp->advertising);
6895 bp->advertising &= ~(ADVERTISED_Pause |
6896 ADVERTISED_Asym_Pause);
6897 break;
6898 }
6899 BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x\n"
6900 KERN_INFO " pause_mode %d advertising 0x%x\n",
6901 bp->req_autoneg, bp->req_flow_ctrl,
6902 bp->pause_mode, bp->advertising);
6903} 7813}
6904 7814
6905static void bnx2x_get_hwinfo(struct bnx2x *bp) 7815static void bnx2x_get_hwinfo(struct bnx2x *bp)
@@ -6933,15 +7843,15 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp)
6933 val = SHMEM_RD(bp, validity_map[port]); 7843 val = SHMEM_RD(bp, validity_map[port]);
6934 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 7844 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6935 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 7845 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6936 BNX2X_ERR("MCP validity signature bad\n"); 7846 BNX2X_ERR("BAD MCP validity signature\n");
6937 7847
6938 bp->fw_seq = (SHMEM_RD(bp, drv_fw_mb[port].drv_mb_header) & 7848 bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
6939 DRV_MSG_SEQ_NUMBER_MASK); 7849 DRV_MSG_SEQ_NUMBER_MASK);
6940 7850
6941 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 7851 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6942 7852 bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6943 bp->serdes_config = 7853 bp->serdes_config =
6944 SHMEM_RD(bp, dev_info.port_hw_config[bp->port].serdes_config); 7854 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
6945 bp->lane_config = 7855 bp->lane_config =
6946 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); 7856 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6947 bp->ext_phy_config = 7857 bp->ext_phy_config =
@@ -6954,13 +7864,13 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp)
6954 bp->link_config = 7864 bp->link_config =
6955 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); 7865 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6956 7866
6957 BNX2X_DEV_INFO("hw_config (%08x) serdes_config (%08x)\n" 7867 BNX2X_DEV_INFO("hw_config (%08x) board (%08x) serdes_config (%08x)\n"
6958 KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n" 7868 KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n"
6959 KERN_INFO " speed_cap_mask (%08x) link_config (%08x)" 7869 KERN_INFO " speed_cap_mask (%08x) link_config (%08x)"
6960 " fw_seq (%08x)\n", 7870 " fw_seq (%08x)\n",
6961 bp->hw_config, bp->serdes_config, bp->lane_config, 7871 bp->hw_config, bp->board, bp->serdes_config,
6962 bp->ext_phy_config, bp->speed_cap_mask, 7872 bp->lane_config, bp->ext_phy_config,
6963 bp->link_config, bp->fw_seq); 7873 bp->speed_cap_mask, bp->link_config, bp->fw_seq);
6964 7874
6965 switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK); 7875 switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
6966 bnx2x_link_settings_supported(bp, switch_cfg); 7876 bnx2x_link_settings_supported(bp, switch_cfg);
@@ -7014,14 +7924,8 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp)
7014 return; 7924 return;
7015 7925
7016set_mac: /* only supposed to happen on emulation/FPGA */ 7926set_mac: /* only supposed to happen on emulation/FPGA */
7017 BNX2X_ERR("warning constant MAC workaround active\n"); 7927 BNX2X_ERR("warning random MAC workaround active\n");
7018 bp->dev->dev_addr[0] = 0; 7928 random_ether_addr(bp->dev->dev_addr);
7019 bp->dev->dev_addr[1] = 0x50;
7020 bp->dev->dev_addr[2] = 0xc2;
7021 bp->dev->dev_addr[3] = 0x2c;
7022 bp->dev->dev_addr[4] = 0x71;
7023 bp->dev->dev_addr[5] = port ? 0x0d : 0x0e;
7024
7025 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6); 7929 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7026 7930
7027} 7931}
@@ -7048,19 +7952,34 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7048 } 7952 }
7049 7953
7050 if (bp->phy_flags & PHY_XGXS_FLAG) { 7954 if (bp->phy_flags & PHY_XGXS_FLAG) {
7051 cmd->port = PORT_FIBRE; 7955 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7052 } else { 7956
7957 switch (ext_phy_type) {
7958 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7959 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7960 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7961 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7962 cmd->port = PORT_FIBRE;
7963 break;
7964
7965 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7966 cmd->port = PORT_TP;
7967 break;
7968
7969 default:
7970 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7971 bp->ext_phy_config);
7972 }
7973 } else
7053 cmd->port = PORT_TP; 7974 cmd->port = PORT_TP;
7054 }
7055 7975
7056 cmd->phy_address = bp->phy_addr; 7976 cmd->phy_address = bp->phy_addr;
7057 cmd->transceiver = XCVR_INTERNAL; 7977 cmd->transceiver = XCVR_INTERNAL;
7058 7978
7059 if (bp->req_autoneg & AUTONEG_SPEED) { 7979 if (bp->req_autoneg & AUTONEG_SPEED)
7060 cmd->autoneg = AUTONEG_ENABLE; 7980 cmd->autoneg = AUTONEG_ENABLE;
7061 } else { 7981 else
7062 cmd->autoneg = AUTONEG_DISABLE; 7982 cmd->autoneg = AUTONEG_DISABLE;
7063 }
7064 7983
7065 cmd->maxtxpkt = 0; 7984 cmd->maxtxpkt = 0;
7066 cmd->maxrxpkt = 0; 7985 cmd->maxrxpkt = 0;
@@ -7091,8 +8010,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7091 8010
7092 switch (cmd->port) { 8011 switch (cmd->port) {
7093 case PORT_TP: 8012 case PORT_TP:
7094 if (!(bp->supported & SUPPORTED_TP)) 8013 if (!(bp->supported & SUPPORTED_TP)) {
8014 DP(NETIF_MSG_LINK, "TP not supported\n");
7095 return -EINVAL; 8015 return -EINVAL;
8016 }
7096 8017
7097 if (bp->phy_flags & PHY_XGXS_FLAG) { 8018 if (bp->phy_flags & PHY_XGXS_FLAG) {
7098 bnx2x_link_reset(bp); 8019 bnx2x_link_reset(bp);
@@ -7102,8 +8023,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7102 break; 8023 break;
7103 8024
7104 case PORT_FIBRE: 8025 case PORT_FIBRE:
7105 if (!(bp->supported & SUPPORTED_FIBRE)) 8026 if (!(bp->supported & SUPPORTED_FIBRE)) {
8027 DP(NETIF_MSG_LINK, "FIBRE not supported\n");
7106 return -EINVAL; 8028 return -EINVAL;
8029 }
7107 8030
7108 if (!(bp->phy_flags & PHY_XGXS_FLAG)) { 8031 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
7109 bnx2x_link_reset(bp); 8032 bnx2x_link_reset(bp);
@@ -7113,12 +8036,15 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7113 break; 8036 break;
7114 8037
7115 default: 8038 default:
8039 DP(NETIF_MSG_LINK, "Unknown port type\n");
7116 return -EINVAL; 8040 return -EINVAL;
7117 } 8041 }
7118 8042
7119 if (cmd->autoneg == AUTONEG_ENABLE) { 8043 if (cmd->autoneg == AUTONEG_ENABLE) {
7120 if (!(bp->supported & SUPPORTED_Autoneg)) 8044 if (!(bp->supported & SUPPORTED_Autoneg)) {
8045 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7121 return -EINVAL; 8046 return -EINVAL;
8047 }
7122 8048
7123 /* advertise the requested speed and duplex if supported */ 8049 /* advertise the requested speed and duplex if supported */
7124 cmd->advertising &= bp->supported; 8050 cmd->advertising &= bp->supported;
@@ -7133,14 +8059,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7133 switch (cmd->speed) { 8059 switch (cmd->speed) {
7134 case SPEED_10: 8060 case SPEED_10:
7135 if (cmd->duplex == DUPLEX_FULL) { 8061 if (cmd->duplex == DUPLEX_FULL) {
7136 if (!(bp->supported & SUPPORTED_10baseT_Full)) 8062 if (!(bp->supported &
8063 SUPPORTED_10baseT_Full)) {
8064 DP(NETIF_MSG_LINK,
8065 "10M full not supported\n");
7137 return -EINVAL; 8066 return -EINVAL;
8067 }
7138 8068
7139 advertising = (ADVERTISED_10baseT_Full | 8069 advertising = (ADVERTISED_10baseT_Full |
7140 ADVERTISED_TP); 8070 ADVERTISED_TP);
7141 } else { 8071 } else {
7142 if (!(bp->supported & SUPPORTED_10baseT_Half)) 8072 if (!(bp->supported &
8073 SUPPORTED_10baseT_Half)) {
8074 DP(NETIF_MSG_LINK,
8075 "10M half not supported\n");
7143 return -EINVAL; 8076 return -EINVAL;
8077 }
7144 8078
7145 advertising = (ADVERTISED_10baseT_Half | 8079 advertising = (ADVERTISED_10baseT_Half |
7146 ADVERTISED_TP); 8080 ADVERTISED_TP);
@@ -7150,15 +8084,21 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7150 case SPEED_100: 8084 case SPEED_100:
7151 if (cmd->duplex == DUPLEX_FULL) { 8085 if (cmd->duplex == DUPLEX_FULL) {
7152 if (!(bp->supported & 8086 if (!(bp->supported &
7153 SUPPORTED_100baseT_Full)) 8087 SUPPORTED_100baseT_Full)) {
8088 DP(NETIF_MSG_LINK,
8089 "100M full not supported\n");
7154 return -EINVAL; 8090 return -EINVAL;
8091 }
7155 8092
7156 advertising = (ADVERTISED_100baseT_Full | 8093 advertising = (ADVERTISED_100baseT_Full |
7157 ADVERTISED_TP); 8094 ADVERTISED_TP);
7158 } else { 8095 } else {
7159 if (!(bp->supported & 8096 if (!(bp->supported &
7160 SUPPORTED_100baseT_Half)) 8097 SUPPORTED_100baseT_Half)) {
8098 DP(NETIF_MSG_LINK,
8099 "100M half not supported\n");
7161 return -EINVAL; 8100 return -EINVAL;
8101 }
7162 8102
7163 advertising = (ADVERTISED_100baseT_Half | 8103 advertising = (ADVERTISED_100baseT_Half |
7164 ADVERTISED_TP); 8104 ADVERTISED_TP);
@@ -7166,39 +8106,54 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7166 break; 8106 break;
7167 8107
7168 case SPEED_1000: 8108 case SPEED_1000:
7169 if (cmd->duplex != DUPLEX_FULL) 8109 if (cmd->duplex != DUPLEX_FULL) {
8110 DP(NETIF_MSG_LINK, "1G half not supported\n");
7170 return -EINVAL; 8111 return -EINVAL;
8112 }
7171 8113
7172 if (!(bp->supported & SUPPORTED_1000baseT_Full)) 8114 if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
8115 DP(NETIF_MSG_LINK, "1G full not supported\n");
7173 return -EINVAL; 8116 return -EINVAL;
8117 }
7174 8118
7175 advertising = (ADVERTISED_1000baseT_Full | 8119 advertising = (ADVERTISED_1000baseT_Full |
7176 ADVERTISED_TP); 8120 ADVERTISED_TP);
7177 break; 8121 break;
7178 8122
7179 case SPEED_2500: 8123 case SPEED_2500:
7180 if (cmd->duplex != DUPLEX_FULL) 8124 if (cmd->duplex != DUPLEX_FULL) {
8125 DP(NETIF_MSG_LINK,
8126 "2.5G half not supported\n");
7181 return -EINVAL; 8127 return -EINVAL;
8128 }
7182 8129
7183 if (!(bp->supported & SUPPORTED_2500baseT_Full)) 8130 if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
8131 DP(NETIF_MSG_LINK,
8132 "2.5G full not supported\n");
7184 return -EINVAL; 8133 return -EINVAL;
8134 }
7185 8135
7186 advertising = (ADVERTISED_2500baseT_Full | 8136 advertising = (ADVERTISED_2500baseX_Full |
7187 ADVERTISED_TP); 8137 ADVERTISED_TP);
7188 break; 8138 break;
7189 8139
7190 case SPEED_10000: 8140 case SPEED_10000:
7191 if (cmd->duplex != DUPLEX_FULL) 8141 if (cmd->duplex != DUPLEX_FULL) {
8142 DP(NETIF_MSG_LINK, "10G half not supported\n");
7192 return -EINVAL; 8143 return -EINVAL;
8144 }
7193 8145
7194 if (!(bp->supported & SUPPORTED_10000baseT_Full)) 8146 if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
8147 DP(NETIF_MSG_LINK, "10G full not supported\n");
7195 return -EINVAL; 8148 return -EINVAL;
8149 }
7196 8150
7197 advertising = (ADVERTISED_10000baseT_Full | 8151 advertising = (ADVERTISED_10000baseT_Full |
7198 ADVERTISED_FIBRE); 8152 ADVERTISED_FIBRE);
7199 break; 8153 break;
7200 8154
7201 default: 8155 default:
8156 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7202 return -EINVAL; 8157 return -EINVAL;
7203 } 8158 }
7204 8159
@@ -7398,8 +8353,7 @@ static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7398static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val, 8353static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7399 u32 cmd_flags) 8354 u32 cmd_flags)
7400{ 8355{
7401 int rc; 8356 int count, i, rc;
7402 int count, i;
7403 u32 val; 8357 u32 val;
7404 8358
7405 /* build the command word */ 8359 /* build the command word */
@@ -7452,13 +8406,13 @@ static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7452 8406
7453 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { 8407 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7454 DP(NETIF_MSG_NVM, 8408 DP(NETIF_MSG_NVM,
7455 "Invalid paramter: offset 0x%x buf_size 0x%x\n", 8409 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7456 offset, buf_size); 8410 offset, buf_size);
7457 return -EINVAL; 8411 return -EINVAL;
7458 } 8412 }
7459 8413
7460 if (offset + buf_size > bp->flash_size) { 8414 if (offset + buf_size > bp->flash_size) {
7461 DP(NETIF_MSG_NVM, "Invalid paramter: offset (0x%x) +" 8415 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7462 " buf_size (0x%x) > flash_size (0x%x)\n", 8416 " buf_size (0x%x) > flash_size (0x%x)\n",
7463 offset, buf_size, bp->flash_size); 8417 offset, buf_size, bp->flash_size);
7464 return -EINVAL; 8418 return -EINVAL;
@@ -7519,8 +8473,7 @@ static int bnx2x_get_eeprom(struct net_device *dev,
7519static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, 8473static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7520 u32 cmd_flags) 8474 u32 cmd_flags)
7521{ 8475{
7522 int rc; 8476 int count, i, rc;
7523 int count, i;
7524 8477
7525 /* build the command word */ 8478 /* build the command word */
7526 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR; 8479 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
@@ -7557,7 +8510,7 @@ static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7557 return rc; 8510 return rc;
7558} 8511}
7559 8512
7560#define BYTE_OFFSET(offset) (8 * (offset & 0x03)) 8513#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
7561 8514
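BYTE_OFFSET() turns the low two bits of a flash byte address into a bit shift inside the dword that the single-byte write path reads back, patches and rewrites. The following user-space sketch shows that read-modify-write pattern; the values and the splice itself are illustrative assumptions based on the macro, not a copy of bnx2x_nvram_write1().

#include <stdio.h>
#include <stdint.h>

#define BYTE_OFFSET(offset)     (8 * ((offset) & 0x03))

int main(void)
{
        uint32_t word = 0x11223344;     /* dword read back from NVRAM */
        uint32_t offset = 0x102;        /* flash byte address to patch */
        uint8_t data = 0xab;            /* new byte value */
        uint32_t shift = BYTE_OFFSET(offset);   /* byte 2 -> shift 16 */

        word &= ~(0xffu << shift);              /* clear the target byte */
        word |= (uint32_t)data << shift;        /* splice in the new one */

        printf("0x%08x\n", (unsigned)word);     /* prints 0x11ab3344 */
        return 0;
}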
7562static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, 8515static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
7563 int buf_size) 8516 int buf_size)
@@ -7568,7 +8521,7 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
7568 u32 val; 8521 u32 val;
7569 8522
7570 if (offset + buf_size > bp->flash_size) { 8523 if (offset + buf_size > bp->flash_size) {
7571 DP(NETIF_MSG_NVM, "Invalid paramter: offset (0x%x) +" 8524 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7572 " buf_size (0x%x) > flash_size (0x%x)\n", 8525 " buf_size (0x%x) > flash_size (0x%x)\n",
7573 offset, buf_size, bp->flash_size); 8526 offset, buf_size, bp->flash_size);
7574 return -EINVAL; 8527 return -EINVAL;
@@ -7621,13 +8574,13 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
7621 8574
7622 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { 8575 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7623 DP(NETIF_MSG_NVM, 8576 DP(NETIF_MSG_NVM,
7624 "Invalid paramter: offset 0x%x buf_size 0x%x\n", 8577 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7625 offset, buf_size); 8578 offset, buf_size);
7626 return -EINVAL; 8579 return -EINVAL;
7627 } 8580 }
7628 8581
7629 if (offset + buf_size > bp->flash_size) { 8582 if (offset + buf_size > bp->flash_size) {
7630 DP(NETIF_MSG_NVM, "Invalid paramter: offset (0x%x) +" 8583 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7631 " buf_size (0x%x) > flash_size (0x%x)\n", 8584 " buf_size (0x%x) > flash_size (0x%x)\n",
7632 offset, buf_size, bp->flash_size); 8585 offset, buf_size, bp->flash_size);
7633 return -EINVAL; 8586 return -EINVAL;
@@ -7788,52 +8741,29 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
7788 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", 8741 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
7789 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); 8742 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
7790 8743
7791 bp->req_flow_ctrl = FLOW_CTRL_AUTO;
7792 if (epause->autoneg) { 8744 if (epause->autoneg) {
7793 bp->req_autoneg |= AUTONEG_FLOW_CTRL; 8745 if (!(bp->supported & SUPPORTED_Autoneg)) {
7794 if (bp->dev->mtu <= 4500) { 8746 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7795 bp->pause_mode = PAUSE_BOTH; 8747 return -EINVAL;
7796 bp->advertising |= (ADVERTISED_Pause |
7797 ADVERTISED_Asym_Pause);
7798 } else {
7799 bp->pause_mode = PAUSE_ASYMMETRIC;
7800 bp->advertising |= ADVERTISED_Asym_Pause;
7801 } 8748 }
7802 8749
7803 } else { 8750 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
8751 } else
7804 bp->req_autoneg &= ~AUTONEG_FLOW_CTRL; 8752 bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
7805 8753
7806 if (epause->rx_pause) 8754 bp->req_flow_ctrl = FLOW_CTRL_AUTO;
7807 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7808 if (epause->tx_pause)
7809 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7810
7811 switch (bp->req_flow_ctrl) {
7812 case FLOW_CTRL_AUTO:
7813 bp->req_flow_ctrl = FLOW_CTRL_NONE;
7814 bp->pause_mode = PAUSE_NONE;
7815 bp->advertising &= ~(ADVERTISED_Pause |
7816 ADVERTISED_Asym_Pause);
7817 break;
7818 8755
7819 case FLOW_CTRL_TX: 8756 if (epause->rx_pause)
7820 bp->pause_mode = PAUSE_ASYMMETRIC; 8757 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7821 bp->advertising |= ADVERTISED_Asym_Pause; 8758 if (epause->tx_pause)
7822 break; 8759 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7823 8760
7824 case FLOW_CTRL_RX: 8761 if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
7825 case FLOW_CTRL_BOTH: 8762 (bp->req_flow_ctrl == FLOW_CTRL_AUTO))
7826 bp->pause_mode = PAUSE_BOTH; 8763 bp->req_flow_ctrl = FLOW_CTRL_NONE;
7827 bp->advertising |= (ADVERTISED_Pause |
7828 ADVERTISED_Asym_Pause);
7829 break;
7830 }
7831 }
7832 8764
7833 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n" 8765 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n",
7834 DP_LEVEL " pause_mode %d advertising 0x%x\n", 8766 bp->req_autoneg, bp->req_flow_ctrl);
7835 bp->req_autoneg, bp->req_flow_ctrl, bp->pause_mode,
7836 bp->advertising);
7837 8767
7838 bnx2x_stop_stats(bp); 8768 bnx2x_stop_stats(bp);
7839 bnx2x_link_initialize(bp); 8769 bnx2x_link_initialize(bp);
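The reworked bnx2x_set_pauseparam() above maps the ethtool request in three steps: request AUTONEG_FLOW_CTRL only when autoneg is asked for and supported, OR the rx_pause/tx_pause bits into a word that starts as FLOW_CTRL_AUTO, and collapse it to FLOW_CTRL_NONE when flow-control autoneg is off and neither bit was set. A small sketch of that mapping follows; the constant values and the map_pause() helper are placeholders, not the driver's.

#include <stdio.h>

#define FLOW_CTRL_AUTO  0x0
#define FLOW_CTRL_TX    0x1
#define FLOW_CTRL_RX    0x2
#define FLOW_CTRL_NONE  0x4

static unsigned map_pause(int autoneg, int rx_pause, int tx_pause,
                           unsigned *req_autoneg_flow)
{
        unsigned req = FLOW_CTRL_AUTO;

        *req_autoneg_flow = autoneg;    /* AUTONEG_FLOW_CTRL requested? */

        if (rx_pause)
                req |= FLOW_CTRL_RX;
        if (tx_pause)
                req |= FLOW_CTRL_TX;

        /* nothing requested and no autoneg -> explicitly no flow control */
        if (!*req_autoneg_flow && req == FLOW_CTRL_AUTO)
                req = FLOW_CTRL_NONE;

        return req;
}

int main(void)
{
        unsigned an;

        printf("autoneg=0 rx=0 tx=0 -> 0x%x\n", map_pause(0, 0, 0, &an));
        printf("autoneg=0 rx=1 tx=0 -> 0x%x\n", map_pause(0, 1, 0, &an));
        printf("autoneg=1 rx=0 tx=0 -> 0x%x\n", map_pause(1, 0, 0, &an));
        return 0;
}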
@@ -7906,81 +8836,87 @@ static void bnx2x_self_test(struct net_device *dev,
7906static struct { 8836static struct {
7907 char string[ETH_GSTRING_LEN]; 8837 char string[ETH_GSTRING_LEN];
7908} bnx2x_stats_str_arr[BNX2X_NUM_STATS] = { 8838} bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
7909 { "rx_bytes"}, /* 0 */ 8839 { "rx_bytes"},
7910 { "rx_error_bytes"}, /* 1 */ 8840 { "rx_error_bytes"},
7911 { "tx_bytes"}, /* 2 */ 8841 { "tx_bytes"},
7912 { "tx_error_bytes"}, /* 3 */ 8842 { "tx_error_bytes"},
7913 { "rx_ucast_packets"}, /* 4 */ 8843 { "rx_ucast_packets"},
7914 { "rx_mcast_packets"}, /* 5 */ 8844 { "rx_mcast_packets"},
7915 { "rx_bcast_packets"}, /* 6 */ 8845 { "rx_bcast_packets"},
7916 { "tx_ucast_packets"}, /* 7 */ 8846 { "tx_ucast_packets"},
7917 { "tx_mcast_packets"}, /* 8 */ 8847 { "tx_mcast_packets"},
7918 { "tx_bcast_packets"}, /* 9 */ 8848 { "tx_bcast_packets"},
7919 { "tx_mac_errors"}, /* 10 */ 8849 { "tx_mac_errors"}, /* 10 */
7920 { "tx_carrier_errors"}, /* 11 */ 8850 { "tx_carrier_errors"},
7921 { "rx_crc_errors"}, /* 12 */ 8851 { "rx_crc_errors"},
7922 { "rx_align_errors"}, /* 13 */ 8852 { "rx_align_errors"},
7923 { "tx_single_collisions"}, /* 14 */ 8853 { "tx_single_collisions"},
7924 { "tx_multi_collisions"}, /* 15 */ 8854 { "tx_multi_collisions"},
7925 { "tx_deferred"}, /* 16 */ 8855 { "tx_deferred"},
7926 { "tx_excess_collisions"}, /* 17 */ 8856 { "tx_excess_collisions"},
7927 { "tx_late_collisions"}, /* 18 */ 8857 { "tx_late_collisions"},
7928 { "tx_total_collisions"}, /* 19 */ 8858 { "tx_total_collisions"},
7929 { "rx_fragments"}, /* 20 */ 8859 { "rx_fragments"}, /* 20 */
7930 { "rx_jabbers"}, /* 21 */ 8860 { "rx_jabbers"},
7931 { "rx_undersize_packets"}, /* 22 */ 8861 { "rx_undersize_packets"},
7932 { "rx_oversize_packets"}, /* 23 */ 8862 { "rx_oversize_packets"},
7933 { "rx_xon_frames"}, /* 24 */ 8863 { "rx_xon_frames"},
7934 { "rx_xoff_frames"}, /* 25 */ 8864 { "rx_xoff_frames"},
7935 { "tx_xon_frames"}, /* 26 */ 8865 { "tx_xon_frames"},
7936 { "tx_xoff_frames"}, /* 27 */ 8866 { "tx_xoff_frames"},
7937 { "rx_mac_ctrl_frames"}, /* 28 */ 8867 { "rx_mac_ctrl_frames"},
7938 { "rx_filtered_packets"}, /* 29 */ 8868 { "rx_filtered_packets"},
7939 { "rx_discards"}, /* 30 */ 8869 { "rx_discards"}, /* 30 */
8870 { "brb_discard"},
8871 { "brb_truncate"},
8872 { "xxoverflow"}
7940}; 8873};
7941 8874
7942#define STATS_OFFSET32(offset_name) \ 8875#define STATS_OFFSET32(offset_name) \
7943 (offsetof(struct bnx2x_eth_stats, offset_name) / 4) 8876 (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
7944 8877
7945static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = { 8878static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
7946 STATS_OFFSET32(total_bytes_received_hi), /* 0 */ 8879 STATS_OFFSET32(total_bytes_received_hi),
7947 STATS_OFFSET32(stat_IfHCInBadOctets_hi), /* 1 */ 8880 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7948 STATS_OFFSET32(total_bytes_transmitted_hi), /* 2 */ 8881 STATS_OFFSET32(total_bytes_transmitted_hi),
7949 STATS_OFFSET32(stat_IfHCOutBadOctets_hi), /* 3 */ 8882 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7950 STATS_OFFSET32(total_unicast_packets_received_hi), /* 4 */ 8883 STATS_OFFSET32(total_unicast_packets_received_hi),
7951 STATS_OFFSET32(total_multicast_packets_received_hi), /* 5 */ 8884 STATS_OFFSET32(total_multicast_packets_received_hi),
7952 STATS_OFFSET32(total_broadcast_packets_received_hi), /* 6 */ 8885 STATS_OFFSET32(total_broadcast_packets_received_hi),
7953 STATS_OFFSET32(total_unicast_packets_transmitted_hi), /* 7 */ 8886 STATS_OFFSET32(total_unicast_packets_transmitted_hi),
7954 STATS_OFFSET32(total_multicast_packets_transmitted_hi), /* 8 */ 8887 STATS_OFFSET32(total_multicast_packets_transmitted_hi),
7955 STATS_OFFSET32(total_broadcast_packets_transmitted_hi), /* 9 */ 8888 STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
7956 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */ 8889 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
7957 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors), /* 11 */ 8890 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7958 STATS_OFFSET32(crc_receive_errors), /* 12 */ 8891 STATS_OFFSET32(crc_receive_errors),
7959 STATS_OFFSET32(alignment_errors), /* 13 */ 8892 STATS_OFFSET32(alignment_errors),
7960 STATS_OFFSET32(single_collision_transmit_frames), /* 14 */ 8893 STATS_OFFSET32(single_collision_transmit_frames),
7961 STATS_OFFSET32(multiple_collision_transmit_frames), /* 15 */ 8894 STATS_OFFSET32(multiple_collision_transmit_frames),
7962 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions), /* 16 */ 8895 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7963 STATS_OFFSET32(excessive_collision_frames), /* 17 */ 8896 STATS_OFFSET32(excessive_collision_frames),
7964 STATS_OFFSET32(late_collision_frames), /* 18 */ 8897 STATS_OFFSET32(late_collision_frames),
7965 STATS_OFFSET32(number_of_bugs_found_in_stats_spec), /* 19 */ 8898 STATS_OFFSET32(number_of_bugs_found_in_stats_spec),
7966 STATS_OFFSET32(runt_packets_received), /* 20 */ 8899 STATS_OFFSET32(runt_packets_received), /* 20 */
7967 STATS_OFFSET32(jabber_packets_received), /* 21 */ 8900 STATS_OFFSET32(jabber_packets_received),
7968 STATS_OFFSET32(error_runt_packets_received), /* 22 */ 8901 STATS_OFFSET32(error_runt_packets_received),
7969 STATS_OFFSET32(error_jabber_packets_received), /* 23 */ 8902 STATS_OFFSET32(error_jabber_packets_received),
7970 STATS_OFFSET32(pause_xon_frames_received), /* 24 */ 8903 STATS_OFFSET32(pause_xon_frames_received),
7971 STATS_OFFSET32(pause_xoff_frames_received), /* 25 */ 8904 STATS_OFFSET32(pause_xoff_frames_received),
7972 STATS_OFFSET32(pause_xon_frames_transmitted), /* 26 */ 8905 STATS_OFFSET32(pause_xon_frames_transmitted),
7973 STATS_OFFSET32(pause_xoff_frames_transmitted), /* 27 */ 8906 STATS_OFFSET32(pause_xoff_frames_transmitted),
7974 STATS_OFFSET32(control_frames_received), /* 28 */ 8907 STATS_OFFSET32(control_frames_received),
7975 STATS_OFFSET32(mac_filter_discard), /* 29 */ 8908 STATS_OFFSET32(mac_filter_discard),
7976 STATS_OFFSET32(no_buff_discard), /* 30 */ 8909 STATS_OFFSET32(no_buff_discard), /* 30 */
8910 STATS_OFFSET32(brb_discard),
8911 STATS_OFFSET32(brb_truncate_discard),
8912 STATS_OFFSET32(xxoverflow_discard)
7977}; 8913};
7978 8914
7979static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = { 8915static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
7980 8, 0, 8, 0, 8, 8, 8, 8, 8, 8, 8916 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
7981 4, 0, 4, 4, 4, 4, 4, 4, 4, 4, 8917 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
7982 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 8918 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
7983 4, 8919 4, 4, 4, 4
7984}; 8920};
7985 8921
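bnx2x_stats_str_arr, bnx2x_stats_offset_arr and bnx2x_stats_len_arr are parallel arrays walked in lockstep: the name, the dword offset into struct bnx2x_eth_stats (offsetof()/4 via STATS_OFFSET32) and the field width (8 for a 64-bit hi/lo pair, 4 for a 32-bit counter). A minimal sketch of the same pattern with a hypothetical stats struct follows; how the driver handles the 0-width entries is not shown here.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_eth_stats {
        uint32_t rx_bytes_hi;
        uint32_t rx_bytes_lo;
        uint32_t rx_crc_errors;
};

#define DEMO_OFFSET32(m)  (offsetof(struct demo_eth_stats, m) / 4)

static const char *demo_str[]    = { "rx_bytes", "rx_crc_errors" };
static const unsigned demo_off[] = { DEMO_OFFSET32(rx_bytes_hi),
                                     DEMO_OFFSET32(rx_crc_errors) };
static const unsigned demo_len[] = { 8, 4 };    /* 8 = 64-bit hi/lo pair */

int main(void)
{
        struct demo_eth_stats st = { 0, 12345, 7 };
        const uint32_t *hw = (const uint32_t *)&st;

        for (unsigned i = 0; i < 2; i++) {
                uint64_t val = hw[demo_off[i]];

                if (demo_len[i] == 8)   /* combine the hi/lo dwords */
                        val = (val << 32) | hw[demo_off[i] + 1];
                printf("%-15s %llu\n", demo_str[i],
                       (unsigned long long)val);
        }
        return 0;
}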
7986static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 8922static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -8138,9 +9074,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
8138 * net_device service functions 9074 * net_device service functions
8139 */ 9075 */
8140 9076
8141/* Called with rtnl_lock from vlan functions and also netif_tx_lock 9077/* called with netif_tx_lock from set_multicast */
8142 * from set_multicast.
8143 */
8144static void bnx2x_set_rx_mode(struct net_device *dev) 9078static void bnx2x_set_rx_mode(struct net_device *dev)
8145{ 9079{
8146 struct bnx2x *bp = netdev_priv(dev); 9080 struct bnx2x *bp = netdev_priv(dev);
@@ -8314,7 +9248,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8314 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT); 9248 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
8315 tx_bd->general_data |= 1; /* header nbd */ 9249 tx_bd->general_data |= 1; /* header nbd */
8316 9250
8317 /* remeber the first bd of the packet */ 9251 /* remember the first bd of the packet */
8318 tx_buf->first_bd = bd_prod; 9252 tx_buf->first_bd = bd_prod;
8319 9253
8320 DP(NETIF_MSG_TX_QUEUED, 9254 DP(NETIF_MSG_TX_QUEUED,
@@ -8334,7 +9268,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8334 9268
8335 /* for now NS flag is not used in Linux */ 9269 /* for now NS flag is not used in Linux */
8336 pbd->global_data = (len | 9270 pbd->global_data = (len |
8337 ((skb->protocol == ETH_P_8021Q) << 9271 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
8338 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT)); 9272 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
8339 pbd->ip_hlen = ip_hdrlen(skb) / 2; 9273 pbd->ip_hlen = ip_hdrlen(skb) / 2;
8340 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen); 9274 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
@@ -8343,7 +9277,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8343 9277
8344 tx_bd->bd_flags.as_bitfield |= 9278 tx_bd->bd_flags.as_bitfield |=
8345 ETH_TX_BD_FLAGS_TCP_CSUM; 9279 ETH_TX_BD_FLAGS_TCP_CSUM;
8346 pbd->tcp_flags = htonl(tcp_flag_word(skb)) & 0xFFFF; 9280 pbd->tcp_flags = pbd_tcp_flags(skb);
8347 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2); 9281 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
8348 pbd->tcp_pseudo_csum = swab16(th->check); 9282 pbd->tcp_pseudo_csum = swab16(th->check);
8349 9283
@@ -8387,7 +9321,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8387 9321
8388 if (skb_shinfo(skb)->gso_size && 9322 if (skb_shinfo(skb)->gso_size &&
8389 (skb->len > (bp->dev->mtu + ETH_HLEN))) { 9323 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
8390 int hlen = 2 * le32_to_cpu(pbd->total_hlen); 9324 int hlen = 2 * le16_to_cpu(pbd->total_hlen);
8391 9325
8392 DP(NETIF_MSG_TX_QUEUED, 9326 DP(NETIF_MSG_TX_QUEUED,
8393 "TSO packet len %d hlen %d total len %d tso size %d\n", 9327 "TSO packet len %d hlen %d total len %d tso size %d\n",
@@ -8427,7 +9361,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8427 tx_bd->vlan = cpu_to_le16(pkt_prod); 9361 tx_bd->vlan = cpu_to_le16(pkt_prod);
8428 /* this marks the bd 9362 /* this marks the bd
8429 * as one that has no individual mapping 9363 * as one that has no individual mapping
8430 * the FW ignors this flag in a bd not maked start 9364 * the FW ignores this flag in a bd not marked start
8431 */ 9365 */
8432 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO; 9366 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
8433 DP(NETIF_MSG_TX_QUEUED, 9367 DP(NETIF_MSG_TX_QUEUED,
@@ -8504,9 +9438,11 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8504 9438
8505 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod); 9439 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);
8506 9440
8507 fp->hw_tx_prods->bds_prod += cpu_to_le16(nbd); 9441 fp->hw_tx_prods->bds_prod =
9442 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
8508 mb(); /* FW restriction: must not reorder writing nbd and packets */ 9443 mb(); /* FW restriction: must not reorder writing nbd and packets */
8509 fp->hw_tx_prods->packets_prod += cpu_to_le32(1); 9444 fp->hw_tx_prods->packets_prod =
9445 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8510 DOORBELL(bp, fp_index, 0); 9446 DOORBELL(bp, fp_index, 0);
8511 9447
8512 mmiowb(); 9448 mmiowb();
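The producer updates above changed because hw_tx_prods->bds_prod and packets_prod are little-endian fields shared with the chip: the old "+= cpu_to_le16(nbd)" adds byte-swapped quantities on a big-endian host, which goes wrong as soon as a carry crosses a byte boundary, while the new code converts to CPU order, adds, and converts back. A user-space sketch that fakes a big-endian CPU by always swapping (values made up):

#include <stdio.h>
#include <stdint.h>

/* stand-in for cpu_to_le16()/le16_to_cpu() on a big-endian host */
static uint16_t swap16(uint16_t v)
{
        return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
        uint16_t le_prod = swap16(0x00ff);      /* LE field holding 0x00ff */
        unsigned nbd = 1;

        uint16_t old_way = le_prod + swap16(nbd);          /* old code */
        uint16_t new_way = swap16(swap16(le_prod) + nbd);  /* new code */

        /* print both as the chip (little-endian reader) would see them */
        printf("old: 0x%04x  new: 0x%04x (0x0100 expected)\n",
               (unsigned)swap16(old_way), (unsigned)swap16(new_way));
        return 0;
}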
@@ -8525,11 +9461,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8525 return NETDEV_TX_OK; 9461 return NETDEV_TX_OK;
8526} 9462}
8527 9463
8528static struct net_device_stats *bnx2x_get_stats(struct net_device *dev)
8529{
8530 return &dev->stats;
8531}
8532
8533/* Called with rtnl_lock */ 9464/* Called with rtnl_lock */
8534static int bnx2x_open(struct net_device *dev) 9465static int bnx2x_open(struct net_device *dev)
8535{ 9466{
@@ -8543,16 +9474,13 @@ static int bnx2x_open(struct net_device *dev)
8543/* Called with rtnl_lock */ 9474/* Called with rtnl_lock */
8544static int bnx2x_close(struct net_device *dev) 9475static int bnx2x_close(struct net_device *dev)
8545{ 9476{
8546 int rc;
8547 struct bnx2x *bp = netdev_priv(dev); 9477 struct bnx2x *bp = netdev_priv(dev);
8548 9478
8549 /* Unload the driver, release IRQs */ 9479 /* Unload the driver, release IRQs */
8550 rc = bnx2x_nic_unload(bp, 1); 9480 bnx2x_nic_unload(bp, 1);
8551 if (rc) { 9481
8552 BNX2X_ERR("bnx2x_nic_unload failed: %d\n", rc); 9482 if (!CHIP_REV_IS_SLOW(bp))
8553 return rc; 9483 bnx2x_set_power_state(bp, PCI_D3hot);
8554 }
8555 bnx2x_set_power_state(bp, PCI_D3hot);
8556 9484
8557 return 0; 9485 return 0;
8558} 9486}
@@ -8584,7 +9512,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8584 case SIOCGMIIPHY: 9512 case SIOCGMIIPHY:
8585 data->phy_id = bp->phy_addr; 9513 data->phy_id = bp->phy_addr;
8586 9514
8587 /* fallthru */ 9515 /* fallthrough */
8588 case SIOCGMIIREG: { 9516 case SIOCGMIIREG: {
8589 u32 mii_regval; 9517 u32 mii_regval;
8590 9518
@@ -8633,7 +9561,7 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
8633 return -EINVAL; 9561 return -EINVAL;
8634 9562
8635 /* This does not race with packet allocation 9563 /* This does not race with packet allocation
8636 * because the actuall alloc size is 9564 * because the actual alloc size is
8637 * only updated as part of load 9565 * only updated as part of load
8638 */ 9566 */
8639 dev->mtu = new_mtu; 9567 dev->mtu = new_mtu;
@@ -8666,7 +9594,7 @@ static void bnx2x_vlan_rx_register(struct net_device *dev,
8666 9594
8667 bp->vlgrp = vlgrp; 9595 bp->vlgrp = vlgrp;
8668 if (netif_running(dev)) 9596 if (netif_running(dev))
8669 bnx2x_set_rx_mode(dev); 9597 bnx2x_set_client_config(bp);
8670} 9598}
8671#endif 9599#endif
8672 9600
@@ -8695,14 +9623,18 @@ static void bnx2x_reset_task(struct work_struct *work)
8695 if (!netif_running(bp->dev)) 9623 if (!netif_running(bp->dev))
8696 return; 9624 return;
8697 9625
8698 bp->in_reset_task = 1; 9626 rtnl_lock();
8699 9627
8700 bnx2x_netif_stop(bp); 9628 if (bp->state != BNX2X_STATE_OPEN) {
9629 DP(NETIF_MSG_TX_ERR, "state is %x, returning\n", bp->state);
9630 goto reset_task_exit;
9631 }
8701 9632
8702 bnx2x_nic_unload(bp, 0); 9633 bnx2x_nic_unload(bp, 0);
8703 bnx2x_nic_load(bp, 0); 9634 bnx2x_nic_load(bp, 0);
8704 9635
8705 bp->in_reset_task = 0; 9636reset_task_exit:
9637 rtnl_unlock();
8706} 9638}
8707 9639
8708static int __devinit bnx2x_init_board(struct pci_dev *pdev, 9640static int __devinit bnx2x_init_board(struct pci_dev *pdev,
@@ -8783,8 +9715,6 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev,
8783 9715
8784 spin_lock_init(&bp->phy_lock); 9716 spin_lock_init(&bp->phy_lock);
8785 9717
8786 bp->in_reset_task = 0;
8787
8788 INIT_WORK(&bp->reset_task, bnx2x_reset_task); 9718 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8789 INIT_WORK(&bp->sp_task, bnx2x_sp_task); 9719 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
8790 9720
@@ -8813,7 +9743,7 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev,
8813 bnx2x_get_hwinfo(bp); 9743 bnx2x_get_hwinfo(bp);
8814 9744
8815 if (CHIP_REV(bp) == CHIP_REV_FPGA) { 9745 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
8816 printk(KERN_ERR PFX "FPGA detacted. MCP disabled," 9746 printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
8817 " will only init first device\n"); 9747 " will only init first device\n");
8818 onefunc = 1; 9748 onefunc = 1;
8819 nomcp = 1; 9749 nomcp = 1;
@@ -8882,14 +9812,32 @@ err_out:
8882 return rc; 9812 return rc;
8883} 9813}
8884 9814
9815static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
9816{
9817 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9818
9819 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9820 return val;
9821}
9822
9823/* return value of 1=2.5GHz 2=5GHz */
9824static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
9825{
9826 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9827
9828 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
9829 return val;
9830}
9831
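bnx2x_get_pcie_width() and bnx2x_get_pcie_speed() simply mask and shift fields out of the PCICFG_LINK_CONTROL dword, using the PCICFG_LINK_WIDTH/PCICFG_LINK_SPEED masks this patch adds to bnx2x.h; the probe message then prints the width and maps speed code 2 to "5GHz (Gen2)". A minimal user-space sketch of that decode with a made-up register value:

#include <stdio.h>
#include <stdint.h>

#define PCICFG_LINK_WIDTH        0x1f00000
#define PCICFG_LINK_WIDTH_SHIFT  20
#define PCICFG_LINK_SPEED        0xf0000
#define PCICFG_LINK_SPEED_SHIFT  16

int main(void)
{
        /* hypothetical PCICFG_LINK_CONTROL readout: x8 link at Gen2 */
        uint32_t val = (8 << PCICFG_LINK_WIDTH_SHIFT) |
                       (2 << PCICFG_LINK_SPEED_SHIFT);

        unsigned width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
        unsigned speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;

        printf("PCI-E x%u %s\n", width,
               (speed == 2) ? "5GHz (Gen2)" : "2.5GHz");
        return 0;
}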
8885static int __devinit bnx2x_init_one(struct pci_dev *pdev, 9832static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8886 const struct pci_device_id *ent) 9833 const struct pci_device_id *ent)
8887{ 9834{
8888 static int version_printed; 9835 static int version_printed;
8889 struct net_device *dev = NULL; 9836 struct net_device *dev = NULL;
8890 struct bnx2x *bp; 9837 struct bnx2x *bp;
8891 int rc, i; 9838 int rc;
8892 int port = PCI_FUNC(pdev->devfn); 9839 int port = PCI_FUNC(pdev->devfn);
9840 DECLARE_MAC_BUF(mac);
8893 9841
8894 if (version_printed++ == 0) 9842 if (version_printed++ == 0)
8895 printk(KERN_INFO "%s", version); 9843 printk(KERN_INFO "%s", version);
@@ -8906,6 +9854,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8906 9854
8907 if (port && onefunc) { 9855 if (port && onefunc) {
8908 printk(KERN_ERR PFX "second function disabled. exiting\n"); 9856 printk(KERN_ERR PFX "second function disabled. exiting\n");
9857 free_netdev(dev);
8909 return 0; 9858 return 0;
8910 } 9859 }
8911 9860
@@ -8918,7 +9867,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8918 dev->hard_start_xmit = bnx2x_start_xmit; 9867 dev->hard_start_xmit = bnx2x_start_xmit;
8919 dev->watchdog_timeo = TX_TIMEOUT; 9868 dev->watchdog_timeo = TX_TIMEOUT;
8920 9869
8921 dev->get_stats = bnx2x_get_stats;
8922 dev->ethtool_ops = &bnx2x_ethtool_ops; 9870 dev->ethtool_ops = &bnx2x_ethtool_ops;
8923 dev->open = bnx2x_open; 9871 dev->open = bnx2x_open;
8924 dev->stop = bnx2x_close; 9872 dev->stop = bnx2x_close;
@@ -8944,7 +9892,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8944 9892
8945 rc = register_netdev(dev); 9893 rc = register_netdev(dev);
8946 if (rc) { 9894 if (rc) {
8947 printk(KERN_ERR PFX "Cannot register net device\n"); 9895 dev_err(&pdev->dev, "Cannot register net device\n");
8948 if (bp->regview) 9896 if (bp->regview)
8949 iounmap(bp->regview); 9897 iounmap(bp->regview);
8950 if (bp->doorbells) 9898 if (bp->doorbells)
@@ -8959,32 +9907,30 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8959 pci_set_drvdata(pdev, dev); 9907 pci_set_drvdata(pdev, dev);
8960 9908
8961 bp->name = board_info[ent->driver_data].name; 9909 bp->name = board_info[ent->driver_data].name;
8962 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz " 9910 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
8963 "found at mem %lx, IRQ %d, ", 9911 " IRQ %d, ", dev->name, bp->name,
8964 dev->name, bp->name,
8965 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', 9912 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8966 ((CHIP_ID(bp) & 0x0ff0) >> 4), 9913 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8967 ((bp->flags & PCIX_FLAG) ? "-X" : ""), 9914 bnx2x_get_pcie_width(bp),
8968 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"), 9915 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
8969 bp->bus_speed_mhz, 9916 dev->base_addr, bp->pdev->irq);
8970 dev->base_addr, 9917 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
8971 bp->pdev->irq);
8972
8973 printk("node addr ");
8974 for (i = 0; i < 6; i++)
8975 printk("%2.2x", dev->dev_addr[i]);
8976 printk("\n");
8977
8978 return 0; 9918 return 0;
8979} 9919}
8980 9920
8981static void __devexit bnx2x_remove_one(struct pci_dev *pdev) 9921static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
8982{ 9922{
8983 struct net_device *dev = pci_get_drvdata(pdev); 9923 struct net_device *dev = pci_get_drvdata(pdev);
8984 struct bnx2x *bp = netdev_priv(dev); 9924 struct bnx2x *bp;
9925
9926 if (!dev) {
9927 /* we get here if init_one() fails */
9928 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
9929 return;
9930 }
9931
9932 bp = netdev_priv(dev);
8985 9933
8986 flush_scheduled_work();
8987 /*tasklet_kill(&bp->sp_task);*/
8988 unregister_netdev(dev); 9934 unregister_netdev(dev);
8989 9935
8990 if (bp->regview) 9936 if (bp->regview)
@@ -9002,34 +9948,43 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9002static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) 9948static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
9003{ 9949{
9004 struct net_device *dev = pci_get_drvdata(pdev); 9950 struct net_device *dev = pci_get_drvdata(pdev);
9005 struct bnx2x *bp = netdev_priv(dev); 9951 struct bnx2x *bp;
9006 int rc; 9952
9953 if (!dev)
9954 return 0;
9007 9955
9008 if (!netif_running(dev)) 9956 if (!netif_running(dev))
9009 return 0; 9957 return 0;
9010 9958
9011 rc = bnx2x_nic_unload(bp, 0); 9959 bp = netdev_priv(dev);
9012 if (!rc) 9960
9013 return rc; 9961 bnx2x_nic_unload(bp, 0);
9014 9962
9015 netif_device_detach(dev); 9963 netif_device_detach(dev);
9016 pci_save_state(pdev);
9017 9964
9965 pci_save_state(pdev);
9018 bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); 9966 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
9967
9019 return 0; 9968 return 0;
9020} 9969}
9021 9970
9022static int bnx2x_resume(struct pci_dev *pdev) 9971static int bnx2x_resume(struct pci_dev *pdev)
9023{ 9972{
9024 struct net_device *dev = pci_get_drvdata(pdev); 9973 struct net_device *dev = pci_get_drvdata(pdev);
9025 struct bnx2x *bp = netdev_priv(dev); 9974 struct bnx2x *bp;
9026 int rc; 9975 int rc;
9027 9976
9977 if (!dev) {
9978 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
9979 return -ENODEV;
9980 }
9981
9028 if (!netif_running(dev)) 9982 if (!netif_running(dev))
9029 return 0; 9983 return 0;
9030 9984
9031 pci_restore_state(pdev); 9985 bp = netdev_priv(dev);
9032 9986
9987 pci_restore_state(pdev);
9033 bnx2x_set_power_state(bp, PCI_D0); 9988 bnx2x_set_power_state(bp, PCI_D0);
9034 netif_device_attach(dev); 9989 netif_device_attach(dev);
9035 9990
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 4f7ae6f77452..4f0c0d31e7c1 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -1,6 +1,6 @@
1/* bnx2x.h: Broadcom Everest network driver. 1/* bnx2x.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007 Broadcom Corporation 3 * Copyright (c) 2007-2008 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -24,6 +24,8 @@
24#define BNX2X_MSG_STATS 0x20000 /* was: NETIF_MSG_TIMER */ 24#define BNX2X_MSG_STATS 0x20000 /* was: NETIF_MSG_TIMER */
25#define NETIF_MSG_NVM 0x40000 /* was: NETIF_MSG_HW */ 25#define NETIF_MSG_NVM 0x40000 /* was: NETIF_MSG_HW */
26#define NETIF_MSG_DMAE 0x80000 /* was: NETIF_MSG_HW */ 26#define NETIF_MSG_DMAE 0x80000 /* was: NETIF_MSG_HW */
27#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */
28#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */
27 29
28#define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */ 30#define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */
29 31
@@ -40,6 +42,12 @@
40 __LINE__, bp->dev?(bp->dev->name):"?", ##__args); \ 42 __LINE__, bp->dev?(bp->dev->name):"?", ##__args); \
41 } while (0) 43 } while (0)
42 44
45/* for logging (never masked) */
46#define BNX2X_LOG(__fmt, __args...) do { \
47 printk(KERN_NOTICE "[%s:%d(%s)]" __fmt, __FUNCTION__, \
48 __LINE__, bp->dev?(bp->dev->name):"?", ##__args); \
49 } while (0)
50
43/* before we have a dev->name use dev_info() */ 51/* before we have a dev->name use dev_info() */
44#define BNX2X_DEV_INFO(__fmt, __args...) do { \ 52#define BNX2X_DEV_INFO(__fmt, __args...) do { \
45 if (bp->msglevel & NETIF_MSG_PROBE) \ 53 if (bp->msglevel & NETIF_MSG_PROBE) \
@@ -423,8 +431,6 @@ struct bnx2x_fastpath {
423#define BNX2X_FP_STATE_OPEN 0xa0000 431#define BNX2X_FP_STATE_OPEN 0xa0000
424#define BNX2X_FP_STATE_HALTING 0xb0000 432#define BNX2X_FP_STATE_HALTING 0xb0000
425#define BNX2X_FP_STATE_HALTED 0xc0000 433#define BNX2X_FP_STATE_HALTED 0xc0000
426#define BNX2X_FP_STATE_DELETED 0xd0000
427#define BNX2X_FP_STATE_CLOSE_IRQ 0xe0000
428 434
429 int index; 435 int index;
430 436
@@ -505,7 +511,6 @@ struct bnx2x {
505 struct eth_spe *spq; 511 struct eth_spe *spq;
506 dma_addr_t spq_mapping; 512 dma_addr_t spq_mapping;
507 u16 spq_prod_idx; 513 u16 spq_prod_idx;
508 u16 dsb_sp_prod_idx;
509 struct eth_spe *spq_prod_bd; 514 struct eth_spe *spq_prod_bd;
510 struct eth_spe *spq_last_bd; 515 struct eth_spe *spq_last_bd;
511 u16 *dsb_sp_prod; 516 u16 *dsb_sp_prod;
@@ -517,7 +522,7 @@ struct bnx2x {
517 */ 522 */
518 u8 stat_pending; 523 u8 stat_pending;
519 524
520 /* End of fileds used in the performance code paths */ 525 /* End of fields used in the performance code paths */
521 526
522 int panic; 527 int panic;
523 int msglevel; 528 int msglevel;
@@ -540,8 +545,6 @@ struct bnx2x {
540 spinlock_t phy_lock; 545 spinlock_t phy_lock;
541 546
542 struct work_struct reset_task; 547 struct work_struct reset_task;
543 u16 in_reset_task;
544
545 struct work_struct sp_task; 548 struct work_struct sp_task;
546 549
547 struct timer_list timer; 550 struct timer_list timer;
@@ -555,7 +558,6 @@ struct bnx2x {
555#define CHIP_ID(bp) (((bp)->chip_id) & 0xfffffff0) 558#define CHIP_ID(bp) (((bp)->chip_id) & 0xfffffff0)
556 559
557#define CHIP_NUM(bp) (((bp)->chip_id) & 0xffff0000) 560#define CHIP_NUM(bp) (((bp)->chip_id) & 0xffff0000)
558#define CHIP_NUM_5710 0x57100000
559 561
560#define CHIP_REV(bp) (((bp)->chip_id) & 0x0000f000) 562#define CHIP_REV(bp) (((bp)->chip_id) & 0x0000f000)
561#define CHIP_REV_Ax 0x00000000 563#define CHIP_REV_Ax 0x00000000
@@ -574,7 +576,8 @@ struct bnx2x {
574 u32 fw_mb; 576 u32 fw_mb;
575 577
576 u32 hw_config; 578 u32 hw_config;
577 u32 serdes_config; 579 u32 board;
580 u32 serdes_config;
578 u32 lane_config; 581 u32 lane_config;
579 u32 ext_phy_config; 582 u32 ext_phy_config;
580#define XGXS_EXT_PHY_TYPE(bp) (bp->ext_phy_config & \ 583#define XGXS_EXT_PHY_TYPE(bp) (bp->ext_phy_config & \
@@ -595,11 +598,11 @@ struct bnx2x {
595 u8 tx_lane_swap; 598 u8 tx_lane_swap;
596 599
597 u8 link_up; 600 u8 link_up;
601 u8 phy_link_up;
598 602
599 u32 supported; 603 u32 supported;
600/* link settings - missing defines */ 604/* link settings - missing defines */
601#define SUPPORTED_2500baseT_Full (1 << 15) 605#define SUPPORTED_2500baseT_Full (1 << 15)
602#define SUPPORTED_CX4 (1 << 16)
603 606
604 u32 phy_flags; 607 u32 phy_flags;
605/*#define PHY_SERDES_FLAG 0x1*/ 608/*#define PHY_SERDES_FLAG 0x1*/
@@ -644,16 +647,9 @@ struct bnx2x {
644#define FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH 647#define FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
645#define FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE 648#define FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
646 649
647 u32 pause_mode;
648#define PAUSE_NONE 0
649#define PAUSE_SYMMETRIC 1
650#define PAUSE_ASYMMETRIC 2
651#define PAUSE_BOTH 3
652
653 u32 advertising; 650 u32 advertising;
654/* link settings - missing defines */ 651/* link settings - missing defines */
655#define ADVERTISED_2500baseT_Full (1 << 15) 652#define ADVERTISED_2500baseT_Full (1 << 15)
656#define ADVERTISED_CX4 (1 << 16)
657 653
658 u32 link_status; 654 u32 link_status;
659 u32 line_speed; 655 u32 line_speed;
@@ -667,6 +663,8 @@ struct bnx2x {
667#define NVRAM_TIMEOUT_COUNT 30000 663#define NVRAM_TIMEOUT_COUNT 30000
668#define NVRAM_PAGE_SIZE 256 664#define NVRAM_PAGE_SIZE 256
669 665
666 u8 wol;
667
670 int rx_ring_size; 668 int rx_ring_size;
671 669
672 u16 tx_quick_cons_trip_int; 670 u16 tx_quick_cons_trip_int;
@@ -718,9 +716,6 @@ struct bnx2x {
718#endif 716#endif
719 717
720 char *name; 718 char *name;
721 u16 bus_speed_mhz;
722 u8 wol;
723 u8 pad;
724 719
725 /* used to synchronize stats collecting */ 720 /* used to synchronize stats collecting */
726 int stats_state; 721 int stats_state;
@@ -856,8 +851,8 @@ struct bnx2x {
856#define MAX_SPQ_PENDING 8 851#define MAX_SPQ_PENDING 8
857 852
858 853
859#define BNX2X_NUM_STATS 31 854#define BNX2X_NUM_STATS 34
860#define BNX2X_NUM_TESTS 2 855#define BNX2X_NUM_TESTS 1
861 856
862 857
863#define DPM_TRIGER_TYPE 0x40 858#define DPM_TRIGER_TYPE 0x40
@@ -867,6 +862,15 @@ struct bnx2x {
867 DPM_TRIGER_TYPE); \ 862 DPM_TRIGER_TYPE); \
868 } while (0) 863 } while (0)
869 864
865/* PCIE link and speed */
866#define PCICFG_LINK_WIDTH 0x1f00000
867#define PCICFG_LINK_WIDTH_SHIFT 20
868#define PCICFG_LINK_SPEED 0xf0000
869#define PCICFG_LINK_SPEED_SHIFT 16
870
871#define BMAC_CONTROL_RX_ENABLE 2
872
873#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
870 874
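pbd_tcp_flags() extracts the TCP flags byte from tcp_flag_word(), the fourth 32-bit word of the TCP header (data offset, reserved bits, flags, window) in network byte order: ntohl() brings it to host order, >>16 keeps the doff/flags half, and & 0xff keeps just the flags. A user-space sketch with hand-built header bytes (made up for demonstration):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
        /* doff=5 (20-byte header), flags=PSH|ACK (0x18), window=0x7210 */
        uint8_t hdr_word[4] = { 0x50, 0x18, 0x72, 0x10 };
        uint32_t flag_word;
        uint8_t flags;

        memcpy(&flag_word, hdr_word, sizeof(flag_word));  /* network order */
        flags = (ntohl(flag_word) >> 16) & 0xff;

        printf("tcp flags byte: 0x%02x\n", flags);      /* prints 0x18 */
        return 0;
}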
871/* stuff added to make the code fit 80Col */ 875/* stuff added to make the code fit 80Col */
872 876
@@ -939,13 +943,13 @@ struct bnx2x {
939#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD 943#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
940#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD 944#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
941 945
942#define NIG_STATUS_INTERRUPT_XGXS0_LINK10G \ 946#define NIG_STATUS_XGXS0_LINK10G \
943 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G 947 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
944#define NIG_XGXS0_LINK_STATUS \ 948#define NIG_STATUS_XGXS0_LINK_STATUS \
945 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS 949 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
946#define NIG_XGXS0_LINK_STATUS_SIZE \ 950#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
947 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 951 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
948#define NIG_SERDES0_LINK_STATUS \ 952#define NIG_STATUS_SERDES0_LINK_STATUS \
949 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS 953 NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
950#define NIG_MASK_MI_INT \ 954#define NIG_MASK_MI_INT \
951 NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT 955 NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h
index 62a6eb81025a..3b968904ca65 100644
--- a/drivers/net/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x_fw_defs.h
@@ -1,6 +1,6 @@
1/* bnx2x_fw_defs.h: Broadcom Everest network driver. 1/* bnx2x_fw_defs.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007 Broadcom Corporation 3 * Copyright (c) 2007-2008 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index 6fd959c34d1f..b21075ccb52e 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -1,6 +1,6 @@
1/* bnx2x_hsi.h: Broadcom Everest network driver. 1/* bnx2x_hsi.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007 Broadcom Corporation 3 * Copyright (c) 2007-2008 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -8,169 +8,9 @@
8 */ 8 */
9 9
10 10
11#define FUNC_0 0 11#define PORT_0 0
12#define FUNC_1 1 12#define PORT_1 1
13#define FUNC_MAX 2 13#define PORT_MAX 2
14
15
16/* This value (in milliseconds) determines the frequency of the driver
17 * issuing the PULSE message code. The firmware monitors this periodic
18 * pulse to determine when to switch to an OS-absent mode. */
19#define DRV_PULSE_PERIOD_MS 250
20
21/* This value (in milliseconds) determines how long the driver should
22 * wait for an acknowledgement from the firmware before timing out. Once
23 * the firmware has timed out, the driver will assume there is no firmware
24 * running and there won't be any firmware-driver synchronization during a
25 * driver reset. */
26#define FW_ACK_TIME_OUT_MS 5000
27
28#define FW_ACK_POLL_TIME_MS 1
29
30#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
31
32/* LED Blink rate that will achieve ~15.9Hz */
33#define LED_BLINK_RATE_VAL 480
34
35/****************************************************************************
36 * Driver <-> FW Mailbox *
37 ****************************************************************************/
38struct drv_fw_mb {
39 u32 drv_mb_header;
40#define DRV_MSG_CODE_MASK 0xffff0000
41#define DRV_MSG_CODE_LOAD_REQ 0x10000000
42#define DRV_MSG_CODE_LOAD_DONE 0x11000000
43#define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000
44#define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000
45#define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000
46#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
47#define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000
48#define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000
49#define DRV_MSG_CODE_VALIDATE_KEY 0x70000000
50#define DRV_MSG_CODE_GET_CURR_KEY 0x80000000
51#define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
52#define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
53#define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
54
55#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
56
57 u32 drv_mb_param;
58
59 u32 fw_mb_header;
60#define FW_MSG_CODE_MASK 0xffff0000
61#define FW_MSG_CODE_DRV_LOAD_COMMON 0x11000000
62#define FW_MSG_CODE_DRV_LOAD_PORT 0x12000000
63#define FW_MSG_CODE_DRV_LOAD_REFUSED 0x13000000
64#define FW_MSG_CODE_DRV_LOAD_DONE 0x14000000
65#define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x21000000
66#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x22000000
67#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x23000000
68#define FW_MSG_CODE_DIAG_ENTER_DONE 0x50000000
69#define FW_MSG_CODE_DIAG_REFUSE 0x51000000
70#define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70000000
71#define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x71000000
72#define FW_MSG_CODE_GET_KEY_DONE 0x80000000
73#define FW_MSG_CODE_NO_KEY 0x8f000000
74#define FW_MSG_CODE_LIC_INFO_NOT_READY 0x8f800000
75#define FW_MSG_CODE_L2B_PRAM_LOADED 0x90000000
76#define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x91000000
77#define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x92000000
78#define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x93000000
79#define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x94000000
80
81#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
82
83 u32 fw_mb_param;
84
85 u32 link_status;
86 /* Driver should update this field on any link change event */
87
88#define LINK_STATUS_LINK_FLAG_MASK 0x00000001
89#define LINK_STATUS_LINK_UP 0x00000001
90#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
91#define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1)
92#define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1)
93#define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1)
94#define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1)
95#define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1)
96#define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1)
97#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1)
98#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1)
99#define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1)
100#define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1)
101#define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1)
102#define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1)
103#define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1)
104#define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1)
105#define LINK_STATUS_SPEED_AND_DUPLEX_12GTFD (11<<1)
106#define LINK_STATUS_SPEED_AND_DUPLEX_12GXFD (11<<1)
107#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD (12<<1)
108#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD (12<<1)
109#define LINK_STATUS_SPEED_AND_DUPLEX_13GTFD (13<<1)
110#define LINK_STATUS_SPEED_AND_DUPLEX_13GXFD (13<<1)
111#define LINK_STATUS_SPEED_AND_DUPLEX_15GTFD (14<<1)
112#define LINK_STATUS_SPEED_AND_DUPLEX_15GXFD (14<<1)
113#define LINK_STATUS_SPEED_AND_DUPLEX_16GTFD (15<<1)
114#define LINK_STATUS_SPEED_AND_DUPLEX_16GXFD (15<<1)
115
116#define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020
117#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
118
119#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
120#define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080
121#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
122
123#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
124#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
125#define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800
126#define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000
127#define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000
128#define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000
129#define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000
130
131#define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000
132#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000
133
134#define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000
135#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000
136
137#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
138#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18)
139#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18)
140#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18)
141#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18)
142
143#define LINK_STATUS_SERDES_LINK 0x00100000
144
145#define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000
146#define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000
147#define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000
148#define LINK_STATUS_LINK_PARTNER_12GXFD_CAPABLE 0x01000000
149#define LINK_STATUS_LINK_PARTNER_12_5GXFD_CAPABLE 0x02000000
150#define LINK_STATUS_LINK_PARTNER_13GXFD_CAPABLE 0x04000000
151#define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE 0x08000000
152#define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE 0x10000000
153
154 u32 drv_pulse_mb;
155#define DRV_PULSE_SEQ_MASK 0x00007fff
156#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
157 /* The system time is in the format of
158 * (year-2001)*12*32 + month*32 + day. */
159#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
160 /* Indicate to the firmware not to go into the
161 * OS-absent when it is not getting driver pulse.
162 * This is used for debugging as well for PXE(MBA). */
163
164 u32 mcp_pulse_mb;
165#define MCP_PULSE_SEQ_MASK 0x00007fff
166#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
167 /* Indicates to the driver not to assert due to lack
168 * of MCP response */
169#define MCP_EVENT_MASK 0xffff0000
170#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
171
172};
173
174 14
175/**************************************************************************** 15/****************************************************************************
176 * Shared HW configuration * 16 * Shared HW configuration *
@@ -249,7 +89,7 @@ struct shared_hw_cfg { /* NVRAM Offset */
249#define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000 89#define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000
250#define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000 90#define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000
251 91
252#define SHARED_HW_CFG_HIDE_FUNC1 0x00002000 92#define SHARED_HW_CFG_HIDE_PORT1 0x00002000
253 93
254 u32 power_dissipated; /* 0x11c */ 94 u32 power_dissipated; /* 0x11c */
255#define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 95#define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000
@@ -290,6 +130,8 @@ struct shared_hw_cfg { /* NVRAM Offset */
290#define SHARED_HW_CFG_BOARD_TYPE_BCM957710T1015G 0x00000006 130#define SHARED_HW_CFG_BOARD_TYPE_BCM957710T1015G 0x00000006
291#define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1020G 0x00000007 131#define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1020G 0x00000007
292#define SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G 0x00000008 132#define SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G 0x00000008
133#define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G 0x00000009
134#define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G 0x0000000a
293 135
294#define SHARED_HW_CFG_BOARD_VER_MASK 0xffff0000 136#define SHARED_HW_CFG_BOARD_VER_MASK 0xffff0000
295#define SHARED_HW_CFG_BOARD_VER_SHIFT 16 137#define SHARED_HW_CFG_BOARD_VER_SHIFT 16
@@ -304,13 +146,12 @@ struct shared_hw_cfg { /* NVRAM Offset */
304 146
305}; 147};
306 148
149
307/**************************************************************************** 150/****************************************************************************
308 * Port HW configuration * 151 * Port HW configuration *
309 ****************************************************************************/ 152 ****************************************************************************/
310struct port_hw_cfg { /* function 0: 0x12c-0x2bb, function 1: 0x2bc-0x44b */ 153struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
311 154
312 /* Fields below are port specific (in anticipation of dual port
313 devices */
314 u32 pci_id; 155 u32 pci_id;
315#define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000 156#define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000
316#define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff 157#define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff
@@ -420,6 +261,8 @@ struct port_hw_cfg { /* function 0: 0x12c-0x2bb, function 1: 0x2bc-0x44b */
420#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500 261#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500
421#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8276 0x00000600 262#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8276 0x00000600
422#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700 263#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700
264#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800
265#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
423#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 266#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
424 267
425#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff 268#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff
@@ -462,11 +305,13 @@ struct port_hw_cfg { /* function 0: 0x12c-0x2bb, function 1: 0x2bc-0x44b */
462 305
463}; 306};
464 307
308
465/**************************************************************************** 309/****************************************************************************
466 * Shared Feature configuration * 310 * Shared Feature configuration *
467 ****************************************************************************/ 311 ****************************************************************************/
468struct shared_feat_cfg { /* NVRAM Offset */ 312struct shared_feat_cfg { /* NVRAM Offset */
469 u32 bmc_common; /* 0x450 */ 313
314 u32 config; /* 0x450 */
470#define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 315#define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001
471 316
472}; 317};
@@ -475,7 +320,8 @@ struct shared_feat_cfg { /* NVRAM Offset */
475/**************************************************************************** 320/****************************************************************************
476 * Port Feature configuration * 321 * Port Feature configuration *
477 ****************************************************************************/ 322 ****************************************************************************/
478struct port_feat_cfg { /* function 0: 0x454-0x4c7, function 1: 0x4c8-0x53b */ 323struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
324
479 u32 config; 325 u32 config;
480#define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f 326#define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f
481#define PORT_FEATURE_BAR1_SIZE_SHIFT 0 327#define PORT_FEATURE_BAR1_SIZE_SHIFT 0
@@ -609,8 +455,7 @@ struct port_feat_cfg { /* function 0: 0x454-0x4c7, function 1: 0x4c8-0x53b */
609#define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe 455#define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe
610#define PORT_FEATURE_SMBUS_ADDR_SHIFT 1 456#define PORT_FEATURE_SMBUS_ADDR_SHIFT 1
611 457
612 u32 iscsib_boot_cfg; 458 u32 reserved1;
613#define PORT_FEATURE_ISCSIB_SKIP_TARGET_BOOT 0x00000001
614 459
615 u32 link_config; /* Used as HW defaults for the driver */ 460 u32 link_config; /* Used as HW defaults for the driver */
616#define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000 461#define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000
@@ -657,20 +502,201 @@ struct port_feat_cfg { /* function 0: 0x454-0x4c7, function 1: 0x4c8-0x53b */
657}; 502};
658 503
659 504
505/*****************************************************************************
506 * Device Information *
507 *****************************************************************************/
508struct dev_info { /* size */
509
510 u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */
511
512 struct shared_hw_cfg shared_hw_config; /* 40 */
513
514 struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */
515
516 struct shared_feat_cfg shared_feature_config; /* 4 */
517
518 struct port_feat_cfg port_feature_config[PORT_MAX]; /* 116*2=232 */
519
520};
521
522
523#define FUNC_0 0
524#define FUNC_1 1
525#define E1_FUNC_MAX 2
526#define FUNC_MAX E1_FUNC_MAX
527
528
529/* This value (in milliseconds) determines the frequency of the driver
530 * issuing the PULSE message code. The firmware monitors this periodic
531 * pulse to determine when to switch to an OS-absent mode. */
532#define DRV_PULSE_PERIOD_MS 250
533
534/* This value (in milliseconds) determines how long the driver should
535 * wait for an acknowledgement from the firmware before timing out. Once
536 * the firmware has timed out, the driver will assume there is no firmware
537 * running and there won't be any firmware-driver synchronization during a
538 * driver reset. */
539#define FW_ACK_TIME_OUT_MS 5000
540
541#define FW_ACK_POLL_TIME_MS 1
542
543#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
544
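With the values above this works out to FW_ACK_NUM_OF_POLL = 5000 / 1 = 5000, i.e. the driver may poll the firmware mailbox up to 5000 times at 1 ms intervals before giving up on an acknowledgement.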
545/* LED Blink rate that will achieve ~15.9Hz */
546#define LED_BLINK_RATE_VAL 480
547
660/**************************************************************************** 548/****************************************************************************
661 * Device Information * 549 * Driver <-> FW Mailbox *
662 ****************************************************************************/ 550 ****************************************************************************/
663struct dev_info { /* size */ 551struct drv_port_mb {
552
553 u32 link_status;
554 /* Driver should update this field on any link change event */
555
556#define LINK_STATUS_LINK_FLAG_MASK 0x00000001
557#define LINK_STATUS_LINK_UP 0x00000001
558#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
559#define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1)
560#define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1)
561#define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1)
562#define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1)
563#define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1)
564#define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1)
565#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1)
566#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1)
567#define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1)
568#define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1)
569#define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1)
570#define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1)
571#define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1)
572#define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1)
573#define LINK_STATUS_SPEED_AND_DUPLEX_12GTFD (11<<1)
574#define LINK_STATUS_SPEED_AND_DUPLEX_12GXFD (11<<1)
575#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD (12<<1)
576#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD (12<<1)
577#define LINK_STATUS_SPEED_AND_DUPLEX_13GTFD (13<<1)
578#define LINK_STATUS_SPEED_AND_DUPLEX_13GXFD (13<<1)
579#define LINK_STATUS_SPEED_AND_DUPLEX_15GTFD (14<<1)
580#define LINK_STATUS_SPEED_AND_DUPLEX_15GXFD (14<<1)
581#define LINK_STATUS_SPEED_AND_DUPLEX_16GTFD (15<<1)
582#define LINK_STATUS_SPEED_AND_DUPLEX_16GXFD (15<<1)
583
584#define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020
585#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
586
587#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
588#define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080
589#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
590
591#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
592#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
593#define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800
594#define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000
595#define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000
596#define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000
597#define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000
598
599#define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000
600#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000
601
602#define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000
603#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000
604
605#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
606#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18)
607#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18)
608#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18)
609#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18)
610
611#define LINK_STATUS_SERDES_LINK 0x00100000
612
613#define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000
614#define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000
615#define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000
616#define LINK_STATUS_LINK_PARTNER_12GXFD_CAPABLE 0x01000000
617#define LINK_STATUS_LINK_PARTNER_12_5GXFD_CAPABLE 0x02000000
618#define LINK_STATUS_LINK_PARTNER_13GXFD_CAPABLE 0x04000000
619#define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE 0x08000000
620#define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE 0x10000000
664 621
665 u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */ 622 u32 reserved[3];
666 623
667 struct shared_hw_cfg shared_hw_config; /* 40 */ 624};
625
626
627struct drv_func_mb {
628
629 u32 drv_mb_header;
630#define DRV_MSG_CODE_MASK 0xffff0000
631#define DRV_MSG_CODE_LOAD_REQ 0x10000000
632#define DRV_MSG_CODE_LOAD_DONE 0x11000000
633#define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000
634#define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000
635#define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000
636#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
637#define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000
638#define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000
639#define DRV_MSG_CODE_VALIDATE_KEY 0x70000000
640#define DRV_MSG_CODE_GET_CURR_KEY 0x80000000
641#define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
642#define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
643#define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
644
645#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
646
647 u32 drv_mb_param;
648
649 u32 fw_mb_header;
650#define FW_MSG_CODE_MASK 0xffff0000
651#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000
652#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
653#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
654#define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000
655#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
656#define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000
657#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000
658#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000
659#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
660#define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000
661#define FW_MSG_CODE_DIAG_REFUSE 0x50200000
662#define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000
663#define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000
664#define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000
665#define FW_MSG_CODE_GET_KEY_DONE 0x80100000
666#define FW_MSG_CODE_NO_KEY 0x80f00000
667#define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000
668#define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000
669#define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000
670#define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000
671#define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000
672#define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000
673
674#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
675
676 u32 fw_mb_param;
677
678 u32 drv_pulse_mb;
679#define DRV_PULSE_SEQ_MASK 0x00007fff
680#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
681 /* The system time is in the format of
682 * (year-2001)*12*32 + month*32 + day. */
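As a worked example of that encoding (assuming 1-based month and day numbers), 14 March 2008 would be stored as (2008-2001)*12*32 + 3*32 + 14 = 2688 + 96 + 14 = 2798.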
683#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
684 /* Indicate to the firmware not to go into the
685 * OS-absent when it is not getting driver pulse.
686 * This is used for debugging as well for PXE(MBA). */
668 687
669 struct port_hw_cfg port_hw_config[FUNC_MAX]; /* 400*2=800 */ 688 u32 mcp_pulse_mb;
689#define MCP_PULSE_SEQ_MASK 0x00007fff
690#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
691 /* Indicates to the driver not to assert due to lack
692 * of MCP response */
693#define MCP_EVENT_MASK 0xffff0000
694#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
670 695
671 struct shared_feat_cfg shared_feature_config; /* 4 */ 696 u32 iscsi_boot_signature;
697 u32 iscsi_boot_block_offset;
672 698
673 struct port_feat_cfg port_feature_config[FUNC_MAX];/* 116*2=232 */ 699 u32 reserved[3];
674 700
675}; 701};
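A hedged sketch of the request/acknowledge handshake implied by the mailbox fields above: the driver writes a DRV_MSG_CODE_* command tagged with a sequence number into drv_mb_header and polls fw_mb_header until the firmware echoes that sequence number. reg_rd()/reg_wr()/msleep() and the mailbox offsets are hypothetical stand-ins rather than the driver's real accessors; only the command codes and sequence-number masks come from this header.

#include <stdint.h>

#define DRV_MSG_CODE_LOAD_REQ		0x10000000
#define DRV_MSG_SEQ_NUMBER_MASK		0x0000ffff
#define FW_MSG_CODE_MASK		0xffff0000
#define FW_MSG_SEQ_NUMBER_MASK		0x0000ffff
#define FW_ACK_NUM_OF_POLL		5000	/* 5000 ms timeout / 1 ms per poll */

extern uint32_t reg_rd(uint32_t off);			/* hypothetical accessor */
extern void reg_wr(uint32_t off, uint32_t val);		/* hypothetical accessor */
extern void msleep(unsigned int ms);			/* hypothetical sleep helper */

/* Post a command and wait for the firmware to echo the sequence number;
 * returns the FW_MSG_CODE_* part of the reply, or 0 on timeout. */
static uint32_t fw_command(uint32_t drv_mb_off, uint32_t fw_mb_off,
			   uint32_t command, uint16_t *seq)
{
	uint32_t reply;
	int cnt;

	*seq = (*seq + 1) & DRV_MSG_SEQ_NUMBER_MASK;
	reg_wr(drv_mb_off, command | *seq);

	for (cnt = 0; cnt < FW_ACK_NUM_OF_POLL; cnt++) {
		msleep(1);
		reply = reg_rd(fw_mb_off);
		if ((reply & FW_MSG_SEQ_NUMBER_MASK) == *seq)
			return reply & FW_MSG_CODE_MASK;
	}
	return 0;	/* no acknowledgement within FW_ACK_TIME_OUT_MS */
}

A load request would then look like fw_command(drv_mb_off, fw_mb_off, DRV_MSG_CODE_LOAD_REQ, &seq), with the caller checking the reply against FW_MSG_CODE_DRV_LOAD_COMMON/PORT/FUNCTION versus FW_MSG_CODE_DRV_LOAD_REFUSED.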
676 702
@@ -678,9 +704,8 @@ struct dev_info { /* size */
678/**************************************************************************** 704/****************************************************************************
679 * Management firmware state * 705 * Management firmware state *
680 ****************************************************************************/ 706 ****************************************************************************/
681/* Allocate 320 bytes for management firmware: still not known exactly 707/* Allocate 440 bytes for management firmware */
682 * how much IMD needs. */ 708#define MGMTFW_STATE_WORD_SIZE 110
683#define MGMTFW_STATE_WORD_SIZE 80
684 709
685struct mgmtfw_state { 710struct mgmtfw_state {
686 u32 opaque[MGMTFW_STATE_WORD_SIZE]; 711 u32 opaque[MGMTFW_STATE_WORD_SIZE];
@@ -691,31 +716,40 @@ struct mgmtfw_state {
691 * Shared Memory Region * 716 * Shared Memory Region *
692 ****************************************************************************/ 717 ****************************************************************************/
693struct shmem_region { /* SharedMem Offset (size) */ 718struct shmem_region { /* SharedMem Offset (size) */
694 u32 validity_map[FUNC_MAX]; /* 0x0 (4 * 2 = 0x8) */ 719
695#define SHR_MEM_VALIDITY_PCI_CFG 0x00000001 720 u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */
696#define SHR_MEM_VALIDITY_MB 0x00000002 721#define SHR_MEM_FORMAT_REV_ID ('A'<<24)
697#define SHR_MEM_VALIDITY_DEV_INFO 0x00000004 722#define SHR_MEM_FORMAT_REV_MASK 0xff000000
723 /* validity bits */
724#define SHR_MEM_VALIDITY_PCI_CFG 0x00100000
725#define SHR_MEM_VALIDITY_MB 0x00200000
726#define SHR_MEM_VALIDITY_DEV_INFO 0x00400000
727#define SHR_MEM_VALIDITY_RESERVED 0x00000007
698 /* One licensing bit should be set */ 728 /* One licensing bit should be set */
699#define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 729#define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
700#define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008 730#define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
701#define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010 731#define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
702#define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020 732#define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
733 /* Active MFW */
734#define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
735#define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040
736#define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080
737#define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0
738#define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
739#define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
703 740
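Since 'A' is 0x41, SHR_MEM_FORMAT_REV_ID evaluates to 0x41000000, so a shared memory region written in this format carries 0x41 in the byte that SHR_MEM_FORMAT_REV_MASK (0xff000000) selects out of each validity word.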
704 struct drv_fw_mb drv_fw_mb[FUNC_MAX]; /* 0x8 (28 * 2 = 0x38) */ 741 struct dev_info dev_info; /* 0x8 (0x438) */
705
706 struct dev_info dev_info; /* 0x40 (0x438) */
707 742
708#ifdef _LICENSE_H 743 u8 reserved[52*PORT_MAX];
709 license_key_t drv_lic_key[FUNC_MAX]; /* 0x478 (52 * 2 = 0x68) */
710#else /* Linux! */
711 u8 reserved[52*FUNC_MAX];
712#endif
713 744
714 /* FW information (for internal FW use) */ 745 /* FW information (for internal FW use) */
715 u32 fw_info_fio_offset; /* 0x4e0 (0x4) */ 746 u32 fw_info_fio_offset; /* 0x4a8 (0x4) */
716 struct mgmtfw_state mgmtfw_state; /* 0x4e4 (0x140) */ 747 struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
748
749 struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
750 struct drv_func_mb func_mb[FUNC_MAX]; /* 0x684 (44*2=0x58) */
717 751
718}; /* 0x624 */ 752}; /* 0x6dc */
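A hedged sketch of locating a function's mailbox inside this layout (it assumes this header is included so the structures are visible; the accessor name is illustrative). Using the sizes noted in the comments above, func_mb starts at 0x684 and each struct drv_func_mb is 44 (0x2c) bytes, so function 1's mailbox begins at 0x684 + 0x2c = 0x6b0 and the region ends at 0x6dc.

#include <stddef.h>	/* offsetof() */
#include <stdint.h>

/* Byte offset of a given function's mailbox within struct shmem_region;
 * add the base read from MISC_REG_SHARED_MEM_ADDR to get a GRC address. */
static uint32_t func_mb_offset(int func)
{
	return (uint32_t)(offsetof(struct shmem_region, func_mb) +
			  (size_t)func * sizeof(struct drv_func_mb));
}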
719 753
720 754
721#define BCM_5710_FW_MAJOR_VERSION 4 755#define BCM_5710_FW_MAJOR_VERSION 4
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index 04f93bff2ef4..dcaecc53bdb1 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -1,6 +1,6 @@
1/* bnx2x_init.h: Broadcom Everest network driver. 1/* bnx2x_init.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007 Broadcom Corporation 3 * Copyright (c) 2007-2008 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -409,7 +409,7 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
409 409
410 pci_read_config_word(bp->pdev, 410 pci_read_config_word(bp->pdev,
411 bp->pcie_cap + PCI_EXP_DEVCTL, (u16 *)&val); 411 bp->pcie_cap + PCI_EXP_DEVCTL, (u16 *)&val);
412 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", val); 412 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", (u16)val);
413 w_order = ((val & PCI_EXP_DEVCTL_PAYLOAD) >> 5); 413 w_order = ((val & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
414 r_order = ((val & PCI_EXP_DEVCTL_READRQ) >> 12); 414 r_order = ((val & PCI_EXP_DEVCTL_READRQ) >> 12);
415 415
@@ -472,10 +472,14 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
472 REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val); 472 REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
473 473
474 REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order); 474 REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
475 REG_WR(bp, PXP2_REG_RQ_WR_MBS0 + 8, w_order); 475 REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
476 REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order); 476 REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
477 REG_WR(bp, PXP2_REG_RQ_RD_MBS0 + 8, r_order); 477 REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
478 478
479 if (r_order == MAX_RD_ORD)
480 REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
481
482 REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
479 REG_WR(bp, PXP2_REG_WR_DMAE_TH, (128 << w_order)/16); 483 REG_WR(bp, PXP2_REG_WR_DMAE_TH, (128 << w_order)/16);
480} 484}
481 485
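To make the burst-size programming above concrete: for a device whose PCIe max payload size is 256 bytes, the PCI_EXP_DEVCTL_PAYLOAD field reads 1 (the PCIe encoding starts at 128 bytes and doubles per step), so w_order = 1 and the hunk programs PXP2_REG_WR_DMAE_TH = (128 << 1)/16 = 16 and PXP2_REG_WR_USDMDP_TH = 0x18 << 1 = 0x30.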
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 86055297ab02..5a1aa0b55044 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -1,6 +1,6 @@
1/* bnx2x_reg.h: Broadcom Everest network driver. 1/* bnx2x_reg.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007 Broadcom Corporation 3 * Copyright (c) 2007-2008 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -24,6 +24,8 @@
24#define BRB1_REG_BRB1_INT_STS 0x6011c 24#define BRB1_REG_BRB1_INT_STS 0x6011c
25/* [RW 4] Parity mask register #0 read/write */ 25/* [RW 4] Parity mask register #0 read/write */
26#define BRB1_REG_BRB1_PRTY_MASK 0x60138 26#define BRB1_REG_BRB1_PRTY_MASK 0x60138
27/* [R 4] Parity register #0 read */
28#define BRB1_REG_BRB1_PRTY_STS 0x6012c
27/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At 29/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
28 address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address 30 address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
29 BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. */ 31 BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. */
@@ -281,6 +283,8 @@
281#define CDU_REG_CDU_INT_STS 0x101030 283#define CDU_REG_CDU_INT_STS 0x101030
282/* [RW 5] Parity mask register #0 read/write */ 284/* [RW 5] Parity mask register #0 read/write */
283#define CDU_REG_CDU_PRTY_MASK 0x10104c 285#define CDU_REG_CDU_PRTY_MASK 0x10104c
286/* [R 5] Parity register #0 read */
287#define CDU_REG_CDU_PRTY_STS 0x101040
284/* [RC 32] logging of error data in case of a CDU load error: 288/* [RC 32] logging of error data in case of a CDU load error:
285 {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0]; active_error; 289
286 type_error; actual_active; actual_compressed_context}; */ 290
@@ -308,6 +312,8 @@
308#define CFC_REG_CFC_INT_STS_CLR 0x104100 312#define CFC_REG_CFC_INT_STS_CLR 0x104100
309/* [RW 4] Parity mask register #0 read/write */ 313/* [RW 4] Parity mask register #0 read/write */
310#define CFC_REG_CFC_PRTY_MASK 0x104118 314#define CFC_REG_CFC_PRTY_MASK 0x104118
315/* [R 4] Parity register #0 read */
316#define CFC_REG_CFC_PRTY_STS 0x10410c
311/* [RW 21] CID cam access (21:1 - Data; valid - 0) */ 317
312#define CFC_REG_CID_CAM 0x104800 318#define CFC_REG_CID_CAM 0x104800
313#define CFC_REG_CONTROL0 0x104028 319#define CFC_REG_CONTROL0 0x104028
@@ -354,6 +360,8 @@
354#define CSDM_REG_CSDM_INT_MASK_1 0xc22ac 360#define CSDM_REG_CSDM_INT_MASK_1 0xc22ac
355/* [RW 11] Parity mask register #0 read/write */ 361/* [RW 11] Parity mask register #0 read/write */
356#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc 362#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
363/* [R 11] Parity register #0 read */
364#define CSDM_REG_CSDM_PRTY_STS 0xc22b0
357#define CSDM_REG_ENABLE_IN1 0xc2238 365#define CSDM_REG_ENABLE_IN1 0xc2238
358#define CSDM_REG_ENABLE_IN2 0xc223c 366#define CSDM_REG_ENABLE_IN2 0xc223c
359#define CSDM_REG_ENABLE_OUT1 0xc2240 367#define CSDM_REG_ENABLE_OUT1 0xc2240
@@ -438,6 +446,9 @@
438/* [RW 32] Parity mask register #0 read/write */ 446/* [RW 32] Parity mask register #0 read/write */
439#define CSEM_REG_CSEM_PRTY_MASK_0 0x200130 447#define CSEM_REG_CSEM_PRTY_MASK_0 0x200130
440#define CSEM_REG_CSEM_PRTY_MASK_1 0x200140 448#define CSEM_REG_CSEM_PRTY_MASK_1 0x200140
449/* [R 32] Parity register #0 read */
450#define CSEM_REG_CSEM_PRTY_STS_0 0x200124
451#define CSEM_REG_CSEM_PRTY_STS_1 0x200134
441#define CSEM_REG_ENABLE_IN 0x2000a4 452#define CSEM_REG_ENABLE_IN 0x2000a4
442#define CSEM_REG_ENABLE_OUT 0x2000a8 453#define CSEM_REG_ENABLE_OUT 0x2000a8
443/* [RW 32] This address space contains all registers and memories that are 454/* [RW 32] This address space contains all registers and memories that are
@@ -526,6 +537,8 @@
526#define CSEM_REG_TS_9_AS 0x20005c 537#define CSEM_REG_TS_9_AS 0x20005c
527/* [RW 1] Parity mask register #0 read/write */ 538/* [RW 1] Parity mask register #0 read/write */
528#define DBG_REG_DBG_PRTY_MASK 0xc0a8 539#define DBG_REG_DBG_PRTY_MASK 0xc0a8
540/* [R 1] Parity register #0 read */
541#define DBG_REG_DBG_PRTY_STS 0xc09c
529/* [RW 2] debug only: These bits indicate the credit for PCI request type 4 542/* [RW 2] debug only: These bits indicate the credit for PCI request type 4
530 interface; MUST be configured AFTER pci_ext_buffer_strt_addr_lsb/msb are 543 interface; MUST be configured AFTER pci_ext_buffer_strt_addr_lsb/msb are
531 configured */ 544 configured */
@@ -543,6 +556,8 @@
543#define DMAE_REG_DMAE_INT_MASK 0x102054 556#define DMAE_REG_DMAE_INT_MASK 0x102054
544/* [RW 4] Parity mask register #0 read/write */ 557/* [RW 4] Parity mask register #0 read/write */
545#define DMAE_REG_DMAE_PRTY_MASK 0x102064 558#define DMAE_REG_DMAE_PRTY_MASK 0x102064
559/* [R 4] Parity register #0 read */
560#define DMAE_REG_DMAE_PRTY_STS 0x102058
546/* [RW 1] Command 0 go. */ 561/* [RW 1] Command 0 go. */
547#define DMAE_REG_GO_C0 0x102080 562#define DMAE_REG_GO_C0 0x102080
548/* [RW 1] Command 1 go. */ 563/* [RW 1] Command 1 go. */
@@ -623,6 +638,8 @@
623#define DORQ_REG_DORQ_INT_STS_CLR 0x170178 638#define DORQ_REG_DORQ_INT_STS_CLR 0x170178
624/* [RW 2] Parity mask register #0 read/write */ 639/* [RW 2] Parity mask register #0 read/write */
625#define DORQ_REG_DORQ_PRTY_MASK 0x170190 640#define DORQ_REG_DORQ_PRTY_MASK 0x170190
641/* [R 2] Parity register #0 read */
642#define DORQ_REG_DORQ_PRTY_STS 0x170184
626/* [RW 8] The address to write the DPM CID to STORM. */ 643/* [RW 8] The address to write the DPM CID to STORM. */
627#define DORQ_REG_DPM_CID_ADDR 0x170044 644#define DORQ_REG_DPM_CID_ADDR 0x170044
628/* [RW 5] The DPM mode CID extraction offset. */ 645/* [RW 5] The DPM mode CID extraction offset. */
@@ -692,6 +709,8 @@
692#define HC_REG_CONFIG_1 0x108004 709#define HC_REG_CONFIG_1 0x108004
693/* [RW 3] Parity mask register #0 read/write */ 710/* [RW 3] Parity mask register #0 read/write */
694#define HC_REG_HC_PRTY_MASK 0x1080a0 711#define HC_REG_HC_PRTY_MASK 0x1080a0
712/* [R 3] Parity register #0 read */
713#define HC_REG_HC_PRTY_STS 0x108094
695/* [RW 17] status block interrupt mask; one in each bit means unmask; zero 714
696 in each bit means mask; bit 0 - default SB; bit 1 - SB_0; bit 2 - SB_1... 715 in each bit means mask; bit 0 - default SB; bit 1 - SB_0; bit 2 - SB_1...
697 bit 16- SB_15; addr 0 - port 0; addr 1 - port 1 */ 716 bit 16- SB_15; addr 0 - port 0; addr 1 - port 1 */
@@ -1127,6 +1146,7 @@
1127#define MISC_REG_AEU_GENERAL_ATTN_17 0xa044 1146#define MISC_REG_AEU_GENERAL_ATTN_17 0xa044
1128#define MISC_REG_AEU_GENERAL_ATTN_18 0xa048 1147#define MISC_REG_AEU_GENERAL_ATTN_18 0xa048
1129#define MISC_REG_AEU_GENERAL_ATTN_19 0xa04c 1148#define MISC_REG_AEU_GENERAL_ATTN_19 0xa04c
1149#define MISC_REG_AEU_GENERAL_ATTN_10 0xa028
1130#define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c 1150#define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c
1131#define MISC_REG_AEU_GENERAL_ATTN_2 0xa008 1151#define MISC_REG_AEU_GENERAL_ATTN_2 0xa008
1132#define MISC_REG_AEU_GENERAL_ATTN_20 0xa050 1152#define MISC_REG_AEU_GENERAL_ATTN_20 0xa050
@@ -1135,6 +1155,9 @@
1135#define MISC_REG_AEU_GENERAL_ATTN_4 0xa010 1155#define MISC_REG_AEU_GENERAL_ATTN_4 0xa010
1136#define MISC_REG_AEU_GENERAL_ATTN_5 0xa014 1156#define MISC_REG_AEU_GENERAL_ATTN_5 0xa014
1137#define MISC_REG_AEU_GENERAL_ATTN_6 0xa018 1157#define MISC_REG_AEU_GENERAL_ATTN_6 0xa018
1158#define MISC_REG_AEU_GENERAL_ATTN_7 0xa01c
1159#define MISC_REG_AEU_GENERAL_ATTN_8 0xa020
1160#define MISC_REG_AEU_GENERAL_ATTN_9 0xa024
1138/* [RW 32] first 32b for inverting the input for function 0; for each bit: 1161/* [RW 32] first 32b for inverting the input for function 0; for each bit:
1139 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for 1162 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for
1140 function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp; 1163 function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp;
@@ -1183,6 +1206,40 @@
1183 starts at 0x0 for the A0 tape-out and increments by one for each 1206 starts at 0x0 for the A0 tape-out and increments by one for each
1184 all-layer tape-out. */ 1207 all-layer tape-out. */
1185#define MISC_REG_CHIP_REV 0xa40c 1208#define MISC_REG_CHIP_REV 0xa40c
1209/* [RW 32] The following driver registers (1..6) represent 6 drivers and 32
1210 clients. Each client can be controlled by one driver only. A one in a
1211 bit means that this driver controls the corresponding client (e.g. bit 5
1212 set means this driver controls client number 5). addr1 = set; addr0 =
1213 clear; a read from either address returns the same result = status. A
1214 write to address 1 requests control of all the clients whose bit is set
1215 in the write command. If a client is free (the corresponding bit in all
1216 the other drivers is clear) a one is written to this driver's register;
1217 if the client isn't free the bit remains zero. If the corresponding bit
1218 is already set (i.e. the driver requests control of a client it already
1219 controls) the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt is asserted.
1220 A write to address 0 requests release of all the clients whose bit is
1221 set in the write command. If the corresponding bit is already clear
1222 (i.e. the driver requests release of a client it doesn't control) the
1223 ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt is
1224 asserted. */
1225#define MISC_REG_DRIVER_CONTROL_1 0xa510
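A hedged sketch of the set/clear protocol described above. reg_rd()/reg_wr() are hypothetical accessors, and treating "address 1" as MISC_REG_DRIVER_CONTROL_1 + 4 is an assumption about what the comment's addr1/addr0 means:

#include <stdint.h>
#include <stdbool.h>

#define MISC_REG_DRIVER_CONTROL_1	0xa510

extern uint32_t reg_rd(uint32_t off);		/* hypothetical accessor */
extern void reg_wr(uint32_t off, uint32_t val);	/* hypothetical accessor */

/* Request ownership of one client bit; true if this driver now owns it. */
static bool driver_control_acquire(uint32_t client_bit)
{
	reg_wr(MISC_REG_DRIVER_CONTROL_1 + 4, client_bit);	/* addr1 = set */
	return (reg_rd(MISC_REG_DRIVER_CONTROL_1) & client_bit) != 0;
}

/* Release a client bit this driver owns. */
static void driver_control_release(uint32_t client_bit)
{
	reg_wr(MISC_REG_DRIVER_CONTROL_1, client_bit);		/* addr0 = clear */
}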
1226/* [RW 32] GPIO. [31-28] FLOAT port 1; [27-24] FLOAT port 0; When any of
1227 these bits is written as a '1'; the corresponding GPIO bit will turn off
1228 its drivers and become an input. This is the reset state of all GPIO
1229 pins. The read value of these bits will be a '1' if the last command
1230 (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff).
1231 [23-20] CLR port 1; [19-16] CLR port 0; When any of these bits is written
1232 as a '1'; the corresponding GPIO bit will drive low. The read value of
1233 these bits will be a '1' if the last command (#SET; #CLR; or #FLOAT) for
1234 this bit was a #CLR. (reset value 0). [15-12] SET port 1; [11-8] SET
1235 port 0; When any of these bits is written as a '1'; the corresponding
1236 GPIO bit will drive high (if it has that capability). The read value of
1237 these bits will be a '1' if the last command (#SET; #CLR; or #FLOAT) for
1238 this bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE
1239 port 0; RO; These bits indicate the read value of each of the eight GPIO
1240 pins. This is the result value of the pin; not the drive value. Writing
1241 these bits will have no effect. */
1242#define MISC_REG_GPIO 0xa490
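A hedged sketch of driving a GPIO pin through the SET/CLR fields described above, using the MISC_REGISTERS_GPIO_* positions added further down in this patch. reg_wr() is a hypothetical accessor, and the write-only policy (no read-modify-write of the FLOAT field) is an illustrative simplification:

#include <stdint.h>

#define MISC_REG_GPIO			0xa490
#define MISC_REGISTERS_GPIO_SET_POS	8
#define MISC_REGISTERS_GPIO_CLR_POS	16
#define MISC_REGISTERS_GPIO_PORT_SHIFT	4

extern void reg_wr(uint32_t off, uint32_t val);	/* hypothetical accessor */

/* Drive gpio_num (0..3) of the given port (0 or 1) high or low. */
static void gpio_write(int gpio_num, int port, int high)
{
	int shift = gpio_num + (port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	int pos = high ? MISC_REGISTERS_GPIO_SET_POS : MISC_REGISTERS_GPIO_CLR_POS;

	reg_wr(MISC_REG_GPIO, 1u << (shift + pos));
}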
1186/* [RW 1] Setting this bit enables a timer in the GRC block to timeout any 1243/* [RW 1] Setting this bit enables a timer in the GRC block to timeout any
1187 access that does not finish within 1244 access that does not finish within
1188 ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is 1245 ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is
@@ -1223,6 +1280,8 @@
1223#define MISC_REG_MISC_INT_MASK 0xa388 1280#define MISC_REG_MISC_INT_MASK 0xa388
1224/* [RW 1] Parity mask register #0 read/write */ 1281/* [RW 1] Parity mask register #0 read/write */
1225#define MISC_REG_MISC_PRTY_MASK 0xa398 1282#define MISC_REG_MISC_PRTY_MASK 0xa398
1283/* [R 1] Parity register #0 read */
1284#define MISC_REG_MISC_PRTY_STS 0xa38c
1226/* [RW 32] 32 LSB of storm PLL first register; reset val = 0x 071d2911. 1285/* [RW 32] 32 LSB of storm PLL first register; reset val = 0x 071d2911.
1227 inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1 1286 inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1
1228 divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1 1287 divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1
@@ -1264,6 +1323,55 @@
1264/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is 1323/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is
1265 shared with the driver resides */ 1324 shared with the driver resides */
1266#define MISC_REG_SHARED_MEM_ADDR 0xa2b4 1325#define MISC_REG_SHARED_MEM_ADDR 0xa2b4
1326/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1';
1327 the corresponding SPIO bit will turn off its drivers and become an
1328 input. This is the reset state of all SPIO pins. The read value of these
1329 bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this
1330 bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits
1331 is written as a '1'; the corresponding SPIO bit will drive low. The read
1332 value of these bits will be a '1' if that last command (#SET; #CLR; or
1333#FLOAT) for this bit was a #CLR. (reset value 0). [15-8] SET When any of
1334 these bits is written as a '1'; the corresponding SPIO bit will drive
1335 high (if it has that capability). The read value of these bits will be a
1336 '1' if that last command (#SET; #CLR; or #FLOAT) for this bit was a #SET.
1337 (reset value 0). [7-0] VALUE RO; These bits indicate the read value of
1338 each of the eight SPIO pins. This is the result value of the pin; not the
1339 drive value. Writing these bits will have no effect. Each 8-bit field
1340 is divided as follows: [0] VAUX Enable; when pulsed low; enables supply
1341 from VAUX. (This is an output pin only; the FLOAT field is not applicable
1342 for this pin); [1] VAUX Disable; when pulsed low; disables supply from
1343 VAUX. (This is an output pin only; FLOAT field is not applicable for this
1344 pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to
1345 select VAUX supply. (This is an output pin only; it is not controlled by
1346 the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT
1347 field is not applicable for this pin; only the VALUE field is relevant -
1348 it reflects the output value); [3] reserved; [4] spio_4; [5] spio_5; [6]
1349 Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP
1350 device ID select; read by UMP firmware. */
1351#define MISC_REG_SPIO 0xa4fc
1352/* [RW 8] These bits enable the SPIO_INTs to signal events to the IGU/MC
1353 according to the following map: [3:0] reserved; [4] spio_4; [5] spio_5;
1354 [7:6] reserved */
1355#define MISC_REG_SPIO_EVENT_EN 0xa2b8
1356/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bits clears the
1357 corresponding bit in the #OLD_VALUE register. This will acknowledge an
1358 interrupt on the falling edge of corresponding SPIO input (reset value
1359 0). [23-16] OLD_SET Writing a '1' to these bits sets the corresponding bit
1360 in the #OLD_VALUE register. This will acknowledge an interrupt on the
1361 rising edge of corresponding SPIO input (reset value 0). [15-8] OLD_VALUE
1362 RO; These bits indicate the old value of the SPIO input value. When the
1363 ~INT_STATE bit is set; this bit indicates the OLD value of the pin such
1364 that if ~INT_STATE is set and this bit is '0'; then the interrupt is due
1365 to a low to high edge. If ~INT_STATE is set and this bit is '1'; then the
1366 interrupt is due to a high to low edge (reset value 0). [7-0] INT_STATE
1367 RO; These bits indicate the current SPIO interrupt state for each SPIO
1368 pin. This bit is cleared when the appropriate #OLD_SET or #OLD_CLR
1369 command bit is written. This bit is set when the SPIO input does not
1370 match the current value in #OLD_VALUE (reset value 0). */
1371#define MISC_REG_SPIO_INT 0xa500
1372/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
1373 loaded; 0-prepare; -unprepare */
1374#define MISC_REG_UNPREPARED 0xa424
1267#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0) 1375#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
1268#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9) 1376#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9)
1269#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15) 1377#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15)
@@ -1392,6 +1500,9 @@
1392#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044 1500#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044
1393/* [RW 1] Input enable for RX PBF LP IF */ 1501/* [RW 1] Input enable for RX PBF LP IF */
1394#define NIG_REG_PBF_LB_IN_EN 0x100b4 1502#define NIG_REG_PBF_LB_IN_EN 0x100b4
1503/* [RW 1] Value of this register will be transmitted to port swap when
1504 ~nig_registers_strap_override.strap_override =1 */
1505#define NIG_REG_PORT_SWAP 0x10394
1395/* [RW 1] output enable for RX parser descriptor IF */ 1506/* [RW 1] output enable for RX parser descriptor IF */
1396#define NIG_REG_PRS_EOP_OUT_EN 0x10104 1507#define NIG_REG_PRS_EOP_OUT_EN 0x10104
1397/* [RW 1] Input enable for RX parser request IF */ 1508/* [RW 1] Input enable for RX parser request IF */
@@ -1410,6 +1521,10 @@
1410#define NIG_REG_STAT2_BRB_OCTET 0x107e0 1521#define NIG_REG_STAT2_BRB_OCTET 0x107e0
1411#define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328 1522#define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328
1412#define NIG_REG_STATUS_INTERRUPT_PORT1 0x1032c 1523#define NIG_REG_STATUS_INTERRUPT_PORT1 0x1032c
1524/* [RW 1] port swap mux selection. If this register equals 0 then port
1525 swap is equal to the SPIO pin that inputs from ifmux_serdes_swap. If 1
1526 then port swap is equal to ~nig_registers_port_swap.port_swap */
1527#define NIG_REG_STRAP_OVERRIDE 0x10398
1413/* [RW 1] output enable for RX_XCM0 IF */ 1528/* [RW 1] output enable for RX_XCM0 IF */
1414#define NIG_REG_XCM0_OUT_EN 0x100f0 1529#define NIG_REG_XCM0_OUT_EN 0x100f0
1415/* [RW 1] output enable for RX_XCM1 IF */ 1530/* [RW 1] output enable for RX_XCM1 IF */
@@ -1499,6 +1614,8 @@
1499#define PB_REG_PB_INT_STS 0x1c 1614#define PB_REG_PB_INT_STS 0x1c
1500/* [RW 4] Parity mask register #0 read/write */ 1615/* [RW 4] Parity mask register #0 read/write */
1501#define PB_REG_PB_PRTY_MASK 0x38 1616#define PB_REG_PB_PRTY_MASK 0x38
1617/* [R 4] Parity register #0 read */
1618#define PB_REG_PB_PRTY_STS 0x2c
1502#define PRS_REG_A_PRSU_20 0x40134 1619#define PRS_REG_A_PRSU_20 0x40134
1503/* [R 8] debug only: CFC load request current credit. Transaction based. */ 1620/* [R 8] debug only: CFC load request current credit. Transaction based. */
1504#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164 1621#define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164
@@ -1590,6 +1707,8 @@
1590#define PRS_REG_PRS_INT_STS 0x40188 1707#define PRS_REG_PRS_INT_STS 0x40188
1591/* [RW 8] Parity mask register #0 read/write */ 1708/* [RW 8] Parity mask register #0 read/write */
1592#define PRS_REG_PRS_PRTY_MASK 0x401a4 1709#define PRS_REG_PRS_PRTY_MASK 0x401a4
1710/* [R 8] Parity register #0 read */
1711#define PRS_REG_PRS_PRTY_STS 0x40198
1593/* [RW 8] Context region for pure acknowledge packets. Used in CFC load 1712/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
1594 request message */ 1713 request message */
1595#define PRS_REG_PURE_REGIONS 0x40024 1714#define PRS_REG_PURE_REGIONS 0x40024
@@ -1718,6 +1837,9 @@
1718/* [RW 32] Parity mask register #0 read/write */ 1837/* [RW 32] Parity mask register #0 read/write */
1719#define PXP2_REG_PXP2_PRTY_MASK_0 0x120588 1838#define PXP2_REG_PXP2_PRTY_MASK_0 0x120588
1720#define PXP2_REG_PXP2_PRTY_MASK_1 0x120598 1839#define PXP2_REG_PXP2_PRTY_MASK_1 0x120598
1840/* [R 32] Parity register #0 read */
1841#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
1842#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
1721/* [R 1] Debug only: The 'almost full' indication from each fifo (gives 1843/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
1722 indication about backpressure) */ 1844 indication about backpressure) */
1723#define PXP2_REG_RD_ALMOST_FULL_0 0x120424 1845#define PXP2_REG_RD_ALMOST_FULL_0 0x120424
@@ -1911,6 +2033,8 @@
1911#define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8 2033#define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8
1912/* [WB 53] Onchip address table */ 2034/* [WB 53] Onchip address table */
1913#define PXP2_REG_RQ_ONCHIP_AT 0x122000 2035#define PXP2_REG_RQ_ONCHIP_AT 0x122000
2036/* [RW 13] Pending read limiter threshold; in Dwords */
2037#define PXP2_REG_RQ_PDR_LIMIT 0x12033c
1914/* [RW 2] Endian mode for qm */ 2038/* [RW 2] Endian mode for qm */
1915#define PXP2_REG_RQ_QM_ENDIAN_M 0x120194 2039#define PXP2_REG_RQ_QM_ENDIAN_M 0x120194
1916/* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k; 2040/* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k;
@@ -1921,6 +2045,9 @@
1921/* [RW 3] Max burst size field for read requests port 0; 000 - 128B; 2045
1922 001:256B; 010: 512B; 011:1K; 100:2K; 101:4K */ 2046
1923#define PXP2_REG_RQ_RD_MBS0 0x120160 2047#define PXP2_REG_RQ_RD_MBS0 0x120160
2048/* [RW 3] Max burst size field for read requests port 1; 000 - 128B;
2049 001:256B; 010: 512B; 011:1K; 100:2K; 101:4K */
2050#define PXP2_REG_RQ_RD_MBS1 0x120168
1924/* [RW 2] Endian mode for src */ 2051/* [RW 2] Endian mode for src */
1925#define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c 2052#define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c
1926/* [RW 3] page size in L2P table for SRC module; -4k; -8k; -16k; -32k; -64k; 2053/* [RW 3] page size in L2P table for SRC module; -4k; -8k; -16k; -32k; -64k;
@@ -2000,10 +2127,17 @@
2000/* [RW 3] Max burst size field for write requests port 0; 000 - 128B; 2127
2001 001:256B; 010: 512B; */ 2128 001:256B; 010: 512B; */
2002#define PXP2_REG_RQ_WR_MBS0 0x12015c 2129#define PXP2_REG_RQ_WR_MBS0 0x12015c
2130/* [RW 3] Max burst size field for write requests port 1; 000 - 128B;
2131 001:256B; 010: 512B; */
2132#define PXP2_REG_RQ_WR_MBS1 0x120164
2003/* [RW 10] if the number of entries in the dmae fifo is higher than this 2133
2004 threshold then the has_payload indication will be asserted; the default 2134
2005 value should be greater than the write MBS size! */ 2135
2006#define PXP2_REG_WR_DMAE_TH 0x120368 2136#define PXP2_REG_WR_DMAE_TH 0x120368
2137/* [RW 10] if the number of entries in the usdmdp fifo is higher than this
2138 threshold then the has_payload indication will be asserted; the default
2139 value should be greater than the write MBS size! */
2140#define PXP2_REG_WR_USDMDP_TH 0x120348
2007/* [R 1] debug only: Indication if PSWHST arbiter is idle */ 2141/* [R 1] debug only: Indication if PSWHST arbiter is idle */
2008#define PXP_REG_HST_ARB_IS_IDLE 0x103004 2142#define PXP_REG_HST_ARB_IS_IDLE 0x103004
2009/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means 2143/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
@@ -2021,6 +2155,8 @@
2021#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c 2155#define PXP_REG_PXP_INT_STS_CLR_0 0x10306c
2022/* [RW 26] Parity mask register #0 read/write */ 2156/* [RW 26] Parity mask register #0 read/write */
2023#define PXP_REG_PXP_PRTY_MASK 0x103094 2157#define PXP_REG_PXP_PRTY_MASK 0x103094
2158/* [R 26] Parity register #0 read */
2159#define PXP_REG_PXP_PRTY_STS 0x103088
2024/* [RW 4] The activity counter initial increment value sent in the load 2160/* [RW 4] The activity counter initial increment value sent in the load
2025 request */ 2161 request */
2026#define QM_REG_ACTCTRINITVAL_0 0x168040 2162#define QM_REG_ACTCTRINITVAL_0 0x168040
@@ -2127,6 +2263,8 @@
2127#define QM_REG_QM_INT_STS 0x168438 2263#define QM_REG_QM_INT_STS 0x168438
2128/* [RW 9] Parity mask register #0 read/write */ 2264/* [RW 9] Parity mask register #0 read/write */
2129#define QM_REG_QM_PRTY_MASK 0x168454 2265#define QM_REG_QM_PRTY_MASK 0x168454
2266/* [R 9] Parity register #0 read */
2267#define QM_REG_QM_PRTY_STS 0x168448
2130/* [R 32] Current queues in pipeline: Queues from 32 to 63 */ 2268/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
2131#define QM_REG_QSTATUS_HIGH 0x16802c 2269#define QM_REG_QSTATUS_HIGH 0x16802c
2132/* [R 32] Current queues in pipeline: Queues from 0 to 31 */ 2270/* [R 32] Current queues in pipeline: Queues from 0 to 31 */
@@ -2410,6 +2548,8 @@
2410#define SRC_REG_SRC_INT_STS 0x404ac 2548#define SRC_REG_SRC_INT_STS 0x404ac
2411/* [RW 3] Parity mask register #0 read/write */ 2549/* [RW 3] Parity mask register #0 read/write */
2412#define SRC_REG_SRC_PRTY_MASK 0x404c8 2550#define SRC_REG_SRC_PRTY_MASK 0x404c8
2551/* [R 3] Parity register #0 read */
2552#define SRC_REG_SRC_PRTY_STS 0x404bc
2413/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ 2553/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
2414#define TCM_REG_CAM_OCCUP 0x5017c 2554#define TCM_REG_CAM_OCCUP 0x5017c
2415/* [RW 1] CDU AG read Interface enable. If 0 - the request input is 2555/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -2730,6 +2870,8 @@
2730#define TSDM_REG_TSDM_INT_MASK_1 0x422ac 2870#define TSDM_REG_TSDM_INT_MASK_1 0x422ac
2731/* [RW 11] Parity mask register #0 read/write */ 2871/* [RW 11] Parity mask register #0 read/write */
2732#define TSDM_REG_TSDM_PRTY_MASK 0x422bc 2872#define TSDM_REG_TSDM_PRTY_MASK 0x422bc
2873/* [R 11] Parity register #0 read */
2874#define TSDM_REG_TSDM_PRTY_STS 0x422b0
2733/* [RW 5] The number of time_slots in the arbitration cycle */ 2875/* [RW 5] The number of time_slots in the arbitration cycle */
2734#define TSEM_REG_ARB_CYCLE_SIZE 0x180034 2876#define TSEM_REG_ARB_CYCLE_SIZE 0x180034
2735/* [RW 3] The source that is associated with arbitration element 0. Source 2877/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -2854,6 +2996,9 @@
2854/* [RW 32] Parity mask register #0 read/write */ 2996/* [RW 32] Parity mask register #0 read/write */
2855#define TSEM_REG_TSEM_PRTY_MASK_0 0x180120 2997#define TSEM_REG_TSEM_PRTY_MASK_0 0x180120
2856#define TSEM_REG_TSEM_PRTY_MASK_1 0x180130 2998#define TSEM_REG_TSEM_PRTY_MASK_1 0x180130
2999/* [R 32] Parity register #0 read */
3000#define TSEM_REG_TSEM_PRTY_STS_0 0x180114
3001#define TSEM_REG_TSEM_PRTY_STS_1 0x180124
2857/* [R 5] Used to read the XX protection CAM occupancy counter. */ 3002/* [R 5] Used to read the XX protection CAM occupancy counter. */
2858#define UCM_REG_CAM_OCCUP 0xe0170 3003#define UCM_REG_CAM_OCCUP 0xe0170
2859/* [RW 1] CDU AG read Interface enable. If 0 - the request input is 3004/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3155,6 +3300,8 @@
3155#define USDM_REG_USDM_INT_MASK_1 0xc42b0 3300#define USDM_REG_USDM_INT_MASK_1 0xc42b0
3156/* [RW 11] Parity mask register #0 read/write */ 3301/* [RW 11] Parity mask register #0 read/write */
3157#define USDM_REG_USDM_PRTY_MASK 0xc42c0 3302#define USDM_REG_USDM_PRTY_MASK 0xc42c0
3303/* [R 11] Parity register #0 read */
3304#define USDM_REG_USDM_PRTY_STS 0xc42b4
3158/* [RW 5] The number of time_slots in the arbitration cycle */ 3305/* [RW 5] The number of time_slots in the arbitration cycle */
3159#define USEM_REG_ARB_CYCLE_SIZE 0x300034 3306#define USEM_REG_ARB_CYCLE_SIZE 0x300034
3160/* [RW 3] The source that is associated with arbitration element 0. Source 3307/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -3279,6 +3426,9 @@
3279/* [RW 32] Parity mask register #0 read/write */ 3426/* [RW 32] Parity mask register #0 read/write */
3280#define USEM_REG_USEM_PRTY_MASK_0 0x300130 3427#define USEM_REG_USEM_PRTY_MASK_0 0x300130
3281#define USEM_REG_USEM_PRTY_MASK_1 0x300140 3428#define USEM_REG_USEM_PRTY_MASK_1 0x300140
3429/* [R 32] Parity register #0 read */
3430#define USEM_REG_USEM_PRTY_STS_0 0x300124
3431#define USEM_REG_USEM_PRTY_STS_1 0x300134
3282/* [RW 2] The queue index for registration on Aux1 counter flag. */ 3432/* [RW 2] The queue index for registration on Aux1 counter flag. */
3283#define XCM_REG_AUX1_Q 0x20134 3433#define XCM_REG_AUX1_Q 0x20134
3284/* [RW 2] Per each decision rule the queue index to register to. */ 3434/* [RW 2] Per each decision rule the queue index to register to. */
@@ -3684,6 +3834,8 @@
3684#define XSDM_REG_XSDM_INT_MASK_1 0x1662ac 3834#define XSDM_REG_XSDM_INT_MASK_1 0x1662ac
3685/* [RW 11] Parity mask register #0 read/write */ 3835/* [RW 11] Parity mask register #0 read/write */
3686#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc 3836#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
3837/* [R 11] Parity register #0 read */
3838#define XSDM_REG_XSDM_PRTY_STS 0x1662b0
3687/* [RW 5] The number of time_slots in the arbitration cycle */ 3839/* [RW 5] The number of time_slots in the arbitration cycle */
3688#define XSEM_REG_ARB_CYCLE_SIZE 0x280034 3840#define XSEM_REG_ARB_CYCLE_SIZE 0x280034
3689/* [RW 3] The source that is associated with arbitration element 0. Source 3841/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -3808,6 +3960,9 @@
3808/* [RW 32] Parity mask register #0 read/write */ 3960/* [RW 32] Parity mask register #0 read/write */
3809#define XSEM_REG_XSEM_PRTY_MASK_0 0x280130 3961#define XSEM_REG_XSEM_PRTY_MASK_0 0x280130
3810#define XSEM_REG_XSEM_PRTY_MASK_1 0x280140 3962#define XSEM_REG_XSEM_PRTY_MASK_1 0x280140
3963/* [R 32] Parity register #0 read */
3964#define XSEM_REG_XSEM_PRTY_STS_0 0x280124
3965#define XSEM_REG_XSEM_PRTY_STS_1 0x280134
3811#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) 3966#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
3812#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) 3967#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
3813#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) 3968#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
@@ -3847,6 +4002,8 @@
3847#define EMAC_MDIO_COMM_START_BUSY (1L<<29) 4002#define EMAC_MDIO_COMM_START_BUSY (1L<<29)
3848#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4) 4003#define EMAC_MDIO_MODE_AUTO_POLL (1L<<4)
3849#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31) 4004#define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31)
4005#define EMAC_MDIO_MODE_CLOCK_CNT (0x3fL<<16)
4006#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16
3850#define EMAC_MODE_25G_MODE (1L<<5) 4007#define EMAC_MODE_25G_MODE (1L<<5)
3851#define EMAC_MODE_ACPI_RCVD (1L<<20) 4008#define EMAC_MODE_ACPI_RCVD (1L<<20)
3852#define EMAC_MODE_HALF_DUPLEX (1L<<1) 4009#define EMAC_MODE_HALF_DUPLEX (1L<<1)
@@ -3874,6 +4031,17 @@
3874#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31) 4031#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
3875#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3) 4032#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
3876#define EMAC_TX_MODE_RESET (1L<<0) 4033#define EMAC_TX_MODE_RESET (1L<<0)
4034#define MISC_REGISTERS_GPIO_1 1
4035#define MISC_REGISTERS_GPIO_2 2
4036#define MISC_REGISTERS_GPIO_3 3
4037#define MISC_REGISTERS_GPIO_CLR_POS 16
4038#define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24)
4039#define MISC_REGISTERS_GPIO_FLOAT_POS 24
4040#define MISC_REGISTERS_GPIO_INPUT_HI_Z 2
4041#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1
4042#define MISC_REGISTERS_GPIO_OUTPUT_LOW 0
4043#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
4044#define MISC_REGISTERS_GPIO_SET_POS 8
3877#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 4045#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
3878#define MISC_REGISTERS_RESET_REG_1_SET 0x584 4046#define MISC_REGISTERS_RESET_REG_1_SET 0x584
3879#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 4047#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
@@ -3891,6 +4059,25 @@
3891#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4) 4059#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4)
3892#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8) 4060#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8)
3893#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4 4061#define MISC_REGISTERS_RESET_REG_3_SET 0x5a4
4062#define MISC_REGISTERS_SPIO_4 4
4063#define MISC_REGISTERS_SPIO_5 5
4064#define MISC_REGISTERS_SPIO_7 7
4065#define MISC_REGISTERS_SPIO_CLR_POS 16
4066#define MISC_REGISTERS_SPIO_FLOAT (0xffL<<24)
4067#define GRC_MISC_REGISTERS_SPIO_FLOAT7 0x80000000
4068#define GRC_MISC_REGISTERS_SPIO_FLOAT6 0x40000000
4069#define GRC_MISC_REGISTERS_SPIO_FLOAT5 0x20000000
4070#define GRC_MISC_REGISTERS_SPIO_FLOAT4 0x10000000
4071#define MISC_REGISTERS_SPIO_FLOAT_POS 24
4072#define MISC_REGISTERS_SPIO_INPUT_HI_Z 2
4073#define MISC_REGISTERS_SPIO_INT_OLD_SET_POS 16
4074#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1
4075#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
4076#define MISC_REGISTERS_SPIO_SET_POS 8
4077#define HW_LOCK_MAX_RESOURCE_VALUE 31
4078#define HW_LOCK_RESOURCE_8072_MDIO 0
4079#define HW_LOCK_RESOURCE_GPIO 1
4080#define HW_LOCK_RESOURCE_SPIO 2
3894#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18) 4081#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18)
3895#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31) 4082#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31)
3896#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9) 4083#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9)
@@ -3918,6 +4105,7 @@
3918#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (1<<3) 4105#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (1<<3)
3919#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (1<<2) 4106#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (1<<2)
3920#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (1<<22) 4107#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (1<<22)
4108#define AEU_INPUTS_ATTN_BITS_SPIO5 (1<<15)
3921#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (1<<27) 4109#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (1<<27)
3922#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (1<<5) 4110#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (1<<5)
3923#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (1<<25) 4111#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (1<<25)
@@ -4206,6 +4394,9 @@
4206#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000 4394#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000
4207#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11 4395#define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11
4208#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000 4396#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000
4397#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14
4398#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001
4399#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010
4209#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15 4400#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15
4210 4401
4211#define MDIO_REG_BANK_GP_STATUS 0x8120 4402#define MDIO_REG_BANK_GP_STATUS 0x8120
@@ -4362,11 +4553,13 @@
4362#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001 4553#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001
4363 4554
4364 4555
4556#define EXT_PHY_AUTO_NEG_DEVAD 0x7
4365#define EXT_PHY_OPT_PMA_PMD_DEVAD 0x1 4557#define EXT_PHY_OPT_PMA_PMD_DEVAD 0x1
4366#define EXT_PHY_OPT_WIS_DEVAD 0x2 4558#define EXT_PHY_OPT_WIS_DEVAD 0x2
4367#define EXT_PHY_OPT_PCS_DEVAD 0x3 4559#define EXT_PHY_OPT_PCS_DEVAD 0x3
4368#define EXT_PHY_OPT_PHY_XS_DEVAD 0x4 4560#define EXT_PHY_OPT_PHY_XS_DEVAD 0x4
4369#define EXT_PHY_OPT_CNTL 0x0 4561#define EXT_PHY_OPT_CNTL 0x0
4562#define EXT_PHY_OPT_CNTL2 0x7
4370#define EXT_PHY_OPT_PMD_RX_SD 0xa 4563#define EXT_PHY_OPT_PMD_RX_SD 0xa
4371#define EXT_PHY_OPT_PMD_MISC_CNTL 0xca0a 4564#define EXT_PHY_OPT_PMD_MISC_CNTL 0xca0a
4372#define EXT_PHY_OPT_PHY_IDENTIFIER 0xc800 4565#define EXT_PHY_OPT_PHY_IDENTIFIER 0xc800
@@ -4378,11 +4571,24 @@
4378#define EXT_PHY_OPT_LASI_STATUS 0x9005 4571#define EXT_PHY_OPT_LASI_STATUS 0x9005
4379#define EXT_PHY_OPT_PCS_STATUS 0x0020 4572#define EXT_PHY_OPT_PCS_STATUS 0x0020
4380#define EXT_PHY_OPT_XGXS_LANE_STATUS 0x0018 4573#define EXT_PHY_OPT_XGXS_LANE_STATUS 0x0018
4574#define EXT_PHY_OPT_AN_LINK_STATUS 0x8304
4575#define EXT_PHY_OPT_AN_CL37_CL73 0x8370
4576#define EXT_PHY_OPT_AN_CL37_FD 0xffe4
4577#define EXT_PHY_OPT_AN_CL37_AN 0xffe0
4578#define EXT_PHY_OPT_AN_ADV 0x11
4381 4579
4382#define EXT_PHY_KR_PMA_PMD_DEVAD 0x1 4580#define EXT_PHY_KR_PMA_PMD_DEVAD 0x1
4383#define EXT_PHY_KR_PCS_DEVAD 0x3 4581#define EXT_PHY_KR_PCS_DEVAD 0x3
4384#define EXT_PHY_KR_AUTO_NEG_DEVAD 0x7 4582#define EXT_PHY_KR_AUTO_NEG_DEVAD 0x7
4385#define EXT_PHY_KR_CTRL 0x0000 4583#define EXT_PHY_KR_CTRL 0x0000
4584#define EXT_PHY_KR_STATUS 0x0001
4585#define EXT_PHY_KR_AUTO_NEG_COMPLETE 0x0020
4586#define EXT_PHY_KR_AUTO_NEG_ADVERT 0x0010
4587#define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE 0x0400
4588#define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC 0x0800
4589#define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH 0x0C00
4590#define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK 0x0C00
4591#define EXT_PHY_KR_LP_AUTO_NEG 0x0013
4386#define EXT_PHY_KR_CTRL2 0x0007 4592#define EXT_PHY_KR_CTRL2 0x0007
4387#define EXT_PHY_KR_PCS_STATUS 0x0020 4593#define EXT_PHY_KR_PCS_STATUS 0x0020
4388#define EXT_PHY_KR_PMD_CTRL 0x0096 4594#define EXT_PHY_KR_PMD_CTRL 0x0096
@@ -4391,4 +4597,8 @@
4391#define EXT_PHY_KR_MISC_CTRL1 0xca85 4597#define EXT_PHY_KR_MISC_CTRL1 0xca85
4392#define EXT_PHY_KR_GEN_CTRL 0xca10 4598#define EXT_PHY_KR_GEN_CTRL 0xca10
4393#define EXT_PHY_KR_ROM_CODE 0xca19 4599#define EXT_PHY_KR_ROM_CODE 0xca19
4600#define EXT_PHY_KR_ROM_RESET_INTERNAL_MP 0x0188
4601#define EXT_PHY_KR_ROM_MICRO_RESET 0x018a
4602
4603#define EXT_PHY_SFX7101_XGXS_TEST1 0xc00a
4394 4604
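The MISC_REGISTERS_GPIO_* additions above define bit positions inside the GRC GPIO register rather than standalone flags. Below is a minimal sketch of how such position fields are typically combined in a read-modify-write of that register; it assumes the definitions from the hunk above, and the function and parameter names are illustrative only, not part of the patch.

/* Illustrative only: drive one GPIO pin high using the *_POS fields from the
 * bnx2x_reg.h hunk above (FLOAT_POS = 24, SET_POS = 8).  'reg' is the value
 * previously read from the GPIO register; the caller writes the result back
 * through the driver's own register accessors. */
static u32 example_gpio_output_high(u32 reg, unsigned int gpio_num)
{
	u32 mask = 1 << gpio_num;

	reg &= ~(mask << MISC_REGISTERS_GPIO_FLOAT_POS);  /* stop floating the pin */
	reg |=  (mask << MISC_REGISTERS_GPIO_SET_POS);    /* request output high   */
	return reg;
}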
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 571750975137..348371fda597 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -172,30 +172,30 @@ static char version[] __initdata =
172 them to system IRQ numbers. This mapping is card specific and is set to 172 them to system IRQ numbers. This mapping is card specific and is set to
173 the configuration of the Cirrus Eval board for this chip. */ 173 the configuration of the Cirrus Eval board for this chip. */
174#ifdef CONFIG_ARCH_CLPS7500 174#ifdef CONFIG_ARCH_CLPS7500
175static unsigned int netcard_portlist[] __initdata = 175static unsigned int netcard_portlist[] __used __initdata =
176 { 0x80090303, 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0}; 176 { 0x80090303, 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
177static unsigned int cs8900_irq_map[] = {12,0,0,0}; 177static unsigned int cs8900_irq_map[] = {12,0,0,0};
178#elif defined(CONFIG_SH_HICOSH4) 178#elif defined(CONFIG_SH_HICOSH4)
179static unsigned int netcard_portlist[] __initdata = 179static unsigned int netcard_portlist[] __used __initdata =
180 { 0x0300, 0}; 180 { 0x0300, 0};
181static unsigned int cs8900_irq_map[] = {1,0,0,0}; 181static unsigned int cs8900_irq_map[] = {1,0,0,0};
182#elif defined(CONFIG_MACH_IXDP2351) 182#elif defined(CONFIG_MACH_IXDP2351)
183static unsigned int netcard_portlist[] __initdata = {IXDP2351_VIRT_CS8900_BASE, 0}; 183static unsigned int netcard_portlist[] __used __initdata = {IXDP2351_VIRT_CS8900_BASE, 0};
184static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0}; 184static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0};
185#include <asm/irq.h> 185#include <asm/irq.h>
186#elif defined(CONFIG_ARCH_IXDP2X01) 186#elif defined(CONFIG_ARCH_IXDP2X01)
187#include <asm/irq.h> 187#include <asm/irq.h>
188static unsigned int netcard_portlist[] __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0}; 188static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
189static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0}; 189static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
190#elif defined(CONFIG_ARCH_PNX010X) 190#elif defined(CONFIG_ARCH_PNX010X)
191#include <asm/irq.h> 191#include <asm/irq.h>
192#include <asm/arch/gpio.h> 192#include <asm/arch/gpio.h>
193#define CIRRUS_DEFAULT_BASE IO_ADDRESS(EXT_STATIC2_s0_BASE + 0x200000) /* = Physical address 0x48200000 */ 193#define CIRRUS_DEFAULT_BASE IO_ADDRESS(EXT_STATIC2_s0_BASE + 0x200000) /* = Physical address 0x48200000 */
194#define CIRRUS_DEFAULT_IRQ VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */ 194#define CIRRUS_DEFAULT_IRQ VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */
195static unsigned int netcard_portlist[] __initdata = {CIRRUS_DEFAULT_BASE, 0}; 195static unsigned int netcard_portlist[] __used __initdata = {CIRRUS_DEFAULT_BASE, 0};
196static unsigned int cs8900_irq_map[] = {CIRRUS_DEFAULT_IRQ, 0, 0, 0}; 196static unsigned int cs8900_irq_map[] = {CIRRUS_DEFAULT_IRQ, 0, 0, 0};
197#else 197#else
198static unsigned int netcard_portlist[] __initdata = 198static unsigned int netcard_portlist[] __used __initdata =
199 { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0}; 199 { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
200static unsigned int cs8900_irq_map[] = {10,11,12,5}; 200static unsigned int cs8900_irq_map[] = {10,11,12,5};
201#endif 201#endif
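The only change in this file is tagging the __initdata port lists with __used. A short sketch of the pattern, assuming kernel context (<linux/init.h>, <linux/compiler.h>); the array name is made up:

#include <linux/init.h>
#include <linux/compiler.h>

/* __used keeps the compiler from discarding (or warning about) a static
 * __initdata table that is only referenced from some #ifdef branches,
 * which is exactly the situation with netcard_portlist above. */
static unsigned int example_portlist[] __used __initdata = { 0x300, 0x320, 0 };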
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 36ba6dc96acc..cdf3090a1885 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2782,16 +2782,13 @@ static void __devexit e100_remove(struct pci_dev *pdev)
2782 } 2782 }
2783} 2783}
2784 2784
2785#ifdef CONFIG_PM
2786static int e100_suspend(struct pci_dev *pdev, pm_message_t state) 2785static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2787{ 2786{
2788 struct net_device *netdev = pci_get_drvdata(pdev); 2787 struct net_device *netdev = pci_get_drvdata(pdev);
2789 struct nic *nic = netdev_priv(netdev); 2788 struct nic *nic = netdev_priv(netdev);
2790 2789
2791 if (netif_running(netdev)) 2790 if (netif_running(netdev))
2792 napi_disable(&nic->napi); 2791 e100_down(nic);
2793 del_timer_sync(&nic->watchdog);
2794 netif_carrier_off(nic->netdev);
2795 netif_device_detach(netdev); 2792 netif_device_detach(netdev);
2796 2793
2797 pci_save_state(pdev); 2794 pci_save_state(pdev);
@@ -2804,14 +2801,13 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2804 pci_enable_wake(pdev, PCI_D3cold, 0); 2801 pci_enable_wake(pdev, PCI_D3cold, 0);
2805 } 2802 }
2806 2803
2807 free_irq(pdev->irq, netdev);
2808
2809 pci_disable_device(pdev); 2804 pci_disable_device(pdev);
2810 pci_set_power_state(pdev, PCI_D3hot); 2805 pci_set_power_state(pdev, PCI_D3hot);
2811 2806
2812 return 0; 2807 return 0;
2813} 2808}
2814 2809
2810#ifdef CONFIG_PM
2815static int e100_resume(struct pci_dev *pdev) 2811static int e100_resume(struct pci_dev *pdev)
2816{ 2812{
2817 struct net_device *netdev = pci_get_drvdata(pdev); 2813 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -2832,26 +2828,7 @@ static int e100_resume(struct pci_dev *pdev)
2832 2828
2833static void e100_shutdown(struct pci_dev *pdev) 2829static void e100_shutdown(struct pci_dev *pdev)
2834{ 2830{
2835 struct net_device *netdev = pci_get_drvdata(pdev); 2831 e100_suspend(pdev, PMSG_SUSPEND);
2836 struct nic *nic = netdev_priv(netdev);
2837
2838 if (netif_running(netdev))
2839 napi_disable(&nic->napi);
2840 del_timer_sync(&nic->watchdog);
2841 netif_carrier_off(nic->netdev);
2842
2843 if ((nic->flags & wol_magic) | e100_asf(nic)) {
2844 pci_enable_wake(pdev, PCI_D3hot, 1);
2845 pci_enable_wake(pdev, PCI_D3cold, 1);
2846 } else {
2847 pci_enable_wake(pdev, PCI_D3hot, 0);
2848 pci_enable_wake(pdev, PCI_D3cold, 0);
2849 }
2850
2851 free_irq(pdev->irq, netdev);
2852
2853 pci_disable_device(pdev);
2854 pci_set_power_state(pdev, PCI_D3hot);
2855} 2832}
2856 2833
2857/* ------------------ PCI Error Recovery infrastructure -------------- */ 2834/* ------------------ PCI Error Recovery infrastructure -------------- */
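The e100 change folds the duplicated shutdown logic into the suspend path, which is why e100_suspend now has to be built even without CONFIG_PM. A minimal sketch of the resulting pattern; the example_* names are placeholders, not the driver's real symbols:

#include <linux/pci.h>

/* Sketch: .shutdown reuses the suspend path so the device is quiesced and
 * wake-on-LAN is armed in exactly one place. */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/* stop the interface, program wake-up filters, enter D3hot */
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static void example_shutdown(struct pci_dev *pdev)
{
	example_suspend(pdev, PMSG_SUSPEND);
}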
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 3beace55b58d..7fe20310eb5f 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -438,7 +438,7 @@ static void e1000_release_nvm_82571(struct e1000_hw *hw)
438 * For non-82573 silicon, write data to EEPROM at offset using SPI interface. 438 * For non-82573 silicon, write data to EEPROM at offset using SPI interface.
439 * 439 *
440 * If e1000e_update_nvm_checksum is not called after this function, the 440 * If e1000e_update_nvm_checksum is not called after this function, the
441 * EEPROM will most likley contain an invalid checksum. 441 * EEPROM will most likely contain an invalid checksum.
442 **/ 442 **/
443static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, 443static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
444 u16 *data) 444 u16 *data)
@@ -547,7 +547,7 @@ static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
547 * poll for completion. 547 * poll for completion.
548 * 548 *
549 * If e1000e_update_nvm_checksum is not called after this function, the 549 * If e1000e_update_nvm_checksum is not called after this function, the
550 * EEPROM will most likley contain an invalid checksum. 550 * EEPROM will most likely contain an invalid checksum.
551 **/ 551 **/
552static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, 552static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
553 u16 words, u16 *data) 553 u16 words, u16 *data)
@@ -1053,7 +1053,7 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
1053 /* If SerDes loopback mode is entered, there is no form 1053 /* If SerDes loopback mode is entered, there is no form
1054 * of reset to take the adapter out of that mode. So we 1054 * of reset to take the adapter out of that mode. So we
1055 * have to explicitly take the adapter out of loopback 1055 * have to explicitly take the adapter out of loopback
1056 * mode. This prevents drivers from twidling their thumbs 1056 * mode. This prevents drivers from twiddling their thumbs
1057 * if another tool failed to take it out of loopback mode. 1057 * if another tool failed to take it out of loopback mode.
1058 */ 1058 */
1059 ew32(SCTL, 1059 ew32(SCTL,
@@ -1098,7 +1098,7 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
1098 * e1000e_get_laa_state_82571 - Get locally administered address state 1098 * e1000e_get_laa_state_82571 - Get locally administered address state
1099 * @hw: pointer to the HW structure 1099 * @hw: pointer to the HW structure
1100 * 1100 *
1101 * Retrieve and return the current locally administed address state. 1101 * Retrieve and return the current locally administered address state.
1102 **/ 1102 **/
1103bool e1000e_get_laa_state_82571(struct e1000_hw *hw) 1103bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
1104{ 1104{
@@ -1113,7 +1113,7 @@ bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
1113 * @hw: pointer to the HW structure 1113 * @hw: pointer to the HW structure
1114 * @state: enable/disable locally administered address 1114 * @state: enable/disable locally administered address
1115 * 1115 *
1116 * Enable/Disable the current locally administed address state. 1116 * Enable/Disable the current locally administered address state.
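(placeholder - removed below)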
1117 **/ 1117 **/
1118void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state) 1118void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
1119{ 1119{
@@ -1281,16 +1281,6 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
1281 1281
1282static struct e1000_nvm_operations e82571_nvm_ops = { 1282static struct e1000_nvm_operations e82571_nvm_ops = {
1283 .acquire_nvm = e1000_acquire_nvm_82571, 1283 .acquire_nvm = e1000_acquire_nvm_82571,
1284 .read_nvm = e1000e_read_nvm_spi,
1285 .release_nvm = e1000_release_nvm_82571,
1286 .update_nvm = e1000_update_nvm_checksum_82571,
1287 .valid_led_default = e1000_valid_led_default_82571,
1288 .validate_nvm = e1000_validate_nvm_checksum_82571,
1289 .write_nvm = e1000_write_nvm_82571,
1290};
1291
1292static struct e1000_nvm_operations e82573_nvm_ops = {
1293 .acquire_nvm = e1000_acquire_nvm_82571,
1294 .read_nvm = e1000e_read_nvm_eerd, 1284 .read_nvm = e1000e_read_nvm_eerd,
1295 .release_nvm = e1000_release_nvm_82571, 1285 .release_nvm = e1000_release_nvm_82571,
1296 .update_nvm = e1000_update_nvm_checksum_82571, 1286 .update_nvm = e1000_update_nvm_checksum_82571,
@@ -1355,6 +1345,6 @@ struct e1000_info e1000_82573_info = {
1355 .get_invariants = e1000_get_invariants_82571, 1345 .get_invariants = e1000_get_invariants_82571,
1356 .mac_ops = &e82571_mac_ops, 1346 .mac_ops = &e82571_mac_ops,
1357 .phy_ops = &e82_phy_ops_m88, 1347 .phy_ops = &e82_phy_ops_m88,
1358 .nvm_ops = &e82573_nvm_ops, 1348 .nvm_ops = &e82571_nvm_ops,
1359}; 1349};
1360 1350
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 6232c3e96689..a4f511f549f7 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -66,7 +66,7 @@
66#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ 66#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
67 67
68/* Extended Device Control */ 68/* Extended Device Control */
69#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ 69#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
70#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ 70#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
71#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ 71#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
72#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 72#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
@@ -75,12 +75,12 @@
75#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ 75#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
76#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ 76#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
77 77
78/* Receive Decriptor bit definitions */ 78/* Receive Descriptor bit definitions */
79#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 79#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
80#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ 80#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */
81#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ 81#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */
82#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ 82#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
83#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ 83#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
84#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ 84#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
85#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ 85#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
86#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ 86#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
@@ -223,7 +223,7 @@
223#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ 223#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
224#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ 224#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
225 225
226/* Constants used to intrepret the masked PCI-X bus speed. */ 226/* Constants used to interpret the masked PCI-X bus speed. */
227 227
228#define HALF_DUPLEX 1 228#define HALF_DUPLEX 1
229#define FULL_DUPLEX 2 229#define FULL_DUPLEX 2
@@ -517,7 +517,7 @@
517/* PHY 1000 MII Register/Bit Definitions */ 517/* PHY 1000 MII Register/Bit Definitions */
518/* PHY Registers defined by IEEE */ 518/* PHY Registers defined by IEEE */
519#define PHY_CONTROL 0x00 /* Control Register */ 519#define PHY_CONTROL 0x00 /* Control Register */
520#define PHY_STATUS 0x01 /* Status Regiser */ 520#define PHY_STATUS 0x01 /* Status Register */
521#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ 521#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
522#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ 522#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
523#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ 523#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 8b88c226e858..327c0620da31 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -42,8 +42,7 @@
42struct e1000_info; 42struct e1000_info;
43 43
44#define ndev_printk(level, netdev, format, arg...) \ 44#define ndev_printk(level, netdev, format, arg...) \
45 printk(level "%s: %s: " format, (netdev)->dev.parent->bus_id, \ 45 printk(level "%s: " format, (netdev)->name, ## arg)
46 (netdev)->name, ## arg)
47 46
48#ifdef DEBUG 47#ifdef DEBUG
49#define ndev_dbg(netdev, format, arg...) \ 48#define ndev_dbg(netdev, format, arg...) \
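The simplified ndev_printk above drops the bus_id prefix and keys messages on the net_device name alone. A hedged usage sketch (the message text is made up):

/* With the new macro, a caller such as */
ndev_printk(KERN_INFO, netdev, "link is up\n");
/* expands, after string concatenation, to */
printk(KERN_INFO "%s: link is up\n", netdev->name);   /* e.g. "eth0: link is up" */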
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 3c5862f97dbf..916025b30fc3 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -184,7 +184,7 @@ enum e1e_registers {
184 E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ 184 E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
185 E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ 185 E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */
186 E1000_RXCSUM = 0x05000, /* RX Checksum Control - RW */ 186 E1000_RXCSUM = 0x05000, /* RX Checksum Control - RW */
187 E1000_RFCTL = 0x05008, /* Receive Filter Control*/ 187 E1000_RFCTL = 0x05008, /* Receive Filter Control */
188 E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ 188 E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */
189 E1000_RA = 0x05400, /* Receive Address - RW Array */ 189 E1000_RA = 0x05400, /* Receive Address - RW Array */
190 E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ 190 E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */
@@ -202,7 +202,7 @@ enum e1e_registers {
202 E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ 202 E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */
203 E1000_SWSM = 0x05B50, /* SW Semaphore */ 203 E1000_SWSM = 0x05B50, /* SW Semaphore */
204 E1000_FWSM = 0x05B54, /* FW Semaphore */ 204 E1000_FWSM = 0x05B54, /* FW Semaphore */
205 E1000_HICR = 0x08F00, /* Host Inteface Control */ 205 E1000_HICR = 0x08F00, /* Host Interface Control */
206}; 206};
207 207
208/* RSS registers */ 208/* RSS registers */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 8f8139de1f48..0ae39550768d 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -671,7 +671,7 @@ static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
671 * e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY 671 * e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY
672 * @hw: pointer to the HW structure 672 * @hw: pointer to the HW structure
673 * 673 *
674 * Polarity is determined on the polarity reveral feature being enabled. 674 * Polarity is determined on the polarity reversal feature being enabled.
675 * This function is only called by other family-specific 675 * This function is only called by other family-specific
676 * routines. 676 * routines.
677 **/ 677 **/
@@ -947,7 +947,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
947 /* Either we should have a hardware SPI cycle in progress 947 /* Either we should have a hardware SPI cycle in progress
948 * bit to check against, in order to start a new cycle or 948 * bit to check against, in order to start a new cycle or
949 * FDONE bit should be changed in the hardware so that it 949 * FDONE bit should be changed in the hardware so that it
950 * is 1 after harware reset, which can then be used as an 950 * is 1 after hardware reset, which can then be used as an
951 * indication whether a cycle is in progress or has been 951 * indication whether a cycle is in progress or has been
952 * completed. 952 * completed.
953 */ 953 */
@@ -1155,7 +1155,7 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1155 * which writes the checksum to the shadow ram. The changes in the shadow 1155 * which writes the checksum to the shadow ram. The changes in the shadow
1156 * ram are then committed to the EEPROM by processing each bank at a time 1156 * ram are then committed to the EEPROM by processing each bank at a time
1157 * checking for the modified bit and writing only the pending changes. 1157 * checking for the modified bit and writing only the pending changes.
1158 * After a succesful commit, the shadow ram is cleared and is ready for 1158 * After a successful commit, the shadow ram is cleared and is ready for
1159 * future writes. 1159 * future writes.
1160 **/ 1160 **/
1161static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) 1161static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
@@ -1680,7 +1680,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
1680 * - initialize LED identification 1680 * - initialize LED identification
1681 * - setup receive address registers 1681 * - setup receive address registers
1682 * - setup flow control 1682 * - setup flow control
1683 * - setup transmit discriptors 1683 * - setup transmit descriptors
1684 * - clear statistics 1684 * - clear statistics
1685 **/ 1685 **/
1686static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) 1686static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
@@ -1961,7 +1961,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
1961 E1000_PHY_CTRL_NOND0A_GBE_DISABLE); 1961 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
1962 ew32(PHY_CTRL, phy_ctrl); 1962 ew32(PHY_CTRL, phy_ctrl);
1963 1963
1964 /* Call gig speed drop workaround on Giga disable before accessing 1964 /* Call gig speed drop workaround on Gig disable before accessing
1965 * any PHY registers */ 1965 * any PHY registers */
1966 e1000e_gig_downshift_workaround_ich8lan(hw); 1966 e1000e_gig_downshift_workaround_ich8lan(hw);
1967 1967
@@ -1972,7 +1972,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
1972/** 1972/**
1973 * e1000_set_kmrn_lock_loss_workaound_ich8lan - Set Kumeran workaround state 1973 * e1000_set_kmrn_lock_loss_workaound_ich8lan - Set Kumeran workaround state
1974 * @hw: pointer to the HW structure 1974 * @hw: pointer to the HW structure
1975 * @state: boolean value used to set the current Kumaran workaround state 1975 * @state: boolean value used to set the current Kumeran workaround state
1976 * 1976 *
1977 * If ICH8, set the current Kumeran workaround state (enabled - TRUE 1977 * If ICH8, set the current Kumeran workaround state (enabled - TRUE
1978 * /disabled - FALSE). 1978 * /disabled - FALSE).
@@ -2017,7 +2017,7 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
2017 E1000_PHY_CTRL_NOND0A_GBE_DISABLE); 2017 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
2018 ew32(PHY_CTRL, reg); 2018 ew32(PHY_CTRL, reg);
2019 2019
2020 /* Call gig speed drop workaround on Giga disable before 2020 /* Call gig speed drop workaround on Gig disable before
2021 * accessing any PHY registers */ 2021 * accessing any PHY registers */
2022 if (hw->mac.type == e1000_ich8lan) 2022 if (hw->mac.type == e1000_ich8lan)
2023 e1000e_gig_downshift_workaround_ich8lan(hw); 2023 e1000e_gig_downshift_workaround_ich8lan(hw);
@@ -2045,7 +2045,7 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
2045 * @hw: pointer to the HW structure 2045 * @hw: pointer to the HW structure
2046 * 2046 *
2047 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC), 2047 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
2048 * LPLU, Giga disable, MDIC PHY reset): 2048 * LPLU, Gig disable, MDIC PHY reset):
2049 * 1) Set Kumeran Near-end loopback 2049 * 1) Set Kumeran Near-end loopback
2050 * 2) Clear Kumeran Near-end loopback 2050 * 2) Clear Kumeran Near-end loopback
2051 * Should only be called for ICH8[m] devices with IGP_3 Phy. 2051 * Should only be called for ICH8[m] devices with IGP_3 Phy.
@@ -2089,10 +2089,10 @@ static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
2089} 2089}
2090 2090
2091/** 2091/**
2092 * e1000_led_on_ich8lan - Turn LED's on 2092 * e1000_led_on_ich8lan - Turn LEDs on
2093 * @hw: pointer to the HW structure 2093 * @hw: pointer to the HW structure
2094 * 2094 *
2095 * Turn on the LED's. 2095 * Turn on the LEDs.
2096 **/ 2096 **/
2097static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) 2097static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
2098{ 2098{
@@ -2105,10 +2105,10 @@ static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
2105} 2105}
2106 2106
2107/** 2107/**
2108 * e1000_led_off_ich8lan - Turn LED's off 2108 * e1000_led_off_ich8lan - Turn LEDs off
2109 * @hw: pointer to the HW structure 2109 * @hw: pointer to the HW structure
2110 * 2110 *
2111 * Turn off the LED's. 2111 * Turn off the LEDs.
2112 **/ 2112 **/
2113static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) 2113static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
2114{ 2114{
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 16f35fadb74b..95f75a43c9f9 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -589,9 +589,6 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
589 s32 ret_val; 589 s32 ret_val;
590 u16 nvm_data; 590 u16 nvm_data;
591 591
592 if (mac->fc != e1000_fc_default)
593 return 0;
594
595 /* Read and store word 0x0F of the EEPROM. This word contains bits 592 /* Read and store word 0x0F of the EEPROM. This word contains bits
596 * that determine the hardware's default PAUSE (flow control) mode, 593 * that determine the hardware's default PAUSE (flow control) mode,
597 * a bit that determines whether the HW defaults to enabling or 594 * a bit that determines whether the HW defaults to enabling or
@@ -1107,34 +1104,13 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1107 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 1104 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1108 mac->fc = e1000_fc_rx_pause; 1105 mac->fc = e1000_fc_rx_pause;
1109 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n"); 1106 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
1110 } 1107 } else {
1111 /* Per the IEEE spec, at this point flow control should be 1108 /*
1112 * disabled. However, we want to consider that we could 1109 * Per the IEEE spec, at this point flow control
1113 * be connected to a legacy switch that doesn't advertise 1110 * should be disabled.
1114 * desired flow control, but can be forced on the link 1111 */
1115 * partner. So if we advertised no flow control, that is
1116 * what we will resolve to. If we advertised some kind of
1117 * receive capability (Rx Pause Only or Full Flow Control)
1118 * and the link partner advertised none, we will configure
1119 * ourselves to enable Rx Flow Control only. We can do
1120 * this safely for two reasons: If the link partner really
1121 * didn't want flow control enabled, and we enable Rx, no
1122 * harm done since we won't be receiving any PAUSE frames
1123 * anyway. If the intent on the link partner was to have
1124 * flow control enabled, then by us enabling RX only, we
1125 * can at least receive pause frames and process them.
1126 * This is a good idea because in most cases, since we are
1127 * predominantly a server NIC, more times than not we will
1128 * be asked to delay transmission of packets than asking
1129 * our link partner to pause transmission of frames.
1130 */
1131 else if ((mac->original_fc == e1000_fc_none) ||
1132 (mac->original_fc == e1000_fc_tx_pause)) {
1133 mac->fc = e1000_fc_none; 1112 mac->fc = e1000_fc_none;
1134 hw_dbg(hw, "Flow Control = NONE.\r\n"); 1113 hw_dbg(hw, "Flow Control = NONE.\r\n");
1135 } else {
1136 mac->fc = e1000_fc_rx_pause;
1137 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
1138 } 1114 }
1139 1115
1140 /* Now we need to do one last check... If we auto- 1116 /* Now we need to do one last check... If we auto-
@@ -1164,7 +1140,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1164} 1140}
1165 1141
1166/** 1142/**
1167 * e1000e_get_speed_and_duplex_copper - Retreive current speed/duplex 1143 * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
1168 * @hw: pointer to the HW structure 1144 * @hw: pointer to the HW structure
1169 * @speed: stores the current speed 1145 * @speed: stores the current speed
1170 * @duplex: stores the current duplex 1146 * @duplex: stores the current duplex
@@ -1200,7 +1176,7 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup
1200} 1176}
1201 1177
1202/** 1178/**
1203 * e1000e_get_speed_and_duplex_fiber_serdes - Retreive current speed/duplex 1179 * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
1204 * @hw: pointer to the HW structure 1180 * @hw: pointer to the HW structure
1205 * @speed: stores the current speed 1181 * @speed: stores the current speed
1206 * @duplex: stores the current duplex 1182 * @duplex: stores the current duplex
@@ -1410,7 +1386,7 @@ s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
1410 * e1000e_blink_led - Blink LED 1386 * e1000e_blink_led - Blink LED
1411 * @hw: pointer to the HW structure 1387 * @hw: pointer to the HW structure
1412 * 1388 *
1413 * Blink the led's which are set to be on. 1389 * Blink the LEDs which are set to be on.
1414 **/ 1390 **/
1415s32 e1000e_blink_led(struct e1000_hw *hw) 1391s32 e1000e_blink_led(struct e1000_hw *hw)
1416{ 1392{
@@ -1515,7 +1491,7 @@ void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
1515 * @hw: pointer to the HW structure 1491 * @hw: pointer to the HW structure
1516 * 1492 *
1517 * Returns 0 if successful, else returns -10 1493 * Returns 0 if successful, else returns -10
1518 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not casued 1494 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
1519 * the master requests to be disabled. 1495 * the master requests to be disabled.
1520 * 1496 *
1521 * Disables PCI-Express master access and verifies there are no pending 1497 * Disables PCI-Express master access and verifies there are no pending
@@ -1876,7 +1852,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1876} 1852}
1877 1853
1878/** 1854/**
1879 * e1000e_read_nvm_spi - Read EEPROM's using SPI 1855 * e1000e_read_nvm_spi - Reads EEPROM using SPI
1880 * @hw: pointer to the HW structure 1856 * @hw: pointer to the HW structure
1881 * @offset: offset of word in the EEPROM to read 1857 * @offset: offset of word in the EEPROM to read
1882 * @words: number of words to read 1858 * @words: number of words to read
@@ -1980,7 +1956,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1980 * Writes data to EEPROM at offset using SPI interface. 1956 * Writes data to EEPROM at offset using SPI interface.
1981 * 1957 *
1982 * If e1000e_update_nvm_checksum is not called after this function , the 1958 * If e1000e_update_nvm_checksum is not called after this function , the
1983 * EEPROM will most likley contain an invalid checksum. 1959 * EEPROM will most likely contain an invalid checksum.
1984 **/ 1960 **/
1985s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) 1961s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1986{ 1962{
@@ -2222,7 +2198,7 @@ static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
2222 * 2198 *
2223 * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND 2199 * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
2224 * 2200 *
2225 * This function checks whether the HOST IF is enabled for command operaton 2201 * This function checks whether the HOST IF is enabled for command operation
2226 * and also checks whether the previous command is completed. It busy waits 2202 * and also checks whether the previous command is completed. It busy waits
2227 * in case of previous command is not completed. 2203 * in case of previous command is not completed.
2228 **/ 2204 **/
@@ -2254,7 +2230,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
2254} 2230}
2255 2231
2256/** 2232/**
2257 * e1000e_check_mng_mode - check managament mode 2233 * e1000e_check_mng_mode - check management mode
2258 * @hw: pointer to the HW structure 2234 * @hw: pointer to the HW structure
2259 * 2235 *
2260 * Reads the firmware semaphore register and returns true (>0) if 2236 * Reads the firmware semaphore register and returns true (>0) if
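The lib.c hunk above drops the non-standard "enable Rx pause anyway" fallback and falls through to plain IEEE pause resolution. For reference, a sketch of the full resolution that e1000e_config_fc_after_link_up works through, using the driver's e1000_fc_* values; the local_/partner_ flag names are illustrative:

/* IEEE 802.3 Annex 28B (Table 28B-3) pause resolution after autonegotiation.
 * PAUSE and ASM_DIR are the advertised ability bits on each side of the link. */
if (local_pause && partner_pause)
	mac->fc = e1000_fc_full;                 /* symmetric pause both ways  */
else if (!local_pause && local_asm_dir &&
	 partner_pause && partner_asm_dir)
	mac->fc = e1000_fc_tx_pause;             /* we may send PAUSE frames   */
else if (local_pause && local_asm_dir &&
	 !partner_pause && partner_asm_dir)
	mac->fc = e1000_fc_rx_pause;             /* we may honour PAUSE frames */
else
	mac->fc = e1000_fc_none;                 /* everything else: disabled  */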
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 3031d6d16247..fc5c63f4f578 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1006,7 +1006,7 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
1006 * e1000_get_hw_control - get control of the h/w from f/w 1006 * e1000_get_hw_control - get control of the h/w from f/w
1007 * @adapter: address of board private structure 1007 * @adapter: address of board private structure
1008 * 1008 *
1009 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 1009 * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
1010 * For ASF and Pass Through versions of f/w this means that 1010 * For ASF and Pass Through versions of f/w this means that
1011 * the driver is loaded. For AMT version (only with 82573) 1011 * the driver is loaded. For AMT version (only with 82573)
1012 * of the f/w this means that the network i/f is open. 1012 * of the f/w this means that the network i/f is open.
@@ -1032,7 +1032,7 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter)
1032 * e1000_release_hw_control - release control of the h/w to f/w 1032 * e1000_release_hw_control - release control of the h/w to f/w
1033 * @adapter: address of board private structure 1033 * @adapter: address of board private structure
1034 * 1034 *
1035 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 1035 * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
1036 * For ASF and Pass Through versions of f/w this means that the 1036 * For ASF and Pass Through versions of f/w this means that the
1037 * driver is no longer loaded. For AMT version (only with 82573) i 1037 * driver is no longer loaded. For AMT version (only with 82573) i
1038 * of the f/w this means that the network i/f is closed. 1038 * of the f/w this means that the network i/f is closed.
@@ -1241,6 +1241,11 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter)
1241 1241
1242/** 1242/**
1243 * e1000_update_itr - update the dynamic ITR value based on statistics 1243 * e1000_update_itr - update the dynamic ITR value based on statistics
1244 * @adapter: pointer to adapter
1245 * @itr_setting: current adapter->itr
1246 * @packets: the number of packets during this measurement interval
1247 * @bytes: the number of bytes during this measurement interval
1248 *
1244 * Stores a new ITR value based on packets and byte 1249 * Stores a new ITR value based on packets and byte
1245 * counts during the last interrupt. The advantage of per interrupt 1250 * counts during the last interrupt. The advantage of per interrupt
1246 * computation is faster updates and more accurate ITR for the current 1251 * computation is faster updates and more accurate ITR for the current
@@ -1250,10 +1255,6 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter)
1250 * while increasing bulk throughput. 1255 * while increasing bulk throughput.
1251 * this functionality is controlled by the InterruptThrottleRate module 1256 * this functionality is controlled by the InterruptThrottleRate module
1252 * parameter (see e1000_param.c) 1257 * parameter (see e1000_param.c)
1253 * @adapter: pointer to adapter
1254 * @itr_setting: current adapter->itr
1255 * @packets: the number of packets during this measurement interval
1256 * @bytes: the number of bytes during this measurement interval
1257 **/ 1258 **/
1258static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 1259static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
1259 u16 itr_setting, int packets, 1260 u16 itr_setting, int packets,
@@ -1366,6 +1367,7 @@ set_itr_now:
1366/** 1367/**
1367 * e1000_clean - NAPI Rx polling callback 1368 * e1000_clean - NAPI Rx polling callback
1368 * @adapter: board private structure 1369 * @adapter: board private structure
1370 * @budget: amount of packets driver is allowed to process this poll
1369 **/ 1371 **/
1370static int e1000_clean(struct napi_struct *napi, int budget) 1372static int e1000_clean(struct napi_struct *napi, int budget)
1371{ 1373{
@@ -2000,7 +2002,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
2000 e1000_check_reset_block(hw)) 2002 e1000_check_reset_block(hw))
2001 return; 2003 return;
2002 2004
2003 /* managebility (AMT) is enabled */ 2005 /* manageability (AMT) is enabled */
2004 if (er32(MANC) & E1000_MANC_SMBUS_EN) 2006 if (er32(MANC) & E1000_MANC_SMBUS_EN)
2005 return; 2007 return;
2006 2008
@@ -3488,7 +3490,6 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
3488static void e1000e_disable_l1aspm(struct pci_dev *pdev) 3490static void e1000e_disable_l1aspm(struct pci_dev *pdev)
3489{ 3491{
3490 int pos; 3492 int pos;
3491 u32 cap;
3492 u16 val; 3493 u16 val;
3493 3494
3494 /* 3495 /*
@@ -3503,7 +3504,6 @@ static void e1000e_disable_l1aspm(struct pci_dev *pdev)
3503 * active. 3504 * active.
3504 */ 3505 */
3505 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 3506 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3506 pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &cap);
3507 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val); 3507 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val);
3508 if (val & 0x2) { 3508 if (val & 0x2) {
3509 dev_warn(&pdev->dev, "Disabling L1 ASPM\n"); 3509 dev_warn(&pdev->dev, "Disabling L1 ASPM\n");
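The netdev.c hunks above mostly move kernel-doc @param lines back to where the tooling expects them (and add the missing @budget). For reference, the expected layout, sketched with a dummy function:

/**
 * example_func - one-line summary
 * @arg: parameter descriptions sit directly under the summary line
 *
 * The longer free-form description only starts after the parameter block,
 * which is why the @adapter/@itr_setting/@packets/@bytes lines were moved up.
 */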
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index fc6fee112f1c..dab3c468a768 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -121,7 +121,7 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
121 * @offset: register offset to be read 121 * @offset: register offset to be read
122 * @data: pointer to the read data 122 * @data: pointer to the read data
123 * 123 *
124 * Reads the MDI control regsiter in the PHY at offset and stores the 124 * Reads the MDI control register in the PHY at offset and stores the
125 * information read to data. 125 * information read to data.
126 **/ 126 **/
127static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) 127static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
@@ -1172,7 +1172,7 @@ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1172} 1172}
1173 1173
1174/** 1174/**
1175 * e1000e_check_downshift - Checks whether a downshift in speed occured 1175 * e1000e_check_downshift - Checks whether a downshift in speed occurred
1176 * @hw: pointer to the HW structure 1176 * @hw: pointer to the HW structure
1177 * 1177 *
1178 * Success returns 0, Failure returns 1 1178 * Success returns 0, Failure returns 1
@@ -1388,8 +1388,8 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
1388 * 1388 *
1389 * The automatic gain control (agc) normalizes the amplitude of the 1389 * The automatic gain control (agc) normalizes the amplitude of the
1390 * received signal, adjusting for the attenuation produced by the 1390 * received signal, adjusting for the attenuation produced by the
1391 * cable. By reading the AGC registers, which reperesent the 1391 * cable. By reading the AGC registers, which represent the
1392 * cobination of course and fine gain value, the value can be put 1392 * combination of course and fine gain value, the value can be put
1393 * into a lookup table to obtain the approximate cable length 1393 * into a lookup table to obtain the approximate cable length
1394 * for each channel. 1394 * for each channel.
1395 **/ 1395 **/
@@ -1619,7 +1619,7 @@ s32 e1000e_phy_sw_reset(struct e1000_hw *hw)
1619 * Verify the reset block is not blocking us from resetting. Acquire 1619 * Verify the reset block is not blocking us from resetting. Acquire
1620 * semaphore (if necessary) and read/set/write the device control reset 1620 * semaphore (if necessary) and read/set/write the device control reset
1621 * bit in the PHY. Wait the appropriate delay time for the device to 1621 * bit in the PHY. Wait the appropriate delay time for the device to
1622 * reset and relase the semaphore (if necessary). 1622 * reset and release the semaphore (if necessary).
1623 **/ 1623 **/
1624s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) 1624s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
1625{ 1625{
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 88fb53eba715..7c4ead35cfa2 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0083" 43#define DRV_VERSION "EHEA_0087"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
@@ -386,6 +386,13 @@ struct ehea_port_res {
386 386
387 387
388#define EHEA_MAX_PORTS 16 388#define EHEA_MAX_PORTS 16
389
390#define EHEA_NUM_PORTRES_FW_HANDLES 6 /* QP handle, SendCQ handle,
391 RecvCQ handle, EQ handle,
392 SendMR handle, RecvMR handle */
393#define EHEA_NUM_PORT_FW_HANDLES 1 /* EQ handle */
394#define EHEA_NUM_ADAPTER_FW_HANDLES 2 /* MR handle, NEQ handle */
395
389struct ehea_adapter { 396struct ehea_adapter {
390 u64 handle; 397 u64 handle;
391 struct of_device *ofdev; 398 struct of_device *ofdev;
@@ -405,6 +412,31 @@ struct ehea_mc_list {
405 u64 macaddr; 412 u64 macaddr;
406}; 413};
407 414
415/* kdump support */
416struct ehea_fw_handle_entry {
417 u64 adh; /* Adapter Handle */
418 u64 fwh; /* Firmware Handle */
419};
420
421struct ehea_fw_handle_array {
422 struct ehea_fw_handle_entry *arr;
423 int num_entries;
424 struct semaphore lock;
425};
426
427struct ehea_bcmc_reg_entry {
428 u64 adh; /* Adapter Handle */
429 u32 port_id; /* Logical Port Id */
430 u8 reg_type; /* Registration Type */
431 u64 macaddr;
432};
433
434struct ehea_bcmc_reg_array {
435 struct ehea_bcmc_reg_entry *arr;
436 int num_entries;
437 struct semaphore lock;
438};
439
408#define EHEA_PORT_UP 1 440#define EHEA_PORT_UP 1
409#define EHEA_PORT_DOWN 0 441#define EHEA_PORT_DOWN 0
410#define EHEA_PHY_LINK_UP 1 442#define EHEA_PHY_LINK_UP 1
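The three EHEA_NUM_*_FW_HANDLES constants above feed the sizing arithmetic in ehea_update_firmware_handles() further down in this patch. As a worked example (numbers chosen for illustration only): one adapter with two ports in the UP state and three port resources per port gives 1 * 2 + 2 * 1 + (2 * 3) * 6 = 2 + 2 + 36 = 40 entries in the firmware-handle array.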
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index c051c7e09b9a..21af674b764e 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -35,6 +35,7 @@
35#include <linux/if_ether.h> 35#include <linux/if_ether.h>
36#include <linux/notifier.h> 36#include <linux/notifier.h>
37#include <linux/reboot.h> 37#include <linux/reboot.h>
38#include <asm/kexec.h>
38 39
39#include <net/ip.h> 40#include <net/ip.h>
40 41
@@ -98,8 +99,10 @@ static int port_name_cnt;
98static LIST_HEAD(adapter_list); 99static LIST_HEAD(adapter_list);
99u64 ehea_driver_flags; 100u64 ehea_driver_flags;
100struct work_struct ehea_rereg_mr_task; 101struct work_struct ehea_rereg_mr_task;
101
102struct semaphore dlpar_mem_lock; 102struct semaphore dlpar_mem_lock;
103struct ehea_fw_handle_array ehea_fw_handles;
104struct ehea_bcmc_reg_array ehea_bcmc_regs;
105
103 106
104static int __devinit ehea_probe_adapter(struct of_device *dev, 107static int __devinit ehea_probe_adapter(struct of_device *dev,
105 const struct of_device_id *id); 108 const struct of_device_id *id);
@@ -132,6 +135,160 @@ void ehea_dump(void *adr, int len, char *msg)
132 } 135 }
133} 136}
134 137
138static void ehea_update_firmware_handles(void)
139{
140 struct ehea_fw_handle_entry *arr = NULL;
141 struct ehea_adapter *adapter;
142 int num_adapters = 0;
143 int num_ports = 0;
144 int num_portres = 0;
145 int i = 0;
146 int num_fw_handles, k, l;
147
148 /* Determine number of handles */
149 list_for_each_entry(adapter, &adapter_list, list) {
150 num_adapters++;
151
152 for (k = 0; k < EHEA_MAX_PORTS; k++) {
153 struct ehea_port *port = adapter->port[k];
154
155 if (!port || (port->state != EHEA_PORT_UP))
156 continue;
157
158 num_ports++;
159 num_portres += port->num_def_qps + port->num_add_tx_qps;
160 }
161 }
162
163 num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
164 num_ports * EHEA_NUM_PORT_FW_HANDLES +
165 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;
166
167 if (num_fw_handles) {
168 arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
169 if (!arr)
170 return; /* Keep the existing array */
171 } else
172 goto out_update;
173
174 list_for_each_entry(adapter, &adapter_list, list) {
175 for (k = 0; k < EHEA_MAX_PORTS; k++) {
176 struct ehea_port *port = adapter->port[k];
177
178 if (!port || (port->state != EHEA_PORT_UP))
179 continue;
180
181 for (l = 0;
182 l < port->num_def_qps + port->num_add_tx_qps;
183 l++) {
184 struct ehea_port_res *pr = &port->port_res[l];
185
186 arr[i].adh = adapter->handle;
187 arr[i++].fwh = pr->qp->fw_handle;
188 arr[i].adh = adapter->handle;
189 arr[i++].fwh = pr->send_cq->fw_handle;
190 arr[i].adh = adapter->handle;
191 arr[i++].fwh = pr->recv_cq->fw_handle;
192 arr[i].adh = adapter->handle;
193 arr[i++].fwh = pr->eq->fw_handle;
194 arr[i].adh = adapter->handle;
195 arr[i++].fwh = pr->send_mr.handle;
196 arr[i].adh = adapter->handle;
197 arr[i++].fwh = pr->recv_mr.handle;
198 }
199 arr[i].adh = adapter->handle;
200 arr[i++].fwh = port->qp_eq->fw_handle;
201 }
202
203 arr[i].adh = adapter->handle;
204 arr[i++].fwh = adapter->neq->fw_handle;
205
206 if (adapter->mr.handle) {
207 arr[i].adh = adapter->handle;
208 arr[i++].fwh = adapter->mr.handle;
209 }
210 }
211
212out_update:
213 kfree(ehea_fw_handles.arr);
214 ehea_fw_handles.arr = arr;
215 ehea_fw_handles.num_entries = i;
216}
217
218static void ehea_update_bcmc_registrations(void)
219{
220 struct ehea_bcmc_reg_entry *arr = NULL;
221 struct ehea_adapter *adapter;
222 struct ehea_mc_list *mc_entry;
223 int num_registrations = 0;
224 int i = 0;
225 int k;
226
227 /* Determine number of registrations */
228 list_for_each_entry(adapter, &adapter_list, list)
229 for (k = 0; k < EHEA_MAX_PORTS; k++) {
230 struct ehea_port *port = adapter->port[k];
231
232 if (!port || (port->state != EHEA_PORT_UP))
233 continue;
234
235 num_registrations += 2; /* Broadcast registrations */
236
237 list_for_each_entry(mc_entry, &port->mc_list->list,list)
238 num_registrations += 2;
239 }
240
241 if (num_registrations) {
242 arr = kzalloc(num_registrations * sizeof(*arr), GFP_KERNEL);
243 if (!arr)
244 return; /* Keep the existing array */
245 } else
246 goto out_update;
247
248 list_for_each_entry(adapter, &adapter_list, list) {
249 for (k = 0; k < EHEA_MAX_PORTS; k++) {
250 struct ehea_port *port = adapter->port[k];
251
252 if (!port || (port->state != EHEA_PORT_UP))
253 continue;
254
255 arr[i].adh = adapter->handle;
256 arr[i].port_id = port->logical_port_id;
257 arr[i].reg_type = EHEA_BCMC_BROADCAST |
258 EHEA_BCMC_UNTAGGED;
259 arr[i++].macaddr = port->mac_addr;
260
261 arr[i].adh = adapter->handle;
262 arr[i].port_id = port->logical_port_id;
263 arr[i].reg_type = EHEA_BCMC_BROADCAST |
264 EHEA_BCMC_VLANID_ALL;
265 arr[i++].macaddr = port->mac_addr;
266
267 list_for_each_entry(mc_entry,
268 &port->mc_list->list, list) {
269 arr[i].adh = adapter->handle;
270 arr[i].port_id = port->logical_port_id;
271 arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
272 EHEA_BCMC_MULTICAST |
273 EHEA_BCMC_UNTAGGED;
274 arr[i++].macaddr = mc_entry->macaddr;
275
276 arr[i].adh = adapter->handle;
277 arr[i].port_id = port->logical_port_id;
278 arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
279 EHEA_BCMC_MULTICAST |
280 EHEA_BCMC_VLANID_ALL;
281 arr[i++].macaddr = mc_entry->macaddr;
282 }
283 }
284 }
285
286out_update:
287 kfree(ehea_bcmc_regs.arr);
288 ehea_bcmc_regs.arr = arr;
289 ehea_bcmc_regs.num_entries = i;
290}
291
135static struct net_device_stats *ehea_get_stats(struct net_device *dev) 292static struct net_device_stats *ehea_get_stats(struct net_device *dev)
136{ 293{
137 struct ehea_port *port = netdev_priv(dev); 294 struct ehea_port *port = netdev_priv(dev);
@@ -1601,19 +1758,25 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1601 1758
1602 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); 1759 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1603 1760
1761 down(&ehea_bcmc_regs.lock);
1762
1604 /* Deregister old MAC in pHYP */ 1763 /* Deregister old MAC in pHYP */
1605 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); 1764 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
1606 if (ret) 1765 if (ret)
1607 goto out_free; 1766 goto out_upregs;
1608 1767
1609 port->mac_addr = cb0->port_mac_addr << 16; 1768 port->mac_addr = cb0->port_mac_addr << 16;
1610 1769
1611 /* Register new MAC in pHYP */ 1770 /* Register new MAC in pHYP */
1612 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); 1771 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
1613 if (ret) 1772 if (ret)
1614 goto out_free; 1773 goto out_upregs;
1615 1774
1616 ret = 0; 1775 ret = 0;
1776
1777out_upregs:
1778 ehea_update_bcmc_registrations();
1779 up(&ehea_bcmc_regs.lock);
1617out_free: 1780out_free:
1618 kfree(cb0); 1781 kfree(cb0);
1619out: 1782out:
@@ -1775,9 +1938,11 @@ static void ehea_set_multicast_list(struct net_device *dev)
1775 } 1938 }
1776 ehea_promiscuous(dev, 0); 1939 ehea_promiscuous(dev, 0);
1777 1940
1941 down(&ehea_bcmc_regs.lock);
1942
1778 if (dev->flags & IFF_ALLMULTI) { 1943 if (dev->flags & IFF_ALLMULTI) {
1779 ehea_allmulti(dev, 1); 1944 ehea_allmulti(dev, 1);
1780 return; 1945 goto out;
1781 } 1946 }
1782 ehea_allmulti(dev, 0); 1947 ehea_allmulti(dev, 0);
1783 1948
@@ -1803,6 +1968,8 @@ static void ehea_set_multicast_list(struct net_device *dev)
1803 1968
1804 } 1969 }
1805out: 1970out:
1971 ehea_update_bcmc_registrations();
1972 up(&ehea_bcmc_regs.lock);
1806 return; 1973 return;
1807} 1974}
1808 1975
@@ -2285,6 +2452,8 @@ static int ehea_up(struct net_device *dev)
2285 if (port->state == EHEA_PORT_UP) 2452 if (port->state == EHEA_PORT_UP)
2286 return 0; 2453 return 0;
2287 2454
2455 down(&ehea_fw_handles.lock);
2456
2288 ret = ehea_port_res_setup(port, port->num_def_qps, 2457 ret = ehea_port_res_setup(port, port->num_def_qps,
2289 port->num_add_tx_qps); 2458 port->num_add_tx_qps);
2290 if (ret) { 2459 if (ret) {
@@ -2321,8 +2490,17 @@ static int ehea_up(struct net_device *dev)
2321 } 2490 }
2322 } 2491 }
2323 2492
2324 ret = 0; 2493 down(&ehea_bcmc_regs.lock);
2494
2495 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2496 if (ret) {
2497 ret = -EIO;
2498 goto out_free_irqs;
2499 }
2500
2325 port->state = EHEA_PORT_UP; 2501 port->state = EHEA_PORT_UP;
2502
2503 ret = 0;
2326 goto out; 2504 goto out;
2327 2505
2328out_free_irqs: 2506out_free_irqs:
@@ -2334,6 +2512,12 @@ out:
2334 if (ret) 2512 if (ret)
2335 ehea_info("Failed starting %s. ret=%i", dev->name, ret); 2513 ehea_info("Failed starting %s. ret=%i", dev->name, ret);
2336 2514
2515 ehea_update_bcmc_registrations();
2516 up(&ehea_bcmc_regs.lock);
2517
2518 ehea_update_firmware_handles();
2519 up(&ehea_fw_handles.lock);
2520
2337 return ret; 2521 return ret;
2338} 2522}
2339 2523
@@ -2382,16 +2566,27 @@ static int ehea_down(struct net_device *dev)
2382 if (port->state == EHEA_PORT_DOWN) 2566 if (port->state == EHEA_PORT_DOWN)
2383 return 0; 2567 return 0;
2384 2568
2569 down(&ehea_bcmc_regs.lock);
2385 ehea_drop_multicast_list(dev); 2570 ehea_drop_multicast_list(dev);
2571 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2572
2386 ehea_free_interrupts(dev); 2573 ehea_free_interrupts(dev);
2387 2574
2575 down(&ehea_fw_handles.lock);
2576
2388 port->state = EHEA_PORT_DOWN; 2577 port->state = EHEA_PORT_DOWN;
2389 2578
2579 ehea_update_bcmc_registrations();
2580 up(&ehea_bcmc_regs.lock);
2581
2390 ret = ehea_clean_all_portres(port); 2582 ret = ehea_clean_all_portres(port);
2391 if (ret) 2583 if (ret)
2392 ehea_info("Failed freeing resources for %s. ret=%i", 2584 ehea_info("Failed freeing resources for %s. ret=%i",
2393 dev->name, ret); 2585 dev->name, ret);
2394 2586
2587 ehea_update_firmware_handles();
2588 up(&ehea_fw_handles.lock);
2589
2395 return ret; 2590 return ret;
2396} 2591}
2397 2592
@@ -2920,19 +3115,12 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2920 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; 3115 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
2921 3116
2922 INIT_WORK(&port->reset_task, ehea_reset_port); 3117 INIT_WORK(&port->reset_task, ehea_reset_port);
2923
2924 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2925 if (ret) {
2926 ret = -EIO;
2927 goto out_unreg_port;
2928 }
2929
2930 ehea_set_ethtool_ops(dev); 3118 ehea_set_ethtool_ops(dev);
2931 3119
2932 ret = register_netdev(dev); 3120 ret = register_netdev(dev);
2933 if (ret) { 3121 if (ret) {
2934 ehea_error("register_netdev failed. ret=%d", ret); 3122 ehea_error("register_netdev failed. ret=%d", ret);
2935 goto out_dereg_bc; 3123 goto out_unreg_port;
2936 } 3124 }
2937 3125
2938 port->lro_max_aggr = lro_max_aggr; 3126 port->lro_max_aggr = lro_max_aggr;
@@ -2949,9 +3137,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2949 3137
2950 return port; 3138 return port;
2951 3139
2952out_dereg_bc:
2953 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2954
2955out_unreg_port: 3140out_unreg_port:
2956 ehea_unregister_port(port); 3141 ehea_unregister_port(port);
2957 3142
@@ -2971,7 +3156,6 @@ static void ehea_shutdown_single_port(struct ehea_port *port)
2971{ 3156{
2972 unregister_netdev(port->netdev); 3157 unregister_netdev(port->netdev);
2973 ehea_unregister_port(port); 3158 ehea_unregister_port(port);
2974 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2975 kfree(port->mc_list); 3159 kfree(port->mc_list);
2976 free_netdev(port->netdev); 3160 free_netdev(port->netdev);
2977 port->adapter->active_ports--; 3161 port->adapter->active_ports--;
@@ -3014,7 +3198,6 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
3014 3198
3015 i++; 3199 i++;
3016 }; 3200 };
3017
3018 return 0; 3201 return 0;
3019} 3202}
3020 3203
@@ -3159,6 +3342,7 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
3159 ehea_error("Invalid ibmebus device probed"); 3342 ehea_error("Invalid ibmebus device probed");
3160 return -EINVAL; 3343 return -EINVAL;
3161 } 3344 }
3345 down(&ehea_fw_handles.lock);
3162 3346
3163 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 3347 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3164 if (!adapter) { 3348 if (!adapter) {
@@ -3239,7 +3423,10 @@ out_kill_eq:
3239 3423
3240out_free_ad: 3424out_free_ad:
3241 kfree(adapter); 3425 kfree(adapter);
3426
3242out: 3427out:
3428 ehea_update_firmware_handles();
3429 up(&ehea_fw_handles.lock);
3243 return ret; 3430 return ret;
3244} 3431}
3245 3432
@@ -3258,18 +3445,41 @@ static int __devexit ehea_remove(struct of_device *dev)
3258 3445
3259 flush_scheduled_work(); 3446 flush_scheduled_work();
3260 3447
3448 down(&ehea_fw_handles.lock);
3449
3261 ibmebus_free_irq(adapter->neq->attr.ist1, adapter); 3450 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3262 tasklet_kill(&adapter->neq_tasklet); 3451 tasklet_kill(&adapter->neq_tasklet);
3263 3452
3264 ehea_destroy_eq(adapter->neq); 3453 ehea_destroy_eq(adapter->neq);
3265 ehea_remove_adapter_mr(adapter); 3454 ehea_remove_adapter_mr(adapter);
3266 list_del(&adapter->list); 3455 list_del(&adapter->list);
3267
3268 kfree(adapter); 3456 kfree(adapter);
3269 3457
3458 ehea_update_firmware_handles();
3459 up(&ehea_fw_handles.lock);
3460
3270 return 0; 3461 return 0;
3271} 3462}
3272 3463
3464void ehea_crash_handler(void)
3465{
3466 int i;
3467
3468 if (ehea_fw_handles.arr)
3469 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3470 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3471 ehea_fw_handles.arr[i].fwh,
3472 FORCE_FREE);
3473
3474 if (ehea_bcmc_regs.arr)
3475 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3476 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3477 ehea_bcmc_regs.arr[i].port_id,
3478 ehea_bcmc_regs.arr[i].reg_type,
3479 ehea_bcmc_regs.arr[i].macaddr,
3480 0, H_DEREG_BCMC);
3481}
3482
3273static int ehea_reboot_notifier(struct notifier_block *nb, 3483static int ehea_reboot_notifier(struct notifier_block *nb,
3274 unsigned long action, void *unused) 3484 unsigned long action, void *unused)
3275{ 3485{
@@ -3330,7 +3540,12 @@ int __init ehea_module_init(void)
3330 3540
3331 3541
3332 INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs); 3542 INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
3543 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3544 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3545
3333 sema_init(&dlpar_mem_lock, 1); 3546 sema_init(&dlpar_mem_lock, 1);
3547 sema_init(&ehea_fw_handles.lock, 1);
3548 sema_init(&ehea_bcmc_regs.lock, 1);
3334 3549
3335 ret = check_module_parm(); 3550 ret = check_module_parm();
3336 if (ret) 3551 if (ret)
@@ -3340,12 +3555,18 @@ int __init ehea_module_init(void)
3340 if (ret) 3555 if (ret)
3341 goto out; 3556 goto out;
3342 3557
3343 register_reboot_notifier(&ehea_reboot_nb); 3558 ret = register_reboot_notifier(&ehea_reboot_nb);
3559 if (ret)
3560 ehea_info("failed registering reboot notifier");
3561
3562 ret = crash_shutdown_register(&ehea_crash_handler);
3563 if (ret)
3564 ehea_info("failed registering crash handler");
3344 3565
3345 ret = ibmebus_register_driver(&ehea_driver); 3566 ret = ibmebus_register_driver(&ehea_driver);
3346 if (ret) { 3567 if (ret) {
3347 ehea_error("failed registering eHEA device driver on ebus"); 3568 ehea_error("failed registering eHEA device driver on ebus");
3348 goto out; 3569 goto out2;
3349 } 3570 }
3350 3571
3351 ret = driver_create_file(&ehea_driver.driver, 3572 ret = driver_create_file(&ehea_driver.driver,
@@ -3353,21 +3574,33 @@ int __init ehea_module_init(void)
3353 if (ret) { 3574 if (ret) {
3354 ehea_error("failed to register capabilities attribute, ret=%d", 3575 ehea_error("failed to register capabilities attribute, ret=%d",
3355 ret); 3576 ret);
3356 unregister_reboot_notifier(&ehea_reboot_nb); 3577 goto out3;
3357 ibmebus_unregister_driver(&ehea_driver);
3358 goto out;
3359 } 3578 }
3360 3579
3580 return ret;
3581
3582out3:
3583 ibmebus_unregister_driver(&ehea_driver);
3584out2:
3585 unregister_reboot_notifier(&ehea_reboot_nb);
3586 crash_shutdown_unregister(&ehea_crash_handler);
3361out: 3587out:
3362 return ret; 3588 return ret;
3363} 3589}
3364 3590
3365static void __exit ehea_module_exit(void) 3591static void __exit ehea_module_exit(void)
3366{ 3592{
3593 int ret;
3594
3367 flush_scheduled_work(); 3595 flush_scheduled_work();
3368 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); 3596 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3369 ibmebus_unregister_driver(&ehea_driver); 3597 ibmebus_unregister_driver(&ehea_driver);
3370 unregister_reboot_notifier(&ehea_reboot_nb); 3598 unregister_reboot_notifier(&ehea_reboot_nb);
3599 ret = crash_shutdown_unregister(&ehea_crash_handler);
3600 if (ret)
3601 ehea_info("failed unregistering crash handler");
3602 kfree(ehea_fw_handles.arr);
3603 kfree(ehea_bcmc_regs.arr);
3371 ehea_destroy_busmap(); 3604 ehea_destroy_busmap();
3372} 3605}
3373 3606
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 0809a6a5a286..46a90e9ec563 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -900,7 +900,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
900 if (RSV_GETBIT(rxstat, RSV_LENCHECKERR)) 900 if (RSV_GETBIT(rxstat, RSV_LENCHECKERR))
901 ndev->stats.rx_frame_errors++; 901 ndev->stats.rx_frame_errors++;
902 } else { 902 } else {
903 skb = dev_alloc_skb(len); 903 skb = dev_alloc_skb(len + NET_IP_ALIGN);
904 if (!skb) { 904 if (!skb) {
905 if (netif_msg_rx_err(priv)) 905 if (netif_msg_rx_err(priv))
906 dev_err(&ndev->dev, 906 dev_err(&ndev->dev,
@@ -908,6 +908,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
908 ndev->stats.rx_dropped++; 908 ndev->stats.rx_dropped++;
909 } else { 909 } else {
910 skb->dev = ndev; 910 skb->dev = ndev;
911 skb_reserve(skb, NET_IP_ALIGN);
911 /* copy the packet from the receive buffer */ 912 /* copy the packet from the receive buffer */
912 enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv), 913 enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv),
913 len, skb_put(skb, len)); 914 len, skb_put(skb, len));
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 0fbf1bbbaee9..d7a3ea88eddb 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1253,7 +1253,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
1253 1253
1254 /* Setup interrupt handlers. */ 1254 /* Setup interrupt handlers. */
1255 for (idp = id; idp->name; idp++) { 1255 for (idp = id; idp->name; idp++) {
1256 if (request_irq(idp->irq, idp->handler, 0, idp->name, dev) != 0) 1256 if (request_irq(idp->irq, idp->handler, IRQF_DISABLED, idp->name, dev) != 0)
1257 printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq); 1257 printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq);
1258 } 1258 }
1259 1259
@@ -1382,7 +1382,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
1382 1382
1383 /* Setup interrupt handlers. */ 1383 /* Setup interrupt handlers. */
1384 for (idp = id; idp->name; idp++) { 1384 for (idp = id; idp->name; idp++) {
1385 if (request_irq(b+idp->irq, fec_enet_interrupt, 0, idp->name, dev) != 0) 1385 if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name, dev) != 0)
1386 printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); 1386 printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq);
1387 } 1387 }
1388 1388
@@ -1553,7 +1553,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
1553 1553
1554 /* Setup interrupt handlers. */ 1554 /* Setup interrupt handlers. */
1555 for (idp = id; idp->name; idp++) { 1555 for (idp = id; idp->name; idp++) {
1556 if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0) 1556 if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0)
1557 printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); 1557 printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq);
1558 } 1558 }
1559 1559
@@ -1680,7 +1680,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
1680 1680
1681 /* Setup interrupt handlers. */ 1681 /* Setup interrupt handlers. */
1682 for (idp = id; idp->name; idp++) { 1682 for (idp = id; idp->name; idp++) {
1683 if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0) 1683 if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0)
1684 printk("FEC: Could not allocate %s IRQ(%d)!\n", 1684 printk("FEC: Could not allocate %s IRQ(%d)!\n",
1685 idp->name, b+idp->irq); 1685 idp->name, b+idp->irq);
1686 } 1686 }
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 42d94edeee26..af869cf9ae7d 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -946,16 +946,11 @@ static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
946{ 946{
947 struct fs_enet_private *fep = netdev_priv(dev); 947 struct fs_enet_private *fep = netdev_priv(dev);
948 struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data; 948 struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
949 unsigned long flags;
950 int rc;
951 949
952 if (!netif_running(dev)) 950 if (!netif_running(dev))
953 return -EINVAL; 951 return -EINVAL;
954 952
955 spin_lock_irqsave(&fep->lock, flags); 953 return phy_mii_ioctl(fep->phydev, mii, cmd);
956 rc = phy_mii_ioctl(fep->phydev, mii, cmd);
957 spin_unlock_irqrestore(&fep->lock, flags);
958 return rc;
959} 954}
960 955
961extern int fs_mii_connect(struct net_device *dev); 956extern int fs_mii_connect(struct net_device *dev);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4244fc282f21..718cf77e345a 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -605,7 +605,7 @@ void stop_gfar(struct net_device *dev)
605 605
606 free_skb_resources(priv); 606 free_skb_resources(priv);
607 607
608 dma_free_coherent(NULL, 608 dma_free_coherent(&dev->dev,
609 sizeof(struct txbd8)*priv->tx_ring_size 609 sizeof(struct txbd8)*priv->tx_ring_size
610 + sizeof(struct rxbd8)*priv->rx_ring_size, 610 + sizeof(struct rxbd8)*priv->rx_ring_size,
611 priv->tx_bd_base, 611 priv->tx_bd_base,
@@ -626,7 +626,7 @@ static void free_skb_resources(struct gfar_private *priv)
626 for (i = 0; i < priv->tx_ring_size; i++) { 626 for (i = 0; i < priv->tx_ring_size; i++) {
627 627
628 if (priv->tx_skbuff[i]) { 628 if (priv->tx_skbuff[i]) {
629 dma_unmap_single(NULL, txbdp->bufPtr, 629 dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
630 txbdp->length, 630 txbdp->length,
631 DMA_TO_DEVICE); 631 DMA_TO_DEVICE);
632 dev_kfree_skb_any(priv->tx_skbuff[i]); 632 dev_kfree_skb_any(priv->tx_skbuff[i]);
@@ -643,7 +643,7 @@ static void free_skb_resources(struct gfar_private *priv)
643 if(priv->rx_skbuff != NULL) { 643 if(priv->rx_skbuff != NULL) {
644 for (i = 0; i < priv->rx_ring_size; i++) { 644 for (i = 0; i < priv->rx_ring_size; i++) {
645 if (priv->rx_skbuff[i]) { 645 if (priv->rx_skbuff[i]) {
646 dma_unmap_single(NULL, rxbdp->bufPtr, 646 dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr,
647 priv->rx_buffer_size, 647 priv->rx_buffer_size,
648 DMA_FROM_DEVICE); 648 DMA_FROM_DEVICE);
649 649
@@ -708,7 +708,7 @@ int startup_gfar(struct net_device *dev)
708 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 708 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
709 709
710 /* Allocate memory for the buffer descriptors */ 710 /* Allocate memory for the buffer descriptors */
711 vaddr = (unsigned long) dma_alloc_coherent(NULL, 711 vaddr = (unsigned long) dma_alloc_coherent(&dev->dev,
712 sizeof (struct txbd8) * priv->tx_ring_size + 712 sizeof (struct txbd8) * priv->tx_ring_size +
713 sizeof (struct rxbd8) * priv->rx_ring_size, 713 sizeof (struct rxbd8) * priv->rx_ring_size,
714 &addr, GFP_KERNEL); 714 &addr, GFP_KERNEL);
@@ -919,7 +919,7 @@ err_irq_fail:
919rx_skb_fail: 919rx_skb_fail:
920 free_skb_resources(priv); 920 free_skb_resources(priv);
921tx_skb_fail: 921tx_skb_fail:
922 dma_free_coherent(NULL, 922 dma_free_coherent(&dev->dev,
923 sizeof(struct txbd8)*priv->tx_ring_size 923 sizeof(struct txbd8)*priv->tx_ring_size
924 + sizeof(struct rxbd8)*priv->rx_ring_size, 924 + sizeof(struct rxbd8)*priv->rx_ring_size,
925 priv->tx_bd_base, 925 priv->tx_bd_base,
@@ -1053,7 +1053,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1053 1053
1054 /* Set buffer length and pointer */ 1054 /* Set buffer length and pointer */
1055 txbdp->length = skb->len; 1055 txbdp->length = skb->len;
1056 txbdp->bufPtr = dma_map_single(NULL, skb->data, 1056 txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
1057 skb->len, DMA_TO_DEVICE); 1057 skb->len, DMA_TO_DEVICE);
1058 1058
1059 /* Save the skb pointer so we can free it later */ 1059 /* Save the skb pointer so we can free it later */
@@ -1332,7 +1332,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
1332 */ 1332 */
1333 skb_reserve(skb, alignamount); 1333 skb_reserve(skb, alignamount);
1334 1334
1335 bdp->bufPtr = dma_map_single(NULL, skb->data, 1335 bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
1336 priv->rx_buffer_size, DMA_FROM_DEVICE); 1336 priv->rx_buffer_size, DMA_FROM_DEVICE);
1337 1337
1338 bdp->length = 0; 1338 bdp->length = 0;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index bff280eff5e3..6a1f23092099 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -439,7 +439,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
439 err = igb_request_msix(adapter); 439 err = igb_request_msix(adapter);
440 if (!err) { 440 if (!err) {
441 /* enable IAM, auto-mask, 441 /* enable IAM, auto-mask,
442 * DO NOT USE EIAME or IAME in legacy mode */ 442 * DO NOT USE EIAM or IAM in legacy mode */
443 wr32(E1000_IAM, IMS_ENABLE_MASK); 443 wr32(E1000_IAM, IMS_ENABLE_MASK);
444 goto request_done; 444 goto request_done;
445 } 445 }
@@ -465,14 +465,9 @@ static int igb_request_irq(struct igb_adapter *adapter)
465 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED, 465 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
466 netdev->name, netdev); 466 netdev->name, netdev);
467 467
468 if (err) { 468 if (err)
469 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n", 469 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
470 err); 470 err);
471 goto request_done;
472 }
473
474 /* enable IAM, auto-mask */
475 wr32(E1000_IAM, IMS_ENABLE_MASK);
476 471
477request_done: 472request_done:
478 return err; 473 return err;
@@ -821,7 +816,8 @@ void igb_reset(struct igb_adapter *adapter)
821 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); 816 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
822 817
823 igb_reset_adaptive(&adapter->hw); 818 igb_reset_adaptive(&adapter->hw);
824 adapter->hw.phy.ops.get_phy_info(&adapter->hw); 819 if (adapter->hw.phy.ops.get_phy_info)
820 adapter->hw.phy.ops.get_phy_info(&adapter->hw);
825} 821}
826 822
827/** 823/**
@@ -2057,7 +2053,8 @@ static void igb_set_multi(struct net_device *netdev)
2057static void igb_update_phy_info(unsigned long data) 2053static void igb_update_phy_info(unsigned long data)
2058{ 2054{
2059 struct igb_adapter *adapter = (struct igb_adapter *) data; 2055 struct igb_adapter *adapter = (struct igb_adapter *) data;
2060 adapter->hw.phy.ops.get_phy_info(&adapter->hw); 2056 if (adapter->hw.phy.ops.get_phy_info)
2057 adapter->hw.phy.ops.get_phy_info(&adapter->hw);
2061} 2058}
2062 2059
2063/** 2060/**
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 53a9fd086f96..75f3a68ee354 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -67,6 +67,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
67 {"rx_over_errors", IXGB_STAT(net_stats.rx_over_errors)}, 67 {"rx_over_errors", IXGB_STAT(net_stats.rx_over_errors)},
68 {"rx_crc_errors", IXGB_STAT(net_stats.rx_crc_errors)}, 68 {"rx_crc_errors", IXGB_STAT(net_stats.rx_crc_errors)},
69 {"rx_frame_errors", IXGB_STAT(net_stats.rx_frame_errors)}, 69 {"rx_frame_errors", IXGB_STAT(net_stats.rx_frame_errors)},
70 {"rx_no_buffer_count", IXGB_STAT(stats.rnbc)},
70 {"rx_fifo_errors", IXGB_STAT(net_stats.rx_fifo_errors)}, 71 {"rx_fifo_errors", IXGB_STAT(net_stats.rx_fifo_errors)},
71 {"rx_missed_errors", IXGB_STAT(net_stats.rx_missed_errors)}, 72 {"rx_missed_errors", IXGB_STAT(net_stats.rx_missed_errors)},
72 {"tx_aborted_errors", IXGB_STAT(net_stats.tx_aborted_errors)}, 73 {"tx_aborted_errors", IXGB_STAT(net_stats.tx_aborted_errors)},
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 23d0a4afe0e1..c2095ce531c9 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -2133,7 +2133,7 @@ static void ixgbe_watchdog(unsigned long data)
2133 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 2133 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
2134 "10 Gbps" : 2134 "10 Gbps" :
2135 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 2135 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
2136 "1 Gpbs" : "unknown speed")), 2136 "1 Gbps" : "unknown speed")),
2137 ((FLOW_RX && FLOW_TX) ? "RX/TX" : 2137 ((FLOW_RX && FLOW_TX) ? "RX/TX" :
2138 (FLOW_RX ? "RX" : 2138 (FLOW_RX ? "RX" :
2139 (FLOW_TX ? "TX" : "None")))); 2139 (FLOW_TX ? "TX" : "None"))));
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 81bf005ff280..1d210ed46130 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -148,7 +148,7 @@ static void macb_handle_link_change(struct net_device *dev)
148 148
149 if (phydev->duplex) 149 if (phydev->duplex)
150 reg |= MACB_BIT(FD); 150 reg |= MACB_BIT(FD);
151 if (phydev->speed) 151 if (phydev->speed == SPEED_100)
152 reg |= MACB_BIT(SPD); 152 reg |= MACB_BIT(SPD);
153 153
154 macb_writel(bp, NCFGR, reg); 154 macb_writel(bp, NCFGR, reg);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index b528ce77c406..771139e283af 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -2104,6 +2104,7 @@ MODULE_LICENSE("GPL");
2104MODULE_AUTHOR( "Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani" 2104MODULE_AUTHOR( "Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani"
2105 " and Dale Farnsworth"); 2105 " and Dale Farnsworth");
2106MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); 2106MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
2107MODULE_ALIAS("platform:mv643xx_eth");
2107 2108
2108/* 2109/*
2109 * The second part is the low level driver of the gigE ethernet ports. 2110 * The second part is the low level driver of the gigE ethernet ports.
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index e98ce1e4965b..d11ba61baa4f 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -1616,12 +1616,13 @@ static int niu_enable_alt_mac(struct niu *np, int index, int on)
1616 if (index >= niu_num_alt_addr(np)) 1616 if (index >= niu_num_alt_addr(np))
1617 return -EINVAL; 1617 return -EINVAL;
1618 1618
1619 if (np->flags & NIU_FLAGS_XMAC) 1619 if (np->flags & NIU_FLAGS_XMAC) {
1620 reg = XMAC_ADDR_CMPEN; 1620 reg = XMAC_ADDR_CMPEN;
1621 else 1621 mask = 1 << index;
1622 } else {
1622 reg = BMAC_ADDR_CMPEN; 1623 reg = BMAC_ADDR_CMPEN;
1623 1624 mask = 1 << (index + 1);
1624 mask = 1 << index; 1625 }
1625 1626
1626 val = nr64_mac(reg); 1627 val = nr64_mac(reg);
1627 if (on) 1628 if (on)
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 0e8626adc573..59dc05fcd371 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -499,7 +499,7 @@
499#define BMAC_ADDR2 0x00110UL 499#define BMAC_ADDR2 0x00110UL
500#define BMAC_ADDR2_ADDR2 0x000000000000ffffULL 500#define BMAC_ADDR2_ADDR2 0x000000000000ffffULL
501 501
502#define BMAC_NUM_ALT_ADDR 7 502#define BMAC_NUM_ALT_ADDR 6
503 503
504#define BMAC_ALT_ADDR0(NUM) (0x00118UL + (NUM)*0x18UL) 504#define BMAC_ALT_ADDR0(NUM) (0x00118UL + (NUM)*0x18UL)
505#define BMAC_ALT_ADDR0_ADDR0 0x000000000000ffffULL 505#define BMAC_ALT_ADDR0_ADDR0 0x000000000000ffffULL
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index e8a63e483a2b..ce95c5d168fe 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1268,7 +1268,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
1268 } 1268 }
1269 } 1269 }
1270 1270
1271 if (interrupts && ei_debug) 1271 if (interrupts && ei_debug > 3)
1272 { 1272 {
1273 handled = 1; 1273 handled = 1;
1274 if (nr_serviced >= MAX_SERVICE) 1274 if (nr_serviced >= MAX_SERVICE)
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 6323988dfa1d..fd8158a86f64 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -590,6 +590,13 @@ static int pcnet_config(struct pcmcia_device *link)
590 dev->if_port = 0; 590 dev->if_port = 0;
591 } 591 }
592 592
593 if ((link->conf.ConfigBase == 0x03c0)
 594 && (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) {
595 printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n");
596 printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n");
597 goto failed;
598 }
599
593 local_hw_info = get_hwinfo(link); 600 local_hw_info = get_hwinfo(link);
594 if (local_hw_info == NULL) 601 if (local_hw_info == NULL)
595 local_hw_info = get_prom(link); 602 local_hw_info = get_prom(link);
@@ -1567,12 +1574,11 @@ static struct pcmcia_device_id pcnet_ids[] = {
1567 PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0145), 1574 PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0145),
1568 PCMCIA_DEVICE_MANF_CARD(0x0149, 0x0230), 1575 PCMCIA_DEVICE_MANF_CARD(0x0149, 0x0230),
1569 PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530), 1576 PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530),
1570/* PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab), conflict with axnet_cs */ 1577 PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab),
1571 PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110), 1578 PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110),
1572 PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328), 1579 PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328),
1573 PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041), 1580 PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041),
1574 PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452), 1581 PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452),
1575/* PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202), conflict with axnet_cs */
1576 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300), 1582 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300),
1577 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0307), 1583 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0307),
1578 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a), 1584 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a),
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index f4ca0591231d..3ac8529bb92c 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -67,6 +67,7 @@ config REALTEK_PHY
67 67
68config FIXED_PHY 68config FIXED_PHY
69 bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" 69 bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
70 depends on PHYLIB=y
70 ---help--- 71 ---help---
71 Adds the platform "fixed" MDIO Bus to cover the boards that use 72 Adds the platform "fixed" MDIO Bus to cover the boards that use
72 PHYs that are not connected to the real MDIO bus. 73 PHYs that are not connected to the real MDIO bus.
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 7ed632db00d7..d926168bc780 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -37,6 +37,7 @@
37 37
38#define MII_DM9161_SCR 0x10 38#define MII_DM9161_SCR 0x10
39#define MII_DM9161_SCR_INIT 0x0610 39#define MII_DM9161_SCR_INIT 0x0610
40#define MII_DM9161_SCR_RMII 0x0100
40 41
41/* DM9161 Interrupt Register */ 42/* DM9161 Interrupt Register */
42#define MII_DM9161_INTR 0x15 43#define MII_DM9161_INTR 0x15
@@ -103,7 +104,7 @@ static int dm9161_config_aneg(struct phy_device *phydev)
103 104
104static int dm9161_config_init(struct phy_device *phydev) 105static int dm9161_config_init(struct phy_device *phydev)
105{ 106{
106 int err; 107 int err, temp;
107 108
108 /* Isolate the PHY */ 109 /* Isolate the PHY */
109 err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE); 110 err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE);
@@ -111,9 +112,19 @@ static int dm9161_config_init(struct phy_device *phydev)
111 if (err < 0) 112 if (err < 0)
112 return err; 113 return err;
113 114
114 /* Do not bypass the scrambler/descrambler */ 115 switch (phydev->interface) {
115 err = phy_write(phydev, MII_DM9161_SCR, MII_DM9161_SCR_INIT); 116 case PHY_INTERFACE_MODE_MII:
117 temp = MII_DM9161_SCR_INIT;
118 break;
119 case PHY_INTERFACE_MODE_RMII:
120 temp = MII_DM9161_SCR_INIT | MII_DM9161_SCR_RMII;
121 break;
122 default:
123 return -EINVAL;
124 }
116 125
126 /* Do not bypass the scrambler/descrambler */
127 err = phy_write(phydev, MII_DM9161_SCR, temp);
117 if (err < 0) 128 if (err < 0)
118 return err; 129 return err;
119 130
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 6e9f619c491f..963630c65ca9 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -49,13 +49,13 @@ int mdiobus_register(struct mii_bus *bus)
49 int i; 49 int i;
50 int err = 0; 50 int err = 0;
51 51
52 mutex_init(&bus->mdio_lock);
53
54 if (NULL == bus || NULL == bus->name || 52 if (NULL == bus || NULL == bus->name ||
55 NULL == bus->read || 53 NULL == bus->read ||
56 NULL == bus->write) 54 NULL == bus->write)
57 return -EINVAL; 55 return -EINVAL;
58 56
57 mutex_init(&bus->mdio_lock);
58
59 if (bus->reset) 59 if (bus->reset)
60 bus->reset(bus); 60 bus->reset(bus);
61 61
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index e0b072d9fdb7..3d10ca050b79 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -302,14 +302,14 @@ pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
302 struct pppol2tp_session *session; 302 struct pppol2tp_session *session;
303 struct hlist_node *walk; 303 struct hlist_node *walk;
304 304
305 read_lock(&tunnel->hlist_lock); 305 read_lock_bh(&tunnel->hlist_lock);
306 hlist_for_each_entry(session, walk, session_list, hlist) { 306 hlist_for_each_entry(session, walk, session_list, hlist) {
307 if (session->tunnel_addr.s_session == session_id) { 307 if (session->tunnel_addr.s_session == session_id) {
308 read_unlock(&tunnel->hlist_lock); 308 read_unlock_bh(&tunnel->hlist_lock);
309 return session; 309 return session;
310 } 310 }
311 } 311 }
312 read_unlock(&tunnel->hlist_lock); 312 read_unlock_bh(&tunnel->hlist_lock);
313 313
314 return NULL; 314 return NULL;
315} 315}
@@ -320,14 +320,14 @@ static struct pppol2tp_tunnel *pppol2tp_tunnel_find(u16 tunnel_id)
320{ 320{
321 struct pppol2tp_tunnel *tunnel = NULL; 321 struct pppol2tp_tunnel *tunnel = NULL;
322 322
323 read_lock(&pppol2tp_tunnel_list_lock); 323 read_lock_bh(&pppol2tp_tunnel_list_lock);
324 list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) { 324 list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) {
325 if (tunnel->stats.tunnel_id == tunnel_id) { 325 if (tunnel->stats.tunnel_id == tunnel_id) {
326 read_unlock(&pppol2tp_tunnel_list_lock); 326 read_unlock_bh(&pppol2tp_tunnel_list_lock);
327 return tunnel; 327 return tunnel;
328 } 328 }
329 } 329 }
330 read_unlock(&pppol2tp_tunnel_list_lock); 330 read_unlock_bh(&pppol2tp_tunnel_list_lock);
331 331
332 return NULL; 332 return NULL;
333} 333}
@@ -342,10 +342,11 @@ static struct pppol2tp_tunnel *pppol2tp_tunnel_find(u16 tunnel_id)
342static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb) 342static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
343{ 343{
344 struct sk_buff *skbp; 344 struct sk_buff *skbp;
345 struct sk_buff *tmp;
345 u16 ns = PPPOL2TP_SKB_CB(skb)->ns; 346 u16 ns = PPPOL2TP_SKB_CB(skb)->ns;
346 347
347 spin_lock(&session->reorder_q.lock); 348 spin_lock_bh(&session->reorder_q.lock);
348 skb_queue_walk(&session->reorder_q, skbp) { 349 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
349 if (PPPOL2TP_SKB_CB(skbp)->ns > ns) { 350 if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
350 __skb_insert(skb, skbp->prev, skbp, &session->reorder_q); 351 __skb_insert(skb, skbp->prev, skbp, &session->reorder_q);
351 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, 352 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
@@ -360,7 +361,7 @@ static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_
360 __skb_queue_tail(&session->reorder_q, skb); 361 __skb_queue_tail(&session->reorder_q, skb);
361 362
362out: 363out:
363 spin_unlock(&session->reorder_q.lock); 364 spin_unlock_bh(&session->reorder_q.lock);
364} 365}
365 366
366/* Dequeue a single skb. 367/* Dequeue a single skb.
@@ -371,10 +372,9 @@ static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct s
371 int length = PPPOL2TP_SKB_CB(skb)->length; 372 int length = PPPOL2TP_SKB_CB(skb)->length;
372 struct sock *session_sock = NULL; 373 struct sock *session_sock = NULL;
373 374
374 /* We're about to requeue the skb, so unlink it and return resources 375 /* We're about to requeue the skb, so return resources
375 * to its current owner (a socket receive buffer). 376 * to its current owner (a socket receive buffer).
376 */ 377 */
377 skb_unlink(skb, &session->reorder_q);
378 skb_orphan(skb); 378 skb_orphan(skb);
379 379
380 tunnel->stats.rx_packets++; 380 tunnel->stats.rx_packets++;
@@ -442,7 +442,7 @@ static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
442 * expect to send up next, dequeue it and any other 442 * expect to send up next, dequeue it and any other
443 * in-sequence packets behind it. 443 * in-sequence packets behind it.
444 */ 444 */
445 spin_lock(&session->reorder_q.lock); 445 spin_lock_bh(&session->reorder_q.lock);
446 skb_queue_walk_safe(&session->reorder_q, skb, tmp) { 446 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
447 if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) { 447 if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) {
448 session->stats.rx_seq_discards++; 448 session->stats.rx_seq_discards++;
@@ -455,6 +455,7 @@ static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
455 skb_queue_len(&session->reorder_q)); 455 skb_queue_len(&session->reorder_q));
456 __skb_unlink(skb, &session->reorder_q); 456 __skb_unlink(skb, &session->reorder_q);
457 kfree_skb(skb); 457 kfree_skb(skb);
458 sock_put(session->sock);
458 continue; 459 continue;
459 } 460 }
460 461
@@ -469,13 +470,18 @@ static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
469 goto out; 470 goto out;
470 } 471 }
471 } 472 }
472 spin_unlock(&session->reorder_q.lock); 473 __skb_unlink(skb, &session->reorder_q);
474
475 /* Process the skb. We release the queue lock while we
476 * do so to let other contexts process the queue.
477 */
478 spin_unlock_bh(&session->reorder_q.lock);
473 pppol2tp_recv_dequeue_skb(session, skb); 479 pppol2tp_recv_dequeue_skb(session, skb);
474 spin_lock(&session->reorder_q.lock); 480 spin_lock_bh(&session->reorder_q.lock);
475 } 481 }
476 482
477out: 483out:
478 spin_unlock(&session->reorder_q.lock); 484 spin_unlock_bh(&session->reorder_q.lock);
479} 485}
480 486
481/* Internal receive frame. Do the real work of receiving an L2TP data frame 487/* Internal receive frame. Do the real work of receiving an L2TP data frame
@@ -1058,7 +1064,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1058 1064
1059 /* Get routing info from the tunnel socket */ 1065 /* Get routing info from the tunnel socket */
1060 dst_release(skb->dst); 1066 dst_release(skb->dst);
1061 skb->dst = sk_dst_get(sk_tun); 1067 skb->dst = dst_clone(__sk_dst_get(sk_tun));
1062 skb_orphan(skb); 1068 skb_orphan(skb);
1063 skb->sk = sk_tun; 1069 skb->sk = sk_tun;
1064 1070
@@ -1106,10 +1112,12 @@ static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
1106 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1112 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1107 "%s: closing all sessions...\n", tunnel->name); 1113 "%s: closing all sessions...\n", tunnel->name);
1108 1114
1109 write_lock(&tunnel->hlist_lock); 1115 write_lock_bh(&tunnel->hlist_lock);
1110 for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) { 1116 for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
1111again: 1117again:
1112 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { 1118 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
1119 struct sk_buff *skb;
1120
1113 session = hlist_entry(walk, struct pppol2tp_session, hlist); 1121 session = hlist_entry(walk, struct pppol2tp_session, hlist);
1114 1122
1115 sk = session->sock; 1123 sk = session->sock;
@@ -1126,7 +1134,7 @@ again:
1126 * disappear as we're jumping between locks. 1134 * disappear as we're jumping between locks.
1127 */ 1135 */
1128 sock_hold(sk); 1136 sock_hold(sk);
1129 write_unlock(&tunnel->hlist_lock); 1137 write_unlock_bh(&tunnel->hlist_lock);
1130 lock_sock(sk); 1138 lock_sock(sk);
1131 1139
1132 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { 1140 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
@@ -1138,7 +1146,10 @@ again:
1138 /* Purge any queued data */ 1146 /* Purge any queued data */
1139 skb_queue_purge(&sk->sk_receive_queue); 1147 skb_queue_purge(&sk->sk_receive_queue);
1140 skb_queue_purge(&sk->sk_write_queue); 1148 skb_queue_purge(&sk->sk_write_queue);
1141 skb_queue_purge(&session->reorder_q); 1149 while ((skb = skb_dequeue(&session->reorder_q))) {
1150 kfree_skb(skb);
1151 sock_put(sk);
1152 }
1142 1153
1143 release_sock(sk); 1154 release_sock(sk);
1144 sock_put(sk); 1155 sock_put(sk);
@@ -1148,11 +1159,11 @@ again:
1148 * list so we are guaranteed to make forward 1159 * list so we are guaranteed to make forward
1149 * progress. 1160 * progress.
1150 */ 1161 */
1151 write_lock(&tunnel->hlist_lock); 1162 write_lock_bh(&tunnel->hlist_lock);
1152 goto again; 1163 goto again;
1153 } 1164 }
1154 } 1165 }
1155 write_unlock(&tunnel->hlist_lock); 1166 write_unlock_bh(&tunnel->hlist_lock);
1156} 1167}
1157 1168
1158/* Really kill the tunnel. 1169/* Really kill the tunnel.
@@ -1161,9 +1172,9 @@ again:
1161static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel) 1172static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
1162{ 1173{
1163 /* Remove from socket list */ 1174 /* Remove from socket list */
1164 write_lock(&pppol2tp_tunnel_list_lock); 1175 write_lock_bh(&pppol2tp_tunnel_list_lock);
1165 list_del_init(&tunnel->list); 1176 list_del_init(&tunnel->list);
1166 write_unlock(&pppol2tp_tunnel_list_lock); 1177 write_unlock_bh(&pppol2tp_tunnel_list_lock);
1167 1178
1168 atomic_dec(&pppol2tp_tunnel_count); 1179 atomic_dec(&pppol2tp_tunnel_count);
1169 kfree(tunnel); 1180 kfree(tunnel);
@@ -1239,9 +1250,9 @@ static void pppol2tp_session_destruct(struct sock *sk)
1239 /* Delete the session socket from the 1250 /* Delete the session socket from the
1240 * hash 1251 * hash
1241 */ 1252 */
1242 write_lock(&tunnel->hlist_lock); 1253 write_lock_bh(&tunnel->hlist_lock);
1243 hlist_del_init(&session->hlist); 1254 hlist_del_init(&session->hlist);
1244 write_unlock(&tunnel->hlist_lock); 1255 write_unlock_bh(&tunnel->hlist_lock);
1245 1256
1246 atomic_dec(&pppol2tp_session_count); 1257 atomic_dec(&pppol2tp_session_count);
1247 } 1258 }
@@ -1386,9 +1397,9 @@ static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id,
1386 1397
1387 /* Add tunnel to our list */ 1398 /* Add tunnel to our list */
1388 INIT_LIST_HEAD(&tunnel->list); 1399 INIT_LIST_HEAD(&tunnel->list);
1389 write_lock(&pppol2tp_tunnel_list_lock); 1400 write_lock_bh(&pppol2tp_tunnel_list_lock);
1390 list_add(&tunnel->list, &pppol2tp_tunnel_list); 1401 list_add(&tunnel->list, &pppol2tp_tunnel_list);
1391 write_unlock(&pppol2tp_tunnel_list_lock); 1402 write_unlock_bh(&pppol2tp_tunnel_list_lock);
1392 atomic_inc(&pppol2tp_tunnel_count); 1403 atomic_inc(&pppol2tp_tunnel_count);
1393 1404
1394 /* Bump the reference count. The tunnel context is deleted 1405 /* Bump the reference count. The tunnel context is deleted
@@ -1593,11 +1604,11 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
1593 sk->sk_user_data = session; 1604 sk->sk_user_data = session;
1594 1605
1595 /* Add session to the tunnel's hash list */ 1606 /* Add session to the tunnel's hash list */
1596 write_lock(&tunnel->hlist_lock); 1607 write_lock_bh(&tunnel->hlist_lock);
1597 hlist_add_head(&session->hlist, 1608 hlist_add_head(&session->hlist,
1598 pppol2tp_session_id_hash(tunnel, 1609 pppol2tp_session_id_hash(tunnel,
1599 session->tunnel_addr.s_session)); 1610 session->tunnel_addr.s_session));
1600 write_unlock(&tunnel->hlist_lock); 1611 write_unlock_bh(&tunnel->hlist_lock);
1601 1612
1602 atomic_inc(&pppol2tp_session_count); 1613 atomic_inc(&pppol2tp_session_count);
1603 1614
@@ -2199,7 +2210,7 @@ static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, str
2199 int next = 0; 2210 int next = 0;
2200 int i; 2211 int i;
2201 2212
2202 read_lock(&tunnel->hlist_lock); 2213 read_lock_bh(&tunnel->hlist_lock);
2203 for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) { 2214 for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) {
2204 hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) { 2215 hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) {
2205 if (curr == NULL) { 2216 if (curr == NULL) {
@@ -2217,7 +2228,7 @@ static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, str
2217 } 2228 }
2218 } 2229 }
2219out: 2230out:
2220 read_unlock(&tunnel->hlist_lock); 2231 read_unlock_bh(&tunnel->hlist_lock);
2221 if (!found) 2232 if (!found)
2222 session = NULL; 2233 session = NULL;
2223 2234
@@ -2228,13 +2239,13 @@ static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_tunnel *curr)
2228{ 2239{
2229 struct pppol2tp_tunnel *tunnel = NULL; 2240 struct pppol2tp_tunnel *tunnel = NULL;
2230 2241
2231 read_lock(&pppol2tp_tunnel_list_lock); 2242 read_lock_bh(&pppol2tp_tunnel_list_lock);
2232 if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) { 2243 if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) {
2233 goto out; 2244 goto out;
2234 } 2245 }
2235 tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list); 2246 tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
2236out: 2247out:
2237 read_unlock(&pppol2tp_tunnel_list_lock); 2248 read_unlock_bh(&pppol2tp_tunnel_list_lock);
2238 2249
2239 return tunnel; 2250 return tunnel;
2240} 2251}
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 750d2a99cb4f..daf5abab9534 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -2690,6 +2690,7 @@ int gelic_wl_driver_probe(struct gelic_card *card)
2690 return -ENOMEM; 2690 return -ENOMEM;
2691 2691
2692 /* setup net_device structure */ 2692 /* setup net_device structure */
2693 SET_NETDEV_DEV(netdev, &card->dev->core);
2693 gelic_wl_setup_netdev_ops(netdev); 2694 gelic_wl_setup_netdev_ops(netdev);
2694 2695
2695 /* setup some of net_device and register it */ 2696 /* setup some of net_device and register it */
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 6179a0a2032c..c72787adeba3 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -1088,7 +1088,7 @@ static int s2io_print_pci_mode(struct s2io_nic *nic)
1088 * '-1' on failure 1088 * '-1' on failure
1089 */ 1089 */
1090 1090
1091int init_tti(struct s2io_nic *nic, int link) 1091static int init_tti(struct s2io_nic *nic, int link)
1092{ 1092{
1093 struct XENA_dev_config __iomem *bar0 = nic->bar0; 1093 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1094 register u64 val64 = 0; 1094 register u64 val64 = 0;
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 202fdf356621..20745fd4e973 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1633,13 +1633,18 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
1633static int __devinit sis190_get_mac_addr(struct pci_dev *pdev, 1633static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1634 struct net_device *dev) 1634 struct net_device *dev)
1635{ 1635{
1636 u8 from; 1636 int rc;
1637
1638 rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1639 if (rc < 0) {
1640 u8 reg;
1637 1641
1638 pci_read_config_byte(pdev, 0x73, &from); 1642 pci_read_config_byte(pdev, 0x73, &reg);
1639 1643
1640 return (from & 0x00000001) ? 1644 if (reg & 0x00000001)
1641 sis190_get_mac_addr_from_apc(pdev, dev) : 1645 rc = sis190_get_mac_addr_from_apc(pdev, dev);
1642 sis190_get_mac_addr_from_eeprom(pdev, dev); 1646 }
1647 return rc;
1643} 1648}
1644 1649
1645static void sis190_set_speed_auto(struct net_device *dev) 1650static void sis190_set_speed_auto(struct net_device *dev)
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 9a6295909e43..54c662690f65 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -572,8 +572,9 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
572 default: 572 default:
573 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */ 573 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
574 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL; 574 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
575
575 /* turn off the Rx LED (LED_RX) */ 576 /* turn off the Rx LED (LED_RX) */
576 ledover &= ~PHY_M_LED_MO_RX; 577 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
577 } 578 }
578 579
579 if (hw->chip_id == CHIP_ID_YUKON_EC_U && 580 if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
@@ -602,7 +603,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
602 603
603 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) { 604 if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
604 /* turn on 100 Mbps LED (LED_LINK100) */ 605 /* turn on 100 Mbps LED (LED_LINK100) */
605 ledover |= PHY_M_LED_MO_100; 606 ledover |= PHY_M_LED_MO_100(MO_LED_ON);
606 } 607 }
607 608
608 if (ledover) 609 if (ledover)
@@ -3322,82 +3323,80 @@ static void sky2_set_multicast(struct net_device *dev)
3322/* Can have one global because blinking is controlled by 3323/* Can have one global because blinking is controlled by
3323 * ethtool and that is always under RTNL mutex 3324 * ethtool and that is always under RTNL mutex
3324 */ 3325 */
3325static void sky2_led(struct sky2_hw *hw, unsigned port, int on) 3326static void sky2_led(struct sky2_port *sky2, enum led_mode mode)
3326{ 3327{
3327 u16 pg; 3328 struct sky2_hw *hw = sky2->hw;
3329 unsigned port = sky2->port;
3328 3330
3329 switch (hw->chip_id) { 3331 spin_lock_bh(&sky2->phy_lock);
3330 case CHIP_ID_YUKON_XL: 3332 if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
3333 hw->chip_id == CHIP_ID_YUKON_EX ||
3334 hw->chip_id == CHIP_ID_YUKON_SUPR) {
3335 u16 pg;
3331 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); 3336 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
3332 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); 3337 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
3333 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3334 on ? (PHY_M_LEDC_LOS_CTRL(1) |
3335 PHY_M_LEDC_INIT_CTRL(7) |
3336 PHY_M_LEDC_STA1_CTRL(7) |
3337 PHY_M_LEDC_STA0_CTRL(7))
3338 : 0);
3339 3338
3340 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); 3339 switch (mode) {
3341 break; 3340 case MO_LED_OFF:
3341 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3342 PHY_M_LEDC_LOS_CTRL(8) |
3343 PHY_M_LEDC_INIT_CTRL(8) |
3344 PHY_M_LEDC_STA1_CTRL(8) |
3345 PHY_M_LEDC_STA0_CTRL(8));
3346 break;
3347 case MO_LED_ON:
3348 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3349 PHY_M_LEDC_LOS_CTRL(9) |
3350 PHY_M_LEDC_INIT_CTRL(9) |
3351 PHY_M_LEDC_STA1_CTRL(9) |
3352 PHY_M_LEDC_STA0_CTRL(9));
3353 break;
3354 case MO_LED_BLINK:
3355 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3356 PHY_M_LEDC_LOS_CTRL(0xa) |
3357 PHY_M_LEDC_INIT_CTRL(0xa) |
3358 PHY_M_LEDC_STA1_CTRL(0xa) |
3359 PHY_M_LEDC_STA0_CTRL(0xa));
3360 break;
3361 case MO_LED_NORM:
3362 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3363 PHY_M_LEDC_LOS_CTRL(1) |
3364 PHY_M_LEDC_INIT_CTRL(8) |
3365 PHY_M_LEDC_STA1_CTRL(7) |
3366 PHY_M_LEDC_STA0_CTRL(7));
3367 }
3342 3368
3343 default: 3369 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
3344 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); 3370 } else
3345 gm_phy_write(hw, port, PHY_MARV_LED_OVER, 3371 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
3346 on ? PHY_M_LED_ALL : 0); 3372 PHY_M_LED_MO_DUP(mode) |
3347 } 3373 PHY_M_LED_MO_10(mode) |
3374 PHY_M_LED_MO_100(mode) |
3375 PHY_M_LED_MO_1000(mode) |
3376 PHY_M_LED_MO_RX(mode) |
3377 PHY_M_LED_MO_TX(mode));
3378
3379 spin_unlock_bh(&sky2->phy_lock);
3348} 3380}
3349 3381
3350/* blink LED's for finding board */ 3382/* blink LED's for finding board */
3351static int sky2_phys_id(struct net_device *dev, u32 data) 3383static int sky2_phys_id(struct net_device *dev, u32 data)
3352{ 3384{
3353 struct sky2_port *sky2 = netdev_priv(dev); 3385 struct sky2_port *sky2 = netdev_priv(dev);
3354 struct sky2_hw *hw = sky2->hw; 3386 unsigned int i;
3355 unsigned port = sky2->port;
3356 u16 ledctrl, ledover = 0;
3357 long ms;
3358 int interrupted;
3359 int onoff = 1;
3360 3387
3361 if (!data || data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ)) 3388 if (data == 0)
3362 ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT); 3389 data = UINT_MAX;
3363 else
3364 ms = data * 1000;
3365
3366 /* save initial values */
3367 spin_lock_bh(&sky2->phy_lock);
3368 if (hw->chip_id == CHIP_ID_YUKON_XL) {
3369 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
3370 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
3371 ledctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
3372 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
3373 } else {
3374 ledctrl = gm_phy_read(hw, port, PHY_MARV_LED_CTRL);
3375 ledover = gm_phy_read(hw, port, PHY_MARV_LED_OVER);
3376 }
3377
3378 interrupted = 0;
3379 while (!interrupted && ms > 0) {
3380 sky2_led(hw, port, onoff);
3381 onoff = !onoff;
3382
3383 spin_unlock_bh(&sky2->phy_lock);
3384 interrupted = msleep_interruptible(250);
3385 spin_lock_bh(&sky2->phy_lock);
3386
3387 ms -= 250;
3388 }
3389 3390
3390 /* resume regularly scheduled programming */ 3391 for (i = 0; i < data; i++) {
3391 if (hw->chip_id == CHIP_ID_YUKON_XL) { 3392 sky2_led(sky2, MO_LED_ON);
3392 u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); 3393 if (msleep_interruptible(500))
3393 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); 3394 break;
3394 gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ledctrl); 3395 sky2_led(sky2, MO_LED_OFF);
3395 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); 3396 if (msleep_interruptible(500))
3396 } else { 3397 break;
3397 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
3398 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
3399 } 3398 }
3400 spin_unlock_bh(&sky2->phy_lock); 3399 sky2_led(sky2, MO_LED_NORM);
3401 3400
3402 return 0; 3401 return 0;
3403} 3402}
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 5ab5c1c7c5aa..7bb3ba9bcbd8 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1318,18 +1318,21 @@ enum {
1318 BLINK_670MS = 4,/* 670 ms */ 1318 BLINK_670MS = 4,/* 670 ms */
1319}; 1319};
1320 1320
1321/**** PHY_MARV_LED_OVER 16 bit r/w LED control */ 1321/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/
1322enum { 1322#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */
1323 PHY_M_LED_MO_DUP = 3<<10,/* Bit 11..10: Duplex */ 1323
1324 PHY_M_LED_MO_10 = 3<<8, /* Bit 9.. 8: Link 10 */ 1324#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */
1325 PHY_M_LED_MO_100 = 3<<6, /* Bit 7.. 6: Link 100 */ 1325#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */
1326 PHY_M_LED_MO_1000 = 3<<4, /* Bit 5.. 4: Link 1000 */ 1326#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */
1327 PHY_M_LED_MO_RX = 3<<2, /* Bit 3.. 2: Rx */ 1327#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */
1328 PHY_M_LED_MO_TX = 3<<0, /* Bit 1.. 0: Tx */ 1328#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */
1329 1329#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */
1330 PHY_M_LED_ALL = PHY_M_LED_MO_DUP | PHY_M_LED_MO_10 1330
1331 | PHY_M_LED_MO_100 | PHY_M_LED_MO_1000 1331enum led_mode {
1332 | PHY_M_LED_MO_RX, 1332 MO_LED_NORM = 0,
1333 MO_LED_BLINK = 1,
1334 MO_LED_OFF = 2,
1335 MO_LED_ON = 3,
1333}; 1336};
1334 1337
1335/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/ 1338/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index db606b603884..26ffb67f1da2 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -8781,7 +8781,7 @@ static int tg3_phys_id(struct net_device *dev, u32 data)
8781 return -EAGAIN; 8781 return -EAGAIN;
8782 8782
8783 if (data == 0) 8783 if (data == 0)
8784 data = 2; 8784 data = UINT_MAX / 2;
8785 8785
8786 for (i = 0; i < (data * 2); i++) { 8786 for (i = 0; i < (data * 2); i++) {
8787 if ((i % 2) == 0) 8787 if ((i % 2) == 0)
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 3af5b92b48c8..0166407d7061 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1400,7 +1400,7 @@ static void TLan_SetMulticastList( struct net_device *dev )
1400 * 1400 *
1401 **************************************************************/ 1401 **************************************************************/
1402 1402
-u32 TLan_HandleInvalid( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleInvalid( struct net_device *dev, u16 host_int )
 {
	/* printk( "TLAN: Invalid interrupt on %s.\n", dev->name ); */
	return 0;
@@ -1432,7 +1432,7 @@ u32 TLan_HandleInvalid( struct net_device *dev, u16 host_int )
  *
  **************************************************************/

-u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
	int eoc = 0;
@@ -1518,7 +1518,7 @@ u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
  *
  **************************************************************/

-u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
 {
	TLan_ReadAndClearStats( dev, TLAN_RECORD );

@@ -1554,7 +1554,7 @@ u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
  *
  **************************************************************/

-u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
	u32 ack = 0;
@@ -1689,7 +1689,7 @@ u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
  *
  **************************************************************/

-u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
 {
	printk( "TLAN: Test interrupt on %s.\n", dev->name );
	return 1;
@@ -1719,7 +1719,7 @@ u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
  *
  **************************************************************/

-u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
	TLanList *head_list;
@@ -1767,7 +1767,7 @@ u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
  *
  **************************************************************/

-u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
	u32 ack;
@@ -1842,7 +1842,7 @@ u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
  *
  **************************************************************/

-u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
+static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
	dma_addr_t head_list_phys;
@@ -1902,7 +1902,7 @@ u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
  *
  **************************************************************/

-void TLan_Timer( unsigned long data )
+static void TLan_Timer( unsigned long data )
 {
	struct net_device *dev = (struct net_device *) data;
	TLanPrivateInfo *priv = netdev_priv(dev);
@@ -1983,7 +1983,7 @@ void TLan_Timer( unsigned long data )
  *
  **************************************************************/

-void TLan_ResetLists( struct net_device *dev )
+static void TLan_ResetLists( struct net_device *dev )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
	int i;
@@ -2043,7 +2043,7 @@ void TLan_ResetLists( struct net_device *dev )
 } /* TLan_ResetLists */


-void TLan_FreeLists( struct net_device *dev )
+static void TLan_FreeLists( struct net_device *dev )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
	int i;
@@ -2092,7 +2092,7 @@ void TLan_FreeLists( struct net_device *dev )
  *
  **************************************************************/

-void TLan_PrintDio( u16 io_base )
+static void TLan_PrintDio( u16 io_base )
 {
	u32 data0, data1;
	int i;
@@ -2127,7 +2127,7 @@ void TLan_PrintDio( u16 io_base )
  *
  **************************************************************/

-void TLan_PrintList( TLanList *list, char *type, int num)
+static void TLan_PrintList( TLanList *list, char *type, int num)
 {
	int i;

@@ -2163,7 +2163,7 @@ void TLan_PrintList( TLanList *list, char *type, int num)
  *
  **************************************************************/

-void TLan_ReadAndClearStats( struct net_device *dev, int record )
+static void TLan_ReadAndClearStats( struct net_device *dev, int record )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
	u32 tx_good, tx_under;
@@ -2238,7 +2238,7 @@ void TLan_ReadAndClearStats( struct net_device *dev, int record )
  *
  **************************************************************/

-void
+static void
 TLan_ResetAdapter( struct net_device *dev )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
@@ -2324,7 +2324,7 @@ TLan_ResetAdapter( struct net_device *dev )



-void
+static void
 TLan_FinishReset( struct net_device *dev )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
@@ -2448,7 +2448,7 @@ TLan_FinishReset( struct net_device *dev )
  *
  **************************************************************/

-void TLan_SetMac( struct net_device *dev, int areg, char *mac )
+static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
 {
	int i;

@@ -2490,7 +2490,7 @@ void TLan_SetMac( struct net_device *dev, int areg, char *mac )
  *
  ********************************************************************/

-void TLan_PhyPrint( struct net_device *dev )
+static void TLan_PhyPrint( struct net_device *dev )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
	u16 i, data0, data1, data2, data3, phy;
@@ -2539,7 +2539,7 @@ void TLan_PhyPrint( struct net_device *dev )
  *
  ********************************************************************/

-void TLan_PhyDetect( struct net_device *dev )
+static void TLan_PhyDetect( struct net_device *dev )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
	u16 control;
@@ -2586,7 +2586,7 @@ void TLan_PhyDetect( struct net_device *dev )



-void TLan_PhyPowerDown( struct net_device *dev )
+static void TLan_PhyPowerDown( struct net_device *dev )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
	u16 value;
@@ -2611,7 +2611,7 @@ void TLan_PhyPowerDown( struct net_device *dev )



-void TLan_PhyPowerUp( struct net_device *dev )
+static void TLan_PhyPowerUp( struct net_device *dev )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
	u16 value;
@@ -2632,7 +2632,7 @@ void TLan_PhyPowerUp( struct net_device *dev )



-void TLan_PhyReset( struct net_device *dev )
+static void TLan_PhyReset( struct net_device *dev )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
	u16 phy;
@@ -2660,7 +2660,7 @@ void TLan_PhyReset( struct net_device *dev )



-void TLan_PhyStartLink( struct net_device *dev )
+static void TLan_PhyStartLink( struct net_device *dev )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
	u16 ability;
@@ -2747,7 +2747,7 @@ void TLan_PhyStartLink( struct net_device *dev )



-void TLan_PhyFinishAutoNeg( struct net_device *dev )
+static void TLan_PhyFinishAutoNeg( struct net_device *dev )
 {
	TLanPrivateInfo *priv = netdev_priv(dev);
	u16 an_adv;
@@ -2903,7 +2903,7 @@ void TLan_PhyMonitor( struct net_device *dev )
  *
  **************************************************************/

-int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
+static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
 {
	u8 nack;
	u16 sio, tmp;
@@ -2993,7 +2993,7 @@ int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
  *
  **************************************************************/

-void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
+static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
 {
	u16 sio;
	u32 i;
@@ -3035,7 +3035,7 @@ void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
  *
  **************************************************************/

-void TLan_MiiSync( u16 base_port )
+static void TLan_MiiSync( u16 base_port )
 {
	int i;
	u16 sio;
@@ -3074,7 +3074,7 @@ void TLan_MiiSync( u16 base_port )
  *
  **************************************************************/

-void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val )
+static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val )
 {
	u16 sio;
	int minten;
@@ -3144,7 +3144,7 @@ void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val )
  *
  **************************************************************/

-void TLan_EeSendStart( u16 io_base )
+static void TLan_EeSendStart( u16 io_base )
 {
	u16 sio;

@@ -3184,7 +3184,7 @@ void TLan_EeSendStart( u16 io_base )
  *
  **************************************************************/

-int TLan_EeSendByte( u16 io_base, u8 data, int stop )
+static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
 {
	int err;
	u8 place;
@@ -3245,7 +3245,7 @@ int TLan_EeSendByte( u16 io_base, u8 data, int stop )
  *
  **************************************************************/

-void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
+static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
 {
	u8 place;
	u16 sio;
@@ -3303,7 +3303,7 @@ void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
  *
  **************************************************************/

-int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data )
+static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data )
 {
	int err;
	TLanPrivateInfo *priv = netdev_priv(dev);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 77d9dd7ea34f..567c62757e9d 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -910,7 +910,8 @@ static void de_set_media (struct de_private *de)
	unsigned media = de->media_type;
	u32 macmode = dr32(MacMode);

-	BUG_ON(de_is_running(de));
+	if (de_is_running(de))
+		printk(KERN_WARNING "%s: chip is running while changing media!\n", de->dev->name);

	if (de->de21040)
		dw32(CSR11, FULL_DUPLEX_MAGIC);
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index a7afeea156bd..a59c1f224aa8 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -482,9 +482,11 @@ static void uli526x_init(struct net_device *dev)
	struct uli526x_board_info *db = netdev_priv(dev);
	unsigned long ioaddr = db->ioaddr;
	u8 phy_tmp;
+	u8 timeout;
	u16 phy_value;
	u16 phy_reg_reset;

+
	ULI526X_DBUG(0, "uli526x_init()", 0);

	/* Reset M526x MAC controller */
@@ -509,11 +511,19 @@ static void uli526x_init(struct net_device *dev)
	/* Parser SROM and media mode */
	db->media_mode = uli526x_media_mode;

-	/* Phyxcer capability setting */
+	/* phyxcer capability setting */
	phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id);
	phy_reg_reset = (phy_reg_reset | 0x8000);
	phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id);
+
+	/* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management
+	 * functions") or phy data sheet for details on phy reset
+	 */
	udelay(500);
+	timeout = 10;
+	while (timeout-- &&
+		phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id) & 0x8000)
+		udelay(100);

	/* Process Phyxcer Media Mode */
	uli526x_set_phyxcer(db);
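The uli526x hunk above replaces a single fixed udelay() after setting the PHY reset bit with a bounded poll: BMCR bit 15 (0x8000) is self-clearing, so the loop waits until the PHY reports that the reset completed or the retry budget runs out. A minimal sketch of the same pattern, assuming hypothetical phy_read_bmcr() and udelay() helpers rather than the driver's own accessors:

#include <stdbool.h>
#include <stdint.h>

#define BMCR_RESET 0x8000

/* Hypothetical accessors standing in for phy_read()/udelay() in the driver. */
extern uint16_t phy_read_bmcr(void);
extern void udelay(unsigned long usecs);

/* Wait for a self-clearing reset bit with an upper bound instead of one
 * fixed delay.  Returns true if the PHY came out of reset in time. */
static bool phy_wait_reset_done(void)
{
	int timeout = 10;

	while (timeout-- && (phy_read_bmcr() & BMCR_RESET))
		udelay(100);

	return !(phy_read_bmcr() & BMCR_RESET);
}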
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 038c1ef94d2e..7b816a032957 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -663,7 +663,11 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
	case SIOCSIFHWADDR:
	{
		/* try to set the actual net device's hw address */
-		int ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
+		int ret;
+
+		rtnl_lock();
+		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
+		rtnl_unlock();

		if (ret == 0) {
			/** Set the character device's hardware address. This is used when
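The tun hunk takes the RTNL lock around dev_set_mac_address(); that helper expects to run with the lock held because it notifies other parts of the stack about the address change. A minimal sketch of the calling convention, with a hypothetical set_iface_mac() wrapper:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical wrapper: change a device's MAC address from process
 * context.  dev_set_mac_address() must be called under the RTNL lock. */
static int set_iface_mac(struct net_device *dev, struct sockaddr *addr)
{
	int ret;

	rtnl_lock();
	ret = dev_set_mac_address(dev, addr);
	rtnl_unlock();

	return ret;
}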
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 3f67a29593bc..e2ad98bee6e7 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -244,18 +244,6 @@ static int veth_open(struct net_device *dev)
	return 0;
 }

-static int veth_close(struct net_device *dev)
-{
-	struct veth_priv *priv;
-
-	if (netif_carrier_ok(dev)) {
-		priv = netdev_priv(dev);
-		netif_carrier_off(dev);
-		netif_carrier_off(priv->peer);
-	}
-	return 0;
-}
-
 static int veth_dev_init(struct net_device *dev)
 {
	struct veth_net_stats *stats;
@@ -286,13 +274,50 @@ static void veth_setup(struct net_device *dev)
	dev->hard_start_xmit = veth_xmit;
	dev->get_stats = veth_get_stats;
	dev->open = veth_open;
-	dev->stop = veth_close;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->init = veth_dev_init;
	dev->destructor = veth_dev_free;
 }

+static void veth_change_state(struct net_device *dev)
+{
+	struct net_device *peer;
+	struct veth_priv *priv;
+
+	priv = netdev_priv(dev);
+	peer = priv->peer;
+
+	if (netif_carrier_ok(peer)) {
+		if (!netif_carrier_ok(dev))
+			netif_carrier_on(dev);
+	} else {
+		if (netif_carrier_ok(dev))
+			netif_carrier_off(dev);
+	}
+}
+
+static int veth_device_event(struct notifier_block *unused,
+			     unsigned long event, void *ptr)
+{
+	struct net_device *dev = ptr;
+
+	if (dev->open != veth_open)
+		goto out;
+
+	switch (event) {
+	case NETDEV_CHANGE:
+		veth_change_state(dev);
+		break;
+	}
+out:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block veth_notifier_block __read_mostly = {
+	.notifier_call = veth_device_event,
+};
+
 /*
  * netlink interface
  */
@@ -454,12 +479,14 @@ static struct rtnl_link_ops veth_link_ops = {

 static __init int veth_init(void)
 {
+	register_netdevice_notifier(&veth_notifier_block);
	return rtnl_link_register(&veth_link_ops);
 }

 static __exit void veth_exit(void)
 {
	rtnl_link_unregister(&veth_link_ops);
+	unregister_netdevice_notifier(&veth_notifier_block);
 }

 module_init(veth_init);
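The veth change drops the stop hook that forced the carrier down and instead mirrors the peer's carrier state through a netdevice notifier. The general shape of such a notifier, as a hedged sketch with a hypothetical my_handle_change() helper (in this kernel generation the notifier's ptr argument is the struct net_device itself, as the hunk above shows):

#include <linux/netdevice.h>
#include <linux/notifier.h>

/* Hypothetical per-event handler. */
static void my_handle_change(struct net_device *dev)
{
	/* react to a carrier or flags change on dev */
}

static int my_device_event(struct notifier_block *unused,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_CHANGE:		/* carrier or flags changed */
		my_handle_change(dev);
		break;
	}
	return NOTIFY_DONE;		/* observe only, never veto */
}

static struct notifier_block my_notifier = {
	.notifier_call = my_device_event,
};

/* register_netdevice_notifier(&my_notifier) in module init,
 * unregister_netdevice_notifier(&my_notifier) in module exit. */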
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 7c851b1e6daa..8c9d6ae2bb31 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1893,7 +1893,7 @@ static void rhine_shutdown (struct pci_dev *pdev)

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
-		iowrite8(0x04, ioaddr + 0xA7);
+		iowrite8(0x04, ioaddr + WOLcgClr);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fdc23678117b..19fd4cb0ddf8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -361,6 +361,7 @@ static int virtnet_probe(struct virtio_device *vdev)
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
+	vdev->priv = vi;

	/* We expect two virtqueues, receive then send. */
	vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
@@ -395,7 +396,6 @@ static int virtnet_probe(struct virtio_device *vdev)
	}

	pr_debug("virtnet: registered device %s\n", dev->name);
-	vdev->priv = vi;
	return 0;

 unregister:
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 15d5c58e57bc..e59255a155a9 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -751,7 +751,7 @@ upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
 }


-static __inline void
+static inline void
 send_complete( struct net_local *nl )
 {
 #ifdef CONFIG_SBNI_MULTILINE
diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath5k/ath5k.h
index c79066b38d3b..69dea3392612 100644
--- a/drivers/net/wireless/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath5k/ath5k.h
@@ -1035,7 +1035,7 @@ struct ath5k_hw {
		unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
		unsigned int, unsigned int, unsigned int, unsigned int,
		unsigned int, unsigned int, unsigned int);
-	bool (*ah_setup_xtx_desc)(struct ath5k_hw *, struct ath5k_desc *,
+	int (*ah_setup_xtx_desc)(struct ath5k_hw *, struct ath5k_desc *,
		unsigned int, unsigned int, unsigned int, unsigned int,
		unsigned int, unsigned int);
	int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *);
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index dfdaec020739..bef967ce34a6 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -668,7 +668,10 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
	 * return false w/o doing anything. MAC's that do
	 * support it will return true w/o doing anything.
	 */
-	if (ah->ah_setup_xtx_desc(ah, NULL, 0, 0, 0, 0, 0, 0))
+	ret = ah->ah_setup_xtx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
+	if (ret < 0)
+		goto err;
+	if (ret > 0)
		__set_bit(ATH_STAT_MRRETRY, sc->status);

	/*
@@ -1715,6 +1718,7 @@ ath5k_tasklet_rx(unsigned long data)
			break;
		else if (unlikely(ret)) {
			ATH5K_ERR(sc, "error in processing rx descriptor\n");
+			spin_unlock(&sc->rxbuflock);
			return;
		}

@@ -2126,8 +2130,9 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
		"updated timers based on beacon TSF\n");

	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
		"bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
-		bc_tsf, hw_tsf, bc_tu, hw_tu, nexttbtt);
+		(unsigned long long) bc_tsf,
+		(unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
		intval & AR5K_BEACON_PERIOD,
		intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
@@ -2385,10 +2390,11 @@ ath5k_intr(int irq, void *dev_id)
		u64 tsf = ath5k_hw_get_tsf64(ah);
		sc->nexttbtt += sc->bintval;
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
			"SWBA nexttbtt: %x hw_tu: %x "
			"TSF: %llx\n",
			sc->nexttbtt,
-			TSF_TO_TU(tsf), tsf);
+			TSF_TO_TU(tsf),
+			(unsigned long long) tsf);
	} else {
		ath5k_beacon_send(sc);
	}
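Two of the ath5k hunks only adjust printk formatting: u64 is long long on some architectures and long on others, so a u64 passed to a %llx specifier needs an explicit cast to stay portable and warning-free. A tiny sketch, with a hypothetical log_tsf() helper:

#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical helper: u64 may be 'unsigned long' on some 64-bit targets,
 * so cast explicitly when printing with %llx/%llu. */
static void log_tsf(u64 tsf)
{
	printk(KERN_DEBUG "TSF: %llx\n", (unsigned long long) tsf);
}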
diff --git a/drivers/net/wireless/ath5k/hw.c b/drivers/net/wireless/ath5k/hw.c
index 1ab57aa6e4dc..c2de2d958e8e 100644
--- a/drivers/net/wireless/ath5k/hw.c
+++ b/drivers/net/wireless/ath5k/hw.c
@@ -45,7 +45,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
	unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
	unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
	unsigned int, unsigned int);
-static bool ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
+static int ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
	unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
	unsigned int);
 static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *, struct ath5k_desc *);
@@ -3743,7 +3743,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
 /*
  * Initialize a 4-word multirate tx descriptor on 5212
  */
-static bool
+static int
 ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
	unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, u_int tx_tries2,
	unsigned int tx_rate3, u_int tx_tries3)
@@ -3783,10 +3783,10 @@ ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,

 #undef _XTX_TRIES

-		return true;
+		return 1;
	}

-	return false;
+	return 0;
 }

 /*
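The ah_setup_xtx_desc change widens a bool return into an int so the callback can report three outcomes: a negative errno on failure, 0 for "not supported, nothing done", and a positive value for "supported". The caller in base.c (earlier hunk) then separates the error path from the capability probe. A self-contained userspace sketch of that calling pattern, using a hypothetical probe_multirate() callback:

#include <errno.h>
#include <stdio.h>

/* Hypothetical capability probe: <0 error, 0 unsupported, >0 supported. */
static int probe_multirate(int hw_is_5212)
{
	if (hw_is_5212 < 0)
		return -EINVAL;		/* hard failure */
	return hw_is_5212 ? 1 : 0;
}

int main(void)
{
	int ret = probe_multirate(1);

	if (ret < 0)
		return 1;		/* bail out on error */
	if (ret > 0)
		printf("multi-rate retry supported\n");
	return 0;
}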
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 1a2141dabdc7..8bc4bc4c330e 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -32,6 +32,7 @@ config B43_PCI_AUTOSELECT
	bool
	depends on B43 && SSB_PCIHOST_POSSIBLE
	select SSB_PCIHOST
+	select SSB_B43_PCI_BRIDGE
	default y

 # Auto-select SSB PCICORE driver, if possible
diff --git a/drivers/net/wireless/b43legacy/Kconfig b/drivers/net/wireless/b43legacy/Kconfig
index 6745579ba96d..13c65faf0247 100644
--- a/drivers/net/wireless/b43legacy/Kconfig
+++ b/drivers/net/wireless/b43legacy/Kconfig
@@ -25,6 +25,7 @@ config B43LEGACY_PCI_AUTOSELECT
	bool
	depends on B43LEGACY && SSB_PCIHOST_POSSIBLE
	select SSB_PCIHOST
+	select SSB_B43_PCI_BRIDGE
	default y

 # Auto-select SSB PCICORE driver, if possible
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index c39de422e220..5f3f34e1dbfd 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3829,7 +3829,7 @@ static void b43legacy_print_driverinfo(void)
 #ifdef CONFIG_B43LEGACY_DMA
	feat_dma = "D";
 #endif
-	printk(KERN_INFO "Broadcom 43xx driver loaded "
+	printk(KERN_INFO "Broadcom 43xx-legacy driver loaded "
	       "[ Features: %s%s%s%s%s, Firmware-ID: "
	       B43legacy_SUPPORTED_FIRMWARE_ID " ]\n",
	       feat_pci, feat_leds, feat_rfkill, feat_pio, feat_dma);
diff --git a/drivers/net/wireless/bcm43xx/Kconfig b/drivers/net/wireless/bcm43xx/Kconfig
index 0159701e8456..afb8f4305c24 100644
--- a/drivers/net/wireless/bcm43xx/Kconfig
+++ b/drivers/net/wireless/bcm43xx/Kconfig
@@ -1,6 +1,6 @@
 config BCM43XX
	tristate "Broadcom BCM43xx wireless support (DEPRECATED)"
-	depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && WLAN_80211 && EXPERIMENTAL
+	depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && WLAN_80211 && (!SSB_B43_PCI_BRIDGE || SSB != y) && EXPERIMENTAL
	select WIRELESS_EXT
	select FW_LOADER
	select HW_RANDOM
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index eab020338fde..b3c1acbcc655 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -1040,7 +1040,6 @@ int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
	lbs_deb_leave(LBS_DEB_CMD);
	return ret;
 }
-EXPORT_SYMBOL_GPL(lbs_mesh_access);

 int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan)
 {
@@ -1576,7 +1575,6 @@ done:
	lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret);
	return ret;
 }
-EXPORT_SYMBOL_GPL(lbs_prepare_and_send_command);

 /**
  * @brief This function allocates the command buffer and link
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 159216a91903..f0ef7081bdeb 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -562,9 +562,7 @@ int lbs_process_rx_command(struct lbs_private *priv)
	}

	resp = (void *)priv->upld_buf;
-
-	curcmd = le16_to_cpu(resp->command);
-
+	curcmd = le16_to_cpu(priv->cur_cmd->cmdbuf->command);
	respcmd = le16_to_cpu(resp->command);
	result = le16_to_cpu(resp->result);

@@ -572,15 +570,15 @@ int lbs_process_rx_command(struct lbs_private *priv)
		respcmd, le16_to_cpu(resp->seqnum), priv->upld_len, jiffies);
	lbs_deb_hex(LBS_DEB_HOST, "CMD_RESP", (void *) resp, priv->upld_len);

-	if (resp->seqnum != resp->seqnum) {
+	if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) {
		lbs_pr_info("Received CMD_RESP with invalid sequence %d (expected %d)\n",
-			le16_to_cpu(resp->seqnum), le16_to_cpu(resp->seqnum));
+			le16_to_cpu(resp->seqnum), le16_to_cpu(priv->cur_cmd->cmdbuf->seqnum));
		spin_unlock_irqrestore(&priv->driver_lock, flags);
		ret = -1;
		goto done;
	}
	if (respcmd != CMD_RET(curcmd) &&
-	    respcmd != CMD_802_11_ASSOCIATE && curcmd != CMD_RET_802_11_ASSOCIATE) {
+	    respcmd != CMD_RET_802_11_ASSOCIATE && curcmd != CMD_802_11_ASSOCIATE) {
		lbs_pr_info("Invalid CMD_RESP %x to command %x!\n", respcmd, curcmd);
		spin_unlock_irqrestore(&priv->driver_lock, flags);
		ret = -1;
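Worth noting in the cmdresp.c hunk: the old guard compared resp->seqnum against itself, which can never fail, so mismatched responses slipped through; the fix compares the response against the command that is actually outstanding (priv->cur_cmd). A reduced userspace sketch of that validation, with hypothetical cmd_header/pending structures rather than the driver's real ones:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical on-wire command/response header. */
struct cmd_header {
	uint16_t command;
	uint16_t seqnum;
};

/* Accept a response only if it matches the pending request. */
static int response_matches(const struct cmd_header *pending,
			    const struct cmd_header *resp)
{
	if (resp->seqnum != pending->seqnum) {
		fprintf(stderr, "bad sequence %u (expected %u)\n",
			resp->seqnum, pending->seqnum);
		return 0;
	}
	return resp->command == pending->command;
}

int main(void)
{
	struct cmd_header pending = { .command = 0x0050, .seqnum = 7 };
	struct cmd_header resp = { .command = 0x0050, .seqnum = 7 };

	return response_matches(&pending, &resp) ? 0 : 1;
}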
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index aaacd9bd6bd2..4e22341b4f3d 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -69,7 +69,6 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev);
 int lbs_remove_card(struct lbs_private *priv);
 int lbs_start_card(struct lbs_private *priv);
 int lbs_stop_card(struct lbs_private *priv);
-int lbs_reset_device(struct lbs_private *priv);
 void lbs_host_to_card_done(struct lbs_private *priv);

 int lbs_update_channel(struct lbs_private *priv);
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 84fb49ca0fae..4d4e2f3b66ac 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -1351,8 +1351,6 @@ done:
	lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret);
	return ret;
 }
-EXPORT_SYMBOL_GPL(lbs_add_mesh);
-

 static void lbs_remove_mesh(struct lbs_private *priv)
 {
@@ -1372,7 +1370,6 @@ static void lbs_remove_mesh(struct lbs_private *priv)
	free_netdev(mesh_dev);
	lbs_deb_leave(LBS_DEB_MESH);
 }
-EXPORT_SYMBOL_GPL(lbs_remove_mesh);

 /**
  * @brief This function finds the CFP in
@@ -1458,20 +1455,6 @@ void lbs_interrupt(struct lbs_private *priv)
 }
 EXPORT_SYMBOL_GPL(lbs_interrupt);

-int lbs_reset_device(struct lbs_private *priv)
-{
-	int ret;
-
-	lbs_deb_enter(LBS_DEB_MAIN);
-	ret = lbs_prepare_and_send_command(priv, CMD_802_11_RESET,
-					   CMD_ACT_HALT, 0, 0, NULL);
-	msleep_interruptible(10);
-
-	lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(lbs_reset_device);
-
 static int __init lbs_init_module(void)
 {
	lbs_deb_enter(LBS_DEB_MAIN);
diff --git a/drivers/net/wireless/p54common.c b/drivers/net/wireless/p54common.c
index 5cda49aff3a8..d191e055a788 100644
--- a/drivers/net/wireless/p54common.c
+++ b/drivers/net/wireless/p54common.c
@@ -166,18 +166,23 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
	struct p54_common *priv = dev->priv;
	struct eeprom_pda_wrap *wrap = NULL;
	struct pda_entry *entry;
-	int i = 0;
	unsigned int data_len, entry_len;
	void *tmp;
	int err;
+	u8 *end = (u8 *)eeprom + len;

	wrap = (struct eeprom_pda_wrap *) eeprom;
-	entry = (void *)wrap->data + wrap->len;
-	i += 2;
-	i += le16_to_cpu(entry->len)*2;
-	while (i < len) {
+	entry = (void *)wrap->data + le16_to_cpu(wrap->len);
+
+	/* verify that at least the entry length/code fits */
+	while ((u8 *)entry <= end - sizeof(*entry)) {
		entry_len = le16_to_cpu(entry->len);
		data_len = ((entry_len - 1) << 1);
+
+		/* abort if entry exceeds whole structure */
+		if ((u8 *)entry + sizeof(*entry) + data_len > end)
+			break;
+
		switch (le16_to_cpu(entry->code)) {
		case PDR_MAC_ADDRESS:
			SET_IEEE80211_PERM_ADDR(dev, entry->data);
@@ -249,13 +254,12 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
			priv->version = *(u8 *)(entry->data + 1);
			break;
		case PDR_END:
-			i = len;
+			/* make it overrun */
+			entry_len = len;
			break;
		}

		entry = (void *)entry + (entry_len + 1)*2;
-		i += 2;
-		i += entry_len*2;
	}

	if (!priv->iq_autocal || !priv->output_limit || !priv->curve_data) {
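The p54 EEPROM parser previously walked the PDA entries with a byte counter and trusted the device-supplied length; the fix derives an end pointer from the caller-supplied buffer length and checks both the entry header and its payload against it before touching the data. The same idea in a self-contained userspace form, with a hypothetical struct tlv layout (host byte order, length in bytes) rather than p54's real record format:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical TLV record: 'len' is the payload length in bytes. */
struct tlv {
	uint16_t len;
	uint16_t code;
	uint8_t data[];
};

static void parse_tlvs(const uint8_t *buf, size_t buflen)
{
	const uint8_t *end = buf + buflen;
	const struct tlv *entry = (const struct tlv *)buf;

	/* verify that at least the length/code header fits */
	while ((const uint8_t *)entry + sizeof(*entry) <= end) {
		size_t data_len = entry->len;

		/* abort if the payload would run past the buffer */
		if ((const uint8_t *)entry + sizeof(*entry) + data_len > end)
			break;

		printf("code %u, %zu payload bytes\n",
		       (unsigned int)entry->code, data_len);

		entry = (const struct tlv *)((const uint8_t *)entry +
					     sizeof(*entry) + data_len);
	}
}

int main(void)
{
	uint8_t buf[8];
	struct tlv t = { .len = 4, .code = 1 };

	memcpy(buf, &t, sizeof(t));		/* header */
	memset(buf + sizeof(t), 0xab, 4);	/* payload */
	parse_tlvs(buf, sizeof(buf));
	return 0;
}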
diff --git a/drivers/net/wireless/p54common.h b/drivers/net/wireless/p54common.h
index a721334e20d9..b67ff34e26fe 100644
--- a/drivers/net/wireless/p54common.h
+++ b/drivers/net/wireless/p54common.h
@@ -53,10 +53,10 @@ struct pda_entry {
 } __attribute__ ((packed));

 struct eeprom_pda_wrap {
-	u32 magic;
-	u16 pad;
-	u16 len;
-	u32 arm_opcode;
+	__le32 magic;
+	__le16 pad;
+	__le16 len;
+	__le32 arm_opcode;
	u8 data[0];
 } __attribute__ ((packed));

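The header change marks the wrapper fields as __le16/__le32 so sparse can flag missing byte-order conversions, and the p54common.c hunk adds the matching le16_to_cpu() where the length is consumed. A tiny userspace illustration of why the conversion matters, assuming a hypothetical load_le16() helper:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: read a 16-bit little-endian field from a byte
 * buffer regardless of the host's own byte order. */
static uint16_t load_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* 0x0114 stored little-endian on the wire */
	const uint8_t wire[2] = { 0x14, 0x01 };

	printf("length = 0x%04x\n", load_le16(wire));
	return 0;
}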
diff --git a/drivers/net/wireless/p54usb.c b/drivers/net/wireless/p54usb.c
index 60d286eb0b8b..e7d4aee8799e 100644
--- a/drivers/net/wireless/p54usb.c
+++ b/drivers/net/wireless/p54usb.c
@@ -35,6 +35,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
	{USB_DEVICE(0x0707, 0xee06)},	/* SMC 2862W-G */
	{USB_DEVICE(0x083a, 0x4501)},	/* Accton 802.11g WN4501 USB */
	{USB_DEVICE(0x083a, 0x4502)},	/* Siemens Gigaset USB Adapter */
+	{USB_DEVICE(0x083a, 0x5501)},	/* Phillips CPWUA054 */
	{USB_DEVICE(0x0846, 0x4200)},	/* Netgear WG121 */
	{USB_DEVICE(0x0846, 0x4210)},	/* Netgear WG121 the second ? */
	{USB_DEVICE(0x0846, 0x4220)},	/* Netgear WG111 */
@@ -62,6 +63,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
	{USB_DEVICE(0x0cde, 0x0008)},	/* Sagem XG703A */
	{USB_DEVICE(0x0d8e, 0x3762)},	/* DLink DWL-G120 Cohiba */
	{USB_DEVICE(0x09aa, 0x1000)},	/* Spinnaker Proto board */
+	{USB_DEVICE(0x13b1, 0x000a)},	/* Linksys WUSB54G ver 2 */
	{USB_DEVICE(0x13B1, 0x000C)},	/* Linksys WUSB54AG */
	{USB_DEVICE(0x1435, 0x0427)},	/* Inventel UR054G */
	{USB_DEVICE(0x2001, 0x3704)},	/* DLink DWL-G122 rev A2 */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 8ce2ddf8024f..10b776c1adc5 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -228,9 +228,9 @@ struct NDIS_WLAN_BSSID_EX {
	struct NDIS_802_11_SSID Ssid;
	__le32 Privacy;
	__le32 Rssi;
-	enum NDIS_802_11_NETWORK_TYPE NetworkTypeInUse;
+	__le32 NetworkTypeInUse;
	struct NDIS_802_11_CONFIGURATION Configuration;
-	enum NDIS_802_11_NETWORK_INFRASTRUCTURE InfrastructureMode;
+	__le32 InfrastructureMode;
	u8 SupportedRates[NDIS_802_11_LENGTH_RATES_EX];
	__le32 IELength;
	u8 IEs[0];
@@ -260,7 +260,7 @@ struct NDIS_802_11_KEY {
	__le32 KeyLength;
	u8 Bssid[6];
	u8 Padding[6];
-	__le64 KeyRSC;
+	u8 KeyRSC[8];
	u8 KeyMaterial[32];
 } __attribute__((packed));

@@ -279,11 +279,11 @@ struct RNDIS_CONFIG_PARAMETER_INFOBUFFER {
 } __attribute__((packed));

 /* these have to match what is in wpa_supplicant */
-enum { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP } wpa_alg;
-enum { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP, CIPHER_WEP104 }
-	wpa_cipher;
-enum { KEY_MGMT_802_1X, KEY_MGMT_PSK, KEY_MGMT_NONE, KEY_MGMT_802_1X_NO_WPA,
-	KEY_MGMT_WPA_NONE } wpa_key_mgmt;
+enum wpa_alg { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP };
+enum wpa_cipher { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP,
+		  CIPHER_WEP104 };
+enum wpa_key_mgmt { KEY_MGMT_802_1X, KEY_MGMT_PSK, KEY_MGMT_NONE,
+		    KEY_MGMT_802_1X_NO_WPA, KEY_MGMT_WPA_NONE };

 /*
  * private data
@@ -1508,7 +1508,7 @@ static int rndis_iw_set_encode_ext(struct net_device *dev,
	struct usbnet *usbdev = dev->priv;
	struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
	struct NDIS_802_11_KEY ndis_key;
-	int i, keyidx, ret;
+	int keyidx, ret;
	u8 *addr;

	keyidx = wrqu->encoding.flags & IW_ENCODE_INDEX;
@@ -1543,9 +1543,7 @@ static int rndis_iw_set_encode_ext(struct net_device *dev,
	ndis_key.KeyIndex = cpu_to_le32(keyidx);

	if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
-		for (i = 0; i < 6; i++)
-			ndis_key.KeyRSC |=
-				cpu_to_le64(ext->rx_seq[i] << (i * 8));
+		memcpy(ndis_key.KeyRSC, ext->rx_seq, 6);
		ndis_key.KeyIndex |= cpu_to_le32(1 << 29);
	}

diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index d6cba138c7ab..c69f85ed7669 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -960,8 +960,12 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
		rt2400pci_disable_radio(rt2x00dev);
		break;
	case STATE_RADIO_RX_ON:
+	case STATE_RADIO_RX_ON_LINK:
+		rt2400pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
+		break;
	case STATE_RADIO_RX_OFF:
-		rt2400pci_toggle_rx(rt2x00dev, state);
+	case STATE_RADIO_RX_OFF_LINK:
+		rt2400pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
		break;
	case STATE_DEEP_SLEEP:
	case STATE_SLEEP:
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index e874fdcae204..91e87b53374f 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1112,8 +1112,12 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
		rt2500pci_disable_radio(rt2x00dev);
		break;
	case STATE_RADIO_RX_ON:
+	case STATE_RADIO_RX_ON_LINK:
+		rt2500pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
+		break;
	case STATE_RADIO_RX_OFF:
-		rt2500pci_toggle_rx(rt2x00dev, state);
+	case STATE_RADIO_RX_OFF_LINK:
+		rt2500pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
		break;
	case STATE_DEEP_SLEEP:
	case STATE_SLEEP:
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 4ca9730e5e92..638c3d243108 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1001,8 +1001,12 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
		rt2500usb_disable_radio(rt2x00dev);
		break;
	case STATE_RADIO_RX_ON:
+	case STATE_RADIO_RX_ON_LINK:
+		rt2500usb_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
+		break;
	case STATE_RADIO_RX_OFF:
-		rt2500usb_toggle_rx(rt2x00dev, state);
+	case STATE_RADIO_RX_OFF_LINK:
+		rt2500usb_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
		break;
	case STATE_DEEP_SLEEP:
	case STATE_SLEEP:
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 72cfe00c1ed7..07adc576db49 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -97,12 +97,16 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
	libconf.ant.rx = rx;
	libconf.ant.tx = tx;

+	if (rx == rt2x00dev->link.ant.active.rx &&
+	    tx == rt2x00dev->link.ant.active.tx)
+		return;
+
	/*
	 * Antenna setup changes require the RX to be disabled,
	 * else the changes will be ignored by the device.
	 */
	if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
-		rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
+		rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK);

	/*
	 * Write new antenna setup to device and reset the link tuner.
@@ -116,7 +120,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
	rt2x00dev->link.ant.active.tx = libconf.ant.tx;

	if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
-		rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
+		rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK);
 }

 void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index c4be2ac4d7a4..0d51f478bcdf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -61,11 +61,33 @@ EXPORT_SYMBOL_GPL(rt2x00lib_get_ring);
 /*
  * Link tuning handlers
  */
-static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev)
+void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev)
 {
+	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
+		return;
+
+	/*
+	 * Reset link information.
+	 * Both the currently active vgc level as well as
+	 * the link tuner counter should be reset. Resetting
+	 * the counter is important for devices where the
+	 * device should only perform link tuning during the
+	 * first minute after being enabled.
+	 */
	rt2x00dev->link.count = 0;
	rt2x00dev->link.vgc_level = 0;

+	/*
+	 * Reset the link tuner.
+	 */
+	rt2x00dev->ops->lib->reset_tuner(rt2x00dev);
+}
+
+static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev)
+{
+	/*
+	 * Clear all (possibly) pre-existing quality statistics.
+	 */
	memset(&rt2x00dev->link.qual, 0, sizeof(rt2x00dev->link.qual));

	/*
@@ -79,10 +101,7 @@ static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev)
	rt2x00dev->link.qual.rx_percentage = 50;
	rt2x00dev->link.qual.tx_percentage = 50;

-	/*
-	 * Reset the link tuner.
-	 */
-	rt2x00dev->ops->lib->reset_tuner(rt2x00dev);
+	rt2x00lib_reset_link_tuner(rt2x00dev);

	queue_delayed_work(rt2x00dev->hw->workqueue,
			   &rt2x00dev->link.work, LINK_TUNE_INTERVAL);
@@ -93,15 +112,6 @@ static void rt2x00lib_stop_link_tuner(struct rt2x00_dev *rt2x00dev)
	cancel_delayed_work_sync(&rt2x00dev->link.work);
 }

-void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev)
-{
-	if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
-		return;
-
-	rt2x00lib_stop_link_tuner(rt2x00dev);
-	rt2x00lib_start_link_tuner(rt2x00dev);
-}
-
 /*
  * Ring initialization
  */
@@ -260,19 +270,11 @@ static void rt2x00lib_evaluate_antenna_sample(struct rt2x00_dev *rt2x00dev)
	if (sample_a == sample_b)
		return;

-	if (rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY) {
-		if (sample_a > sample_b && rx == ANTENNA_B)
-			rx = ANTENNA_A;
-		else if (rx == ANTENNA_A)
-			rx = ANTENNA_B;
-	}
+	if (rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY)
+		rx = (sample_a > sample_b) ? ANTENNA_A : ANTENNA_B;

-	if (rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY) {
-		if (sample_a > sample_b && tx == ANTENNA_B)
-			tx = ANTENNA_A;
-		else if (tx == ANTENNA_A)
-			tx = ANTENNA_B;
-	}
+	if (rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY)
+		tx = (sample_a > sample_b) ? ANTENNA_A : ANTENNA_B;

	rt2x00lib_config_antenna(rt2x00dev, rx, tx);
 }
@@ -293,7 +295,7 @@ static void rt2x00lib_evaluate_antenna_eval(struct rt2x00_dev *rt2x00dev)
	 * sample the rssi from the other antenna to make a valid
	 * comparison between the 2 antennas.
	 */
-	if ((rssi_curr - rssi_old) > -5 || (rssi_curr - rssi_old) < 5)
+	if (abs(rssi_curr - rssi_old) < 5)
		return;

	rt2x00dev->link.ant.flags |= ANTENNA_MODE_SAMPLE;
@@ -319,15 +321,15 @@ static void rt2x00lib_evaluate_antenna(struct rt2x00_dev *rt2x00dev)
	rt2x00dev->link.ant.flags &= ~ANTENNA_TX_DIVERSITY;

	if (rt2x00dev->hw->conf.antenna_sel_rx == 0 &&
-	    rt2x00dev->default_ant.rx != ANTENNA_SW_DIVERSITY)
+	    rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY)
		rt2x00dev->link.ant.flags |= ANTENNA_RX_DIVERSITY;
	if (rt2x00dev->hw->conf.antenna_sel_tx == 0 &&
-	    rt2x00dev->default_ant.tx != ANTENNA_SW_DIVERSITY)
+	    rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY)
		rt2x00dev->link.ant.flags |= ANTENNA_TX_DIVERSITY;

	if (!(rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY) &&
	    !(rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY)) {
-		rt2x00dev->link.ant.flags &= ~ANTENNA_MODE_SAMPLE;
+		rt2x00dev->link.ant.flags = 0;
		return;
	}

@@ -441,17 +443,18 @@ static void rt2x00lib_link_tuner(struct work_struct *work)
	rt2x00dev->ops->lib->link_tuner(rt2x00dev);

	/*
-	 * Evaluate antenna setup.
-	 */
-	rt2x00lib_evaluate_antenna(rt2x00dev);
-
-	/*
	 * Precalculate a portion of the link signal which is
	 * in based on the tx/rx success/failure counters.
	 */
	rt2x00lib_precalculate_link_signal(&rt2x00dev->link.qual);

	/*
+	 * Evaluate antenna setup, make this the last step since this could
+	 * possibly reset some statistics.
+	 */
+	rt2x00lib_evaluate_antenna(rt2x00dev);
+
+	/*
	 * Increase tuner counter, and reschedule the next link tuner run.
	 */
	rt2x00dev->link.count++;
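The antenna-evaluation fix in the hunk above deserves a second look: the old guard `(rssi_curr - rssi_old) > -5 || (rssi_curr - rssi_old) < 5` is true for every possible value, so the early return never fired; `abs(delta) < 5` is the dead band that was intended. A minimal sketch of the corrected threshold test:

#include <stdio.h>
#include <stdlib.h>

/* Only react when the RSSI moved by at least 5 dB in either direction. */
static int rssi_changed_enough(int rssi_curr, int rssi_old)
{
	return abs(rssi_curr - rssi_old) >= 5;
}

int main(void)
{
	printf("%d %d\n",
	       rssi_changed_enough(-60, -58),	/* 0: inside the dead band  */
	       rssi_changed_enough(-60, -70));	/* 1: significant change    */
	return 0;
}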
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 838421216da0..b1915dc7dda1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -85,6 +85,8 @@ enum dev_state {
	STATE_RADIO_OFF,
	STATE_RADIO_RX_ON,
	STATE_RADIO_RX_OFF,
+	STATE_RADIO_RX_ON_LINK,
+	STATE_RADIO_RX_OFF_LINK,
	STATE_RADIO_IRQ_ON,
	STATE_RADIO_IRQ_OFF,
 };
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index b31f0c26c32b..93ea212fedd5 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1482,8 +1482,12 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
		rt61pci_disable_radio(rt2x00dev);
		break;
	case STATE_RADIO_RX_ON:
+	case STATE_RADIO_RX_ON_LINK:
+		rt61pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
+		break;
	case STATE_RADIO_RX_OFF:
-		rt61pci_toggle_rx(rt2x00dev, state);
+	case STATE_RADIO_RX_OFF_LINK:
+		rt61pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
		break;
	case STATE_DEEP_SLEEP:
	case STATE_SLEEP:
@@ -2298,9 +2302,9 @@ static void rt61pci_configure_filter(struct ieee80211_hw *hw,
	 * Apply some rules to the filters:
	 * - Some filters imply different filters to be set.
	 * - Some things we can't filter out at all.
+	 * - Multicast filter seems to kill broadcast traffic so never use it.
	 */
-	if (mc_count)
-		*total_flags |= FIF_ALLMULTI;
+	*total_flags |= FIF_ALLMULTI;
	if (*total_flags & FIF_OTHER_BSS ||
	    *total_flags & FIF_PROMISC_IN_BSS)
		*total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS;
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 4d576ab3e7f9..8103d41a1543 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1208,8 +1208,12 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
		rt73usb_disable_radio(rt2x00dev);
		break;
	case STATE_RADIO_RX_ON:
+	case STATE_RADIO_RX_ON_LINK:
+		rt73usb_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
+		break;
	case STATE_RADIO_RX_OFF:
-		rt73usb_toggle_rx(rt2x00dev, state);
+	case STATE_RADIO_RX_OFF_LINK:
+		rt73usb_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
		break;
	case STATE_DEEP_SLEEP:
	case STATE_SLEEP:
@@ -1865,9 +1869,9 @@ static void rt73usb_configure_filter(struct ieee80211_hw *hw,
	 * Apply some rules to the filters:
	 * - Some filters imply different filters to be set.
	 * - Some things we can't filter out at all.
+	 * - Multicast filter seems to kill broadcast traffic so never use it.
	 */
-	if (mc_count)
-		*total_flags |= FIF_ALLMULTI;
+	*total_flags |= FIF_ALLMULTI;
	if (*total_flags & FIF_OTHER_BSS ||
	    *total_flags & FIF_PROMISC_IN_BSS)
		*total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS;
@@ -2094,6 +2098,7 @@ static struct usb_device_id rt73usb_device_table[] = {
	/* D-Link */
	{ USB_DEVICE(0x07d1, 0x3c03), USB_DEVICE_DATA(&rt73usb_ops) },
	{ USB_DEVICE(0x07d1, 0x3c04), USB_DEVICE_DATA(&rt73usb_ops) },
+	{ USB_DEVICE(0x07d1, 0x3c07), USB_DEVICE_DATA(&rt73usb_ops) },
	/* Gemtek */
	{ USB_DEVICE(0x15a9, 0x0004), USB_DEVICE_DATA(&rt73usb_ops) },
	/* Gigabyte */
diff --git a/drivers/net/wireless/rtl8180_dev.c b/drivers/net/wireless/rtl8180_dev.c
index 27ebd689aa21..5e9a8ace0d81 100644
--- a/drivers/net/wireless/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl8180_dev.c
@@ -135,13 +135,15 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
	while (skb_queue_len(&ring->queue)) {
		struct rtl8180_tx_desc *entry = &ring->desc[ring->idx];
		struct sk_buff *skb;
-		struct ieee80211_tx_status status = { {0} };
+		struct ieee80211_tx_status status;
		struct ieee80211_tx_control *control;
		u32 flags = le32_to_cpu(entry->flags);

		if (flags & RTL8180_TX_DESC_FLAG_OWN)
			return;

+		memset(&status, 0, sizeof(status));
+
		ring->idx = (ring->idx + 1) % ring->entries;
		skb = __skb_dequeue(&ring->queue);
		pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index 0d71716d750d..f44505994a0e 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -113,10 +113,12 @@ void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
113 113
114static void rtl8187_tx_cb(struct urb *urb) 114static void rtl8187_tx_cb(struct urb *urb)
115{ 115{
116 struct ieee80211_tx_status status = { {0} }; 116 struct ieee80211_tx_status status;
117 struct sk_buff *skb = (struct sk_buff *)urb->context; 117 struct sk_buff *skb = (struct sk_buff *)urb->context;
118 struct rtl8187_tx_info *info = (struct rtl8187_tx_info *)skb->cb; 118 struct rtl8187_tx_info *info = (struct rtl8187_tx_info *)skb->cb;
119 119
120 memset(&status, 0, sizeof(status));
121
120 usb_free_urb(info->urb); 122 usb_free_urb(info->urb);
121 if (info->control) 123 if (info->control)
122 memcpy(&status.control, info->control, sizeof(status.control)); 124 memcpy(&status.control, info->control, sizeof(status.control));
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 49127e4b42c2..76ef2d83919d 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -360,11 +360,14 @@ void zd_mac_tx_failed(struct ieee80211_hw *hw)
360{ 360{
361 struct sk_buff_head *q = &zd_hw_mac(hw)->ack_wait_queue; 361 struct sk_buff_head *q = &zd_hw_mac(hw)->ack_wait_queue;
362 struct sk_buff *skb; 362 struct sk_buff *skb;
363 struct ieee80211_tx_status status = {{0}}; 363 struct ieee80211_tx_status status;
364 364
365 skb = skb_dequeue(q); 365 skb = skb_dequeue(q);
366 if (skb == NULL) 366 if (skb == NULL)
367 return; 367 return;
368
369 memset(&status, 0, sizeof(status));
370
368 tx_status(hw, skb, &status, 0); 371 tx_status(hw, skb, &status, 0);
369} 372}
370 373
@@ -389,7 +392,8 @@ void zd_mac_tx_to_dev(struct sk_buff *skb, int error)
389 if (unlikely(error || 392 if (unlikely(error ||
390 (cb->control->flags & IEEE80211_TXCTL_NO_ACK))) 393 (cb->control->flags & IEEE80211_TXCTL_NO_ACK)))
391 { 394 {
392 struct ieee80211_tx_status status = {{0}}; 395 struct ieee80211_tx_status status;
396 memset(&status, 0, sizeof(status));
393 tx_status(hw, skb, &status, !error); 397 tx_status(hw, skb, &status, !error);
394 } else { 398 } else {
395 struct sk_buff_head *q = 399 struct sk_buff_head *q =
@@ -603,7 +607,9 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
603 tx_hdr = (struct ieee80211_hdr *)skb->data; 607 tx_hdr = (struct ieee80211_hdr *)skb->data;
604 if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1))) 608 if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1)))
605 { 609 {
606 struct ieee80211_tx_status status = {{0}}; 610 struct ieee80211_tx_status status;
611
612 memset(&status, 0, sizeof(status));
607 status.flags = IEEE80211_TX_STATUS_ACK; 613 status.flags = IEEE80211_TX_STATUS_ACK;
608 status.ack_signal = stats->ssi; 614 status.ack_signal = stats->ssi;
609 __skb_unlink(skb, q); 615 __skb_unlink(skb, q);
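The rtl8180, rtl8187 and zd1211rw hunks above all make the same change: the `= {{0}}` initializer for struct ieee80211_tx_status is replaced by an uninitialized declaration followed by an explicit memset(). A minimal stand-alone sketch of the two styles, using a stand-in struct rather than the real mac80211 type:

/* Sketch of the initializer-vs-memset change; "struct tx_status" is a
 * stand-in for the real struct ieee80211_tx_status. */
#include <string.h>

struct tx_control { int flags; };
struct tx_status {
	struct tx_control control;  /* nested struct, hence the {{0}} form */
	int flags;
	int ack_signal;
};

static void report_old_style(void)
{
	struct tx_status status = { {0} };  /* zeroed via an initializer */
	(void)status;
}

static void report_new_style(void)
{
	struct tx_status status;            /* declared uninitialized ...   */

	memset(&status, 0, sizeof(status)); /* ... and zeroed explicitly    */
	(void)status;
}

int main(void)
{
	report_old_style();
	report_new_style();
	return 0;
}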
diff --git a/drivers/parisc/Kconfig b/drivers/parisc/Kconfig
index 1d3b84b4af3f..553a9905299a 100644
--- a/drivers/parisc/Kconfig
+++ b/drivers/parisc/Kconfig
@@ -103,6 +103,11 @@ config IOMMU_SBA
103 depends on PCI_LBA 103 depends on PCI_LBA
104 default PCI_LBA 104 default PCI_LBA
105 105
106config IOMMU_HELPER
107 bool
108 depends on IOMMU_SBA || IOMMU_CCIO
109 default y
110
106#config PCI_EPIC 111#config PCI_EPIC
107# bool "EPIC/SAGA PCI support" 112# bool "EPIC/SAGA PCI support"
108# depends on PCI 113# depends on PCI
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index d08b284de196..62db3c3fe4dc 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -43,6 +43,7 @@
43#include <linux/proc_fs.h> 43#include <linux/proc_fs.h>
44#include <linux/seq_file.h> 44#include <linux/seq_file.h>
45#include <linux/scatterlist.h> 45#include <linux/scatterlist.h>
46#include <linux/iommu-helper.h>
46 47
47#include <asm/byteorder.h> 48#include <asm/byteorder.h>
48#include <asm/cache.h> /* for L1_CACHE_BYTES */ 49#include <asm/cache.h> /* for L1_CACHE_BYTES */
@@ -302,13 +303,17 @@ static int ioc_count;
302*/ 303*/
303#define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \ 304#define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \
304 for(; res_ptr < res_end; ++res_ptr) { \ 305 for(; res_ptr < res_end; ++res_ptr) { \
305 if(0 == (*res_ptr & mask)) { \ 306 int ret;\
306 *res_ptr |= mask; \ 307 unsigned int idx;\
307 res_idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \ 308 idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
308 ioc->res_hint = res_idx + (size >> 3); \ 309 ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size);\
309 goto resource_found; \ 310 if ((0 == (*res_ptr & mask)) && !ret) { \
310 } \ 311 *res_ptr |= mask; \
311 } 312 res_idx = idx;\
313 ioc->res_hint = res_idx + (size >> 3); \
314 goto resource_found; \
315 } \
316 }
312 317
313#define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \ 318#define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \
314 u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \ 319 u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \
@@ -341,10 +346,11 @@ static int ioc_count;
341 * of available pages for the requested size. 346 * of available pages for the requested size.
342 */ 347 */
343static int 348static int
344ccio_alloc_range(struct ioc *ioc, size_t size) 349ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
345{ 350{
346 unsigned int pages_needed = size >> IOVP_SHIFT; 351 unsigned int pages_needed = size >> IOVP_SHIFT;
347 unsigned int res_idx; 352 unsigned int res_idx;
353 unsigned long boundary_size;
348#ifdef CCIO_SEARCH_TIME 354#ifdef CCIO_SEARCH_TIME
349 unsigned long cr_start = mfctl(16); 355 unsigned long cr_start = mfctl(16);
350#endif 356#endif
@@ -360,6 +366,9 @@ ccio_alloc_range(struct ioc *ioc, size_t size)
360 ** ggg sacrifices another 710 to the computer gods. 366 ** ggg sacrifices another 710 to the computer gods.
361 */ 367 */
362 368
369 boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
370 1ULL << IOVP_SHIFT) >> IOVP_SHIFT;
371
363 if (pages_needed <= 8) { 372 if (pages_needed <= 8) {
364 /* 373 /*
365 * LAN traffic will not thrash the TLB IFF the same NIC 374 * LAN traffic will not thrash the TLB IFF the same NIC
@@ -760,7 +769,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
760 ioc->msingle_pages += size >> IOVP_SHIFT; 769 ioc->msingle_pages += size >> IOVP_SHIFT;
761#endif 770#endif
762 771
763 idx = ccio_alloc_range(ioc, size); 772 idx = ccio_alloc_range(ioc, dev, size);
764 iovp = (dma_addr_t)MKIOVP(idx); 773 iovp = (dma_addr_t)MKIOVP(idx);
765 774
766 pdir_start = &(ioc->pdir_base[idx]); 775 pdir_start = &(ioc->pdir_base[idx]);
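ccio_alloc_range() now derives boundary_size the same way the SBA code below does: take the device's DMA segment boundary mask, add one to get the boundary length in bytes, round up to a whole number of IO pages, and shift down to a page count. A self-contained sketch of that arithmetic, with ALIGN and IOVP_SHIFT written out locally rather than taken from kernel headers:

/* Stand-alone sketch of the boundary_size calculation used by
 * ccio_alloc_range() and sba_search_bitmap(); ALIGN and IOVP_SHIFT are
 * local stand-ins for the kernel definitions. */
#include <stdio.h>

#define IOVP_SHIFT 12	/* 4 KiB IO pages, assumed for illustration */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

static unsigned long boundary_size_in_pages(unsigned long long seg_boundary_mask)
{
	/* mask 0xffffffff -> boundary length 4 GiB -> 1048576 IO pages */
	return ALIGN(seg_boundary_mask + 1, 1ULL << IOVP_SHIFT) >> IOVP_SHIFT;
}

int main(void)
{
	printf("%lu\n", boundary_size_in_pages(0xffffffffULL)); /* 1048576 */
	return 0;
}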
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index 97ba8286c596..a9c46cc2db37 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -96,8 +96,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
96 96
97static inline unsigned int 97static inline unsigned int
98iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, 98iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
99 struct scatterlist *startsg, int nents, 99 struct scatterlist *startsg, int nents,
100 int (*iommu_alloc_range)(struct ioc *, size_t)) 100 int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
101{ 101{
102 struct scatterlist *contig_sg; /* contig chunk head */ 102 struct scatterlist *contig_sg; /* contig chunk head */
103 unsigned long dma_offset, dma_len; /* start/len of DMA stream */ 103 unsigned long dma_offset, dma_len; /* start/len of DMA stream */
@@ -166,7 +166,7 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
166 dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE); 166 dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
167 sg_dma_address(contig_sg) = 167 sg_dma_address(contig_sg) =
168 PIDE_FLAG 168 PIDE_FLAG
169 | (iommu_alloc_range(ioc, dma_len) << IOVP_SHIFT) 169 | (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
170 | dma_offset; 170 | dma_offset;
171 n_mappings++; 171 n_mappings++;
172 } 172 }
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index d06627c3f353..bdbe780e21c5 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -29,6 +29,7 @@
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/pci.h> 30#include <linux/pci.h>
31#include <linux/scatterlist.h> 31#include <linux/scatterlist.h>
32#include <linux/iommu-helper.h>
32 33
33#include <asm/byteorder.h> 34#include <asm/byteorder.h>
34#include <asm/io.h> 35#include <asm/io.h>
@@ -313,6 +314,12 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
313#define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n))) 314#define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n)))
314#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) 315#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
315 316
317unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
318 unsigned int bitshiftcnt)
319{
320 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
321 + bitshiftcnt;
322}
316 323
317/** 324/**
318 * sba_search_bitmap - find free space in IO PDIR resource bitmap 325 * sba_search_bitmap - find free space in IO PDIR resource bitmap
@@ -324,19 +331,36 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
324 * Cool perf optimization: search for log2(size) bits at a time. 331 * Cool perf optimization: search for log2(size) bits at a time.
325 */ 332 */
326static SBA_INLINE unsigned long 333static SBA_INLINE unsigned long
327sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) 334sba_search_bitmap(struct ioc *ioc, struct device *dev,
335 unsigned long bits_wanted)
328{ 336{
329 unsigned long *res_ptr = ioc->res_hint; 337 unsigned long *res_ptr = ioc->res_hint;
330 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); 338 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
331 unsigned long pide = ~0UL; 339 unsigned long pide = ~0UL, tpide;
340 unsigned long boundary_size;
341 unsigned long shift;
342 int ret;
343
344 boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
345 1ULL << IOVP_SHIFT) >> IOVP_SHIFT;
346
347#if defined(ZX1_SUPPORT)
348 BUG_ON(ioc->ibase & ~IOVP_MASK);
349 shift = ioc->ibase >> IOVP_SHIFT;
350#else
351 shift = 0;
352#endif
332 353
333 if (bits_wanted > (BITS_PER_LONG/2)) { 354 if (bits_wanted > (BITS_PER_LONG/2)) {
334 /* Search word at a time - no mask needed */ 355 /* Search word at a time - no mask needed */
335 for(; res_ptr < res_end; ++res_ptr) { 356 for(; res_ptr < res_end; ++res_ptr) {
336 if (*res_ptr == 0) { 357 tpide = ptr_to_pide(ioc, res_ptr, 0);
358 ret = iommu_is_span_boundary(tpide, bits_wanted,
359 shift,
360 boundary_size);
361 if ((*res_ptr == 0) && !ret) {
337 *res_ptr = RESMAP_MASK(bits_wanted); 362 *res_ptr = RESMAP_MASK(bits_wanted);
338 pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); 363 pide = tpide;
339 pide <<= 3; /* convert to bit address */
340 break; 364 break;
341 } 365 }
342 } 366 }
@@ -365,11 +389,13 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
365 { 389 {
366 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); 390 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
367 WARN_ON(mask == 0); 391 WARN_ON(mask == 0);
368 if(((*res_ptr) & mask) == 0) { 392 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
393 ret = iommu_is_span_boundary(tpide, bits_wanted,
394 shift,
395 boundary_size);
396 if ((((*res_ptr) & mask) == 0) && !ret) {
369 *res_ptr |= mask; /* mark resources busy! */ 397 *res_ptr |= mask; /* mark resources busy! */
370 pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); 398 pide = tpide;
371 pide <<= 3; /* convert to bit address */
372 pide += bitshiftcnt;
373 break; 399 break;
374 } 400 }
375 mask >>= o; 401 mask >>= o;
@@ -404,7 +430,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
404 * resource bit map. 430 * resource bit map.
405 */ 431 */
406static int 432static int
407sba_alloc_range(struct ioc *ioc, size_t size) 433sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
408{ 434{
409 unsigned int pages_needed = size >> IOVP_SHIFT; 435 unsigned int pages_needed = size >> IOVP_SHIFT;
410#ifdef SBA_COLLECT_STATS 436#ifdef SBA_COLLECT_STATS
@@ -412,9 +438,9 @@ sba_alloc_range(struct ioc *ioc, size_t size)
412#endif 438#endif
413 unsigned long pide; 439 unsigned long pide;
414 440
415 pide = sba_search_bitmap(ioc, pages_needed); 441 pide = sba_search_bitmap(ioc, dev, pages_needed);
416 if (pide >= (ioc->res_size << 3)) { 442 if (pide >= (ioc->res_size << 3)) {
417 pide = sba_search_bitmap(ioc, pages_needed); 443 pide = sba_search_bitmap(ioc, dev, pages_needed);
418 if (pide >= (ioc->res_size << 3)) 444 if (pide >= (ioc->res_size << 3))
419 panic("%s: I/O MMU @ %p is out of mapping resources\n", 445 panic("%s: I/O MMU @ %p is out of mapping resources\n",
420 __FILE__, ioc->ioc_hpa); 446 __FILE__, ioc->ioc_hpa);
@@ -710,7 +736,7 @@ sba_map_single(struct device *dev, void *addr, size_t size,
710 ioc->msingle_calls++; 736 ioc->msingle_calls++;
711 ioc->msingle_pages += size >> IOVP_SHIFT; 737 ioc->msingle_pages += size >> IOVP_SHIFT;
712#endif 738#endif
713 pide = sba_alloc_range(ioc, size); 739 pide = sba_alloc_range(ioc, dev, size);
714 iovp = (dma_addr_t) pide << IOVP_SHIFT; 740 iovp = (dma_addr_t) pide << IOVP_SHIFT;
715 741
716 DBG_RUN("%s() 0x%p -> 0x%lx\n", 742 DBG_RUN("%s() 0x%p -> 0x%lx\n",
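Both the CCIO search loop and sba_search_bitmap() now reject a candidate bitmap position when iommu_is_span_boundary() says the mapping would cross a segment boundary. The sketch below captures the intent of such a check, a range of nr pages starting at index, offset by shift, must fit inside one boundary_size window; it is a plausible stand-alone version under the assumption that boundary_size is a power of two, not necessarily the exact kernel implementation.

/* Stand-alone sketch of a "does this allocation cross a segment
 * boundary?" test; boundary_size is assumed to be a power of two. */
#include <stdbool.h>
#include <stdio.h>

static bool crosses_boundary(unsigned long index, unsigned long nr,
			     unsigned long shift, unsigned long boundary_size)
{
	/* Offset of the first page within its boundary-sized window. */
	unsigned long offset = (shift + index) & (boundary_size - 1);

	/* The mapping crosses a boundary if it does not fit in the window. */
	return offset + nr > boundary_size;
}

int main(void)
{
	/* 16 pages starting at page 1020 with a 1024-page boundary: crosses. */
	printf("%d\n", crosses_boundary(1020, 16, 0, 1024)); /* prints 1 */
	/* 4 pages starting at page 1020: fits exactly, does not cross. */
	printf("%d\n", crosses_boundary(1020, 4, 0, 1024));  /* prints 0 */
	return 0;
}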
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index ef5a6a245f5f..6a9403d79e0c 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -145,13 +145,15 @@ void pci_bus_add_devices(struct pci_bus *bus)
145 child_bus = dev->subordinate; 145 child_bus = dev->subordinate;
146 child_bus->dev.parent = child_bus->bridge; 146 child_bus->dev.parent = child_bus->bridge;
147 retval = device_register(&child_bus->dev); 147 retval = device_register(&child_bus->dev);
148 if (!retval) 148 if (retval)
149 dev_err(&dev->dev, "Error registering pci_bus,"
150 " continuing...\n");
151 else
149 retval = device_create_file(&child_bus->dev, 152 retval = device_create_file(&child_bus->dev,
150 &dev_attr_cpuaffinity); 153 &dev_attr_cpuaffinity);
151 if (retval) 154 if (retval)
152 dev_err(&dev->dev, "Error registering pci_bus" 155 dev_err(&dev->dev, "Error creating cpuaffinity"
153 " device bridge symlink," 156 " file, continuing...\n");
154 " continuing...\n");
155 } 157 }
156 } 158 }
157} 159}
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 8ed26480371f..f941f609dbf3 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -14,11 +14,12 @@
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA. 15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 * 16 *
17 * Copyright (C) Ashok Raj <ashok.raj@intel.com> 17 * Copyright (C) 2006-2008 Intel Corporation
18 * Copyright (C) Shaohua Li <shaohua.li@intel.com> 18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Copyright (C) Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
20 * 21 *
21 * This file implements early detection/parsing of DMA Remapping Devices 22 * This file implements early detection/parsing of DMA Remapping Devices
22 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI 23 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
23 * tables. 24 * tables.
24 */ 25 */
diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c
index a590ef682153..4d4a64478404 100644
--- a/drivers/pci/hotplug-pci.c
+++ b/drivers/pci/hotplug-pci.c
@@ -4,7 +4,7 @@
4#include "pci.h" 4#include "pci.h"
5 5
6 6
7unsigned int pci_do_scan_bus(struct pci_bus *bus) 7unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
8{ 8{
9 unsigned int max; 9 unsigned int max;
10 10
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index cf22f9e01e00..5e50008d1181 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -1085,7 +1085,7 @@ static int acpiphp_bus_trim(acpi_handle handle)
1085 * This function should be called per *physical slot*, 1085 * This function should be called per *physical slot*,
1086 * not per each slot object in ACPI namespace. 1086 * not per each slot object in ACPI namespace.
1087 */ 1087 */
1088static int enable_device(struct acpiphp_slot *slot) 1088static int __ref enable_device(struct acpiphp_slot *slot)
1089{ 1089{
1090 struct pci_dev *dev; 1090 struct pci_dev *dev;
1091 struct pci_bus *bus = slot->bridge->pci_bus; 1091 struct pci_bus *bus = slot->bridge->pci_bus;
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 5e9be44817cb..b3515fc4cd38 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -250,7 +250,7 @@ int cpci_led_off(struct slot* slot)
250 * Device configuration functions 250 * Device configuration functions
251 */ 251 */
252 252
253int cpci_configure_slot(struct slot* slot) 253int __ref cpci_configure_slot(struct slot *slot)
254{ 254{
255 struct pci_bus *parent; 255 struct pci_bus *parent;
256 int fn; 256 int fn;
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 600ed7b67ae7..bbccde9f228f 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -963,6 +963,7 @@ static int __init ebda_rsrc_controller (void)
963 963
964 bus_info_ptr1 = ibmphp_find_same_bus_num (hpc_ptr->slots[index].slot_bus_num); 964 bus_info_ptr1 = ibmphp_find_same_bus_num (hpc_ptr->slots[index].slot_bus_num);
965 if (!bus_info_ptr1) { 965 if (!bus_info_ptr1) {
966 kfree(tmp_slot);
966 rc = -ENODEV; 967 rc = -ENODEV;
967 goto error; 968 goto error;
968 } 969 }
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 6eba9b2cfb90..698975a6a21c 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -711,7 +711,8 @@ static int hpc_power_off_slot(struct slot * slot)
711 retval = pcie_write_cmd(slot, slot_cmd, cmd_mask); 711 retval = pcie_write_cmd(slot, slot_cmd, cmd_mask);
712 if (retval) { 712 if (retval) {
713 err("%s: Write command failed!\n", __FUNCTION__); 713 err("%s: Write command failed!\n", __FUNCTION__);
714 return -1; 714 retval = -1;
715 goto out;
715 } 716 }
716 dbg("%s: SLOTCTRL %x write cmd %x\n", 717 dbg("%s: SLOTCTRL %x write cmd %x\n",
717 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); 718 __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd);
@@ -722,7 +723,7 @@ static int hpc_power_off_slot(struct slot * slot)
722 * removed from the slot/adapter. 723 * removed from the slot/adapter.
723 */ 724 */
724 msleep(1000); 725 msleep(1000);
725 726 out:
726 if (changed) 727 if (changed)
727 pcie_unmask_bad_dllp(ctrl); 728 pcie_unmask_bad_dllp(ctrl);
728 729
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index dd50713966d1..9372a840b63d 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -167,7 +167,7 @@ static void program_fw_provided_values(struct pci_dev *dev)
167 } 167 }
168} 168}
169 169
170static int pciehp_add_bridge(struct pci_dev *dev) 170static int __ref pciehp_add_bridge(struct pci_dev *dev)
171{ 171{
172 struct pci_bus *parent = dev->bus; 172 struct pci_bus *parent = dev->bus;
173 int pass, busnr, start = parent->secondary; 173 int pass, busnr, start = parent->secondary;
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 0a6b25ef194c..a69a21520895 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -96,7 +96,7 @@ static void program_fw_provided_values(struct pci_dev *dev)
96 } 96 }
97} 97}
98 98
99int shpchp_configure_device(struct slot *p_slot) 99int __ref shpchp_configure_device(struct slot *p_slot)
100{ 100{
101 struct pci_dev *dev; 101 struct pci_dev *dev;
102 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; 102 struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index a4c3089f892a..977d29b32295 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -14,9 +14,10 @@
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA. 15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 * 16 *
17 * Copyright (C) Ashok Raj <ashok.raj@intel.com> 17 * Copyright (C) 2006-2008 Intel Corporation
18 * Copyright (C) Shaohua Li <shaohua.li@intel.com> 18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Copyright (C) Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
20 */ 21 */
21 22
22#include <linux/init.h> 23#include <linux/init.h>
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h
index 07f5f6353bda..afc0ad96122e 100644
--- a/drivers/pci/intel-iommu.h
+++ b/drivers/pci/intel-iommu.h
@@ -14,8 +14,9 @@
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA. 15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 * 16 *
17 * Copyright (C) Ashok Raj <ashok.raj@intel.com> 17 * Copyright (C) 2006-2008 Intel Corporation
18 * Copyright (C) Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
19 */ 20 */
20 21
21#ifndef _INTEL_IOMMU_H_ 22#ifndef _INTEL_IOMMU_H_
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 8de7ab6c6d0c..dbcdd6bfa63a 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -3,7 +3,8 @@
3 * 3 *
4 * This file is released under the GPLv2. 4 * This file is released under the GPLv2.
5 * 5 *
6 * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 6 * Copyright (C) 2006-2008 Intel Corporation
7 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
7 */ 8 */
8 9
9#include "iova.h" 10#include "iova.h"
diff --git a/drivers/pci/iova.h b/drivers/pci/iova.h
index d521b5b7319c..228f6c94b69c 100644
--- a/drivers/pci/iova.h
+++ b/drivers/pci/iova.h
@@ -3,7 +3,8 @@
3 * 3 *
4 * This file is released under the GPLv2. 4 * This file is released under the GPLv2.
5 * 5 *
6 * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 6 * Copyright (C) 2006-2008 Intel Corporation
7 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
7 * 8 *
8 */ 9 */
9 10
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index ae3df46eaabf..183fddaa38b7 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -554,6 +554,7 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
554 case PM_EVENT_PRETHAW: 554 case PM_EVENT_PRETHAW:
555 /* REVISIT both freeze and pre-thaw "should" use D0 */ 555 /* REVISIT both freeze and pre-thaw "should" use D0 */
556 case PM_EVENT_SUSPEND: 556 case PM_EVENT_SUSPEND:
557 case PM_EVENT_HIBERNATE:
557 return PCI_D3hot; 558 return PCI_D3hot;
558 default: 559 default:
559 printk("Unrecognized suspend event %d\n", state.event); 560 printk("Unrecognized suspend event %d\n", state.event);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 4d23b9fb551b..2db2e4bb0d1e 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -286,7 +286,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
286 } 286 }
287} 287}
288 288
289void pci_read_bridge_bases(struct pci_bus *child) 289void __devinit pci_read_bridge_bases(struct pci_bus *child)
290{ 290{
291 struct pci_dev *dev = child->self; 291 struct pci_dev *dev = child->self;
292 u8 io_base_lo, io_limit_lo; 292 u8 io_base_lo, io_limit_lo;
@@ -472,7 +472,7 @@ static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
472 * them, we proceed to assigning numbers to the remaining buses in 472 * them, we proceed to assigning numbers to the remaining buses in
473 * order to avoid overlaps between old and new bus numbers. 473 * order to avoid overlaps between old and new bus numbers.
474 */ 474 */
475int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass) 475int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
476{ 476{
477 struct pci_bus *child; 477 struct pci_bus *child;
478 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); 478 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
@@ -1008,7 +1008,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
1008 return nr; 1008 return nr;
1009} 1009}
1010 1010
1011unsigned int pci_scan_child_bus(struct pci_bus *bus) 1011unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1012{ 1012{
1013 unsigned int devfn, pass, max = bus->secondary; 1013 unsigned int devfn, pass, max = bus->secondary;
1014 struct pci_dev *dev; 1014 struct pci_dev *dev;
@@ -1116,7 +1116,7 @@ err_out:
1116 return NULL; 1116 return NULL;
1117} 1117}
1118 1118
1119struct pci_bus *pci_scan_bus_parented(struct device *parent, 1119struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
1120 int bus, struct pci_ops *ops, void *sysdata) 1120 int bus, struct pci_ops *ops, void *sysdata)
1121{ 1121{
1122 struct pci_bus *b; 1122 struct pci_bus *b;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bbad4a9f264f..e9a333d98552 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1652,9 +1652,8 @@ static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
1652 pci_write_config_byte(dev, 0x75, 0x1); 1652 pci_write_config_byte(dev, 0x75, 0x1);
1653 pci_write_config_byte(dev, 0x77, 0x0); 1653 pci_write_config_byte(dev, 0x77, 0x0);
1654 1654
1655 printk(KERN_INFO 1655 dev_info(&dev->dev,
1656 "PCI: VIA CX700 PCI parking/caching fixup on %s\n", 1656 "Disabling VIA CX700 PCI parking/caching\n");
1657 pci_name(dev));
1658 } 1657 }
1659 } 1658 }
1660} 1659}
@@ -1726,32 +1725,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2
1726 quirk_msi_ht_cap); 1725 quirk_msi_ht_cap);
1727 1726
1728 1727
1729/*
1730 * Force enable MSI mapping capability on HT bridges
1731 */
1732static void __devinit quirk_msi_ht_cap_enable(struct pci_dev *dev)
1733{
1734 int pos, ttl = 48;
1735
1736 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
1737 while (pos && ttl--) {
1738 u8 flags;
1739
1740 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) {
1741 printk(KERN_INFO "PCI: Enabling HT MSI Mapping on %s\n",
1742 pci_name(dev));
1743
1744 pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
1745 flags | HT_MSI_FLAGS_ENABLE);
1746 }
1747 pos = pci_find_next_ht_capability(dev, pos,
1748 HT_CAPTYPE_MSI_MAPPING);
1749 }
1750}
1751DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
1752 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
1753 quirk_msi_ht_cap_enable);
1754
1755/* The nVidia CK804 chipset may have 2 HT MSI mappings. 1728/* The nVidia CK804 chipset may have 2 HT MSI mappings.
1756 * MSI are supported if the MSI capability set in any of these mappings. 1729 * MSI are supported if the MSI capability set in any of these mappings.
1757 */ 1730 */
@@ -1778,9 +1751,8 @@ static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
1778DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, 1751DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
1779 quirk_nvidia_ck804_msi_ht_cap); 1752 quirk_nvidia_ck804_msi_ht_cap);
1780 1753
1781/* 1754/* Force enable MSI mapping capability on HT bridges */
1782 * Force enable MSI mapping capability on HT bridges */ 1755static void __devinit ht_enable_msi_mapping(struct pci_dev *dev)
1783static inline void ht_enable_msi_mapping(struct pci_dev *dev)
1784{ 1756{
1785 int pos, ttl = 48; 1757 int pos, ttl = 48;
1786 1758
@@ -1799,6 +1771,9 @@ static inline void ht_enable_msi_mapping(struct pci_dev *dev)
1799 HT_CAPTYPE_MSI_MAPPING); 1771 HT_CAPTYPE_MSI_MAPPING);
1800 } 1772 }
1801} 1773}
1774DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
1775 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
1776 ht_enable_msi_mapping);
1802 1777
1803static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) 1778static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
1804{ 1779{
@@ -1830,7 +1805,7 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
1830 1805
1831 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, 1806 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
1832 &flags) == 0) { 1807 &flags) == 0) {
1833 dev_info(&dev->dev, "Quirk disabling HT MSI mapping"); 1808 dev_info(&dev->dev, "Disabling HT MSI mapping");
1834 pci_write_config_byte(dev, pos + HT_MSI_FLAGS, 1809 pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
1835 flags & ~HT_MSI_FLAGS_ENABLE); 1810 flags & ~HT_MSI_FLAGS_ENABLE);
1836 } 1811 }
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index a98b2470b9ea..bd5c0e031398 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -242,8 +242,7 @@ void pci_remove_rom(struct pci_dev *pdev)
242#endif /* 0 */ 242#endif /* 0 */
243 243
244/** 244/**
245 * pci_cleanup_rom - internal routine for freeing the ROM copy created 245 * pci_cleanup_rom - free the ROM copy created by pci_map_rom_copy
246 * by pci_map_rom_copy called from remove.c
247 * @pdev: pointer to pci device struct 246 * @pdev: pointer to pci device struct
248 * 247 *
249 * Free the copied ROM if we allocated one. 248 * Free the copied ROM if we allocated one.
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 4065139753b6..37993206ae5d 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -17,7 +17,6 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/pnp.h> 18#include <linux/pnp.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/dmi.h>
21#include <linux/kallsyms.h> 20#include <linux/kallsyms.h>
22#include "base.h" 21#include "base.h"
23 22
@@ -109,42 +108,73 @@ static void quirk_sb16audio_resources(struct pnp_dev *dev)
109 "pnp: SB audio device quirk - increasing port range\n"); 108 "pnp: SB audio device quirk - increasing port range\n");
110} 109}
111 110
112static void quirk_supermicro_h8dce_system(struct pnp_dev *dev) 111
112#include <linux/pci.h>
113
114static void quirk_system_pci_resources(struct pnp_dev *dev)
113{ 115{
114 int i; 116 struct pci_dev *pdev = NULL;
115 static struct dmi_system_id supermicro_h8dce[] = { 117 resource_size_t pnp_start, pnp_end, pci_start, pci_end;
116 { 118 int i, j;
117 .ident = "Supermicro H8DCE",
118 .matches = {
119 DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
120 DMI_MATCH(DMI_PRODUCT_NAME, "H8DCE"),
121 },
122 },
123 { }
124 };
125
126 if (!dmi_check_system(supermicro_h8dce))
127 return;
128 119
129 /* 120 /*
130 * On the Supermicro H8DCE, there's a system device with resources 121 * Some BIOSes have PNP motherboard devices with resources that
131 * that overlap BAR 6 of the built-in SATA PCI adapter. If the PNP 122 * partially overlap PCI BARs. The PNP system driver claims these
132 * system device claims them, the sata_nv driver won't be able to. 123 * motherboard resources, which prevents the normal PCI driver from
133 * More details at: 124 * requesting them later.
134 * https://bugzilla.redhat.com/show_bug.cgi?id=280641 125 *
135 * https://bugzilla.redhat.com/show_bug.cgi?id=313491 126 * This patch disables the PNP resources that conflict with PCI BARs
136 * http://lkml.org/lkml/2008/1/9/449 127 * so they won't be claimed by the PNP system driver.
137 * http://thread.gmane.org/gmane.linux.acpi.devel/27312
138 */ 128 */
139 for (i = 0; i < PNP_MAX_MEM; i++) { 129 for_each_pci_dev(pdev) {
140 if (pnp_mem_valid(dev, i) && pnp_mem_len(dev, i) && 130 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
141 (pnp_mem_start(dev, i) & 0xdfef0000) == 0xdfef0000) { 131 if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM) ||
142 dev_warn(&dev->dev, "disabling 0x%llx-0x%llx to prevent" 132 pci_resource_len(pdev, i) == 0)
143 " conflict with sata_nv PCI device\n", 133 continue;
144 (unsigned long long) pnp_mem_start(dev, i), 134
145 (unsigned long long) (pnp_mem_start(dev, i) + 135 pci_start = pci_resource_start(pdev, i);
146 pnp_mem_len(dev, i) - 1)); 136 pci_end = pci_resource_end(pdev, i);
147 pnp_mem_flags(dev, i) = 0; 137 for (j = 0; j < PNP_MAX_MEM; j++) {
138 if (!pnp_mem_valid(dev, j) ||
139 pnp_mem_len(dev, j) == 0)
140 continue;
141
142 pnp_start = pnp_mem_start(dev, j);
143 pnp_end = pnp_mem_end(dev, j);
144
145 /*
146 * If the PNP region doesn't overlap the PCI
147 * region at all, there's no problem.
148 */
149 if (pnp_end < pci_start || pnp_start > pci_end)
150 continue;
151
152 /*
153 * If the PNP region completely encloses (or is
154 * at least as large as) the PCI region, that's
155 * also OK. For example, this happens when the
156 * PNP device describes a bridge with PCI
157 * behind it.
158 */
159 if (pnp_start <= pci_start &&
160 pnp_end >= pci_end)
161 continue;
162
163 /*
164 * Otherwise, the PNP region overlaps *part* of
165 * the PCI region, and that might prevent a PCI
166 * driver from requesting its resources.
167 */
168 dev_warn(&dev->dev, "mem resource "
169 "(0x%llx-0x%llx) overlaps %s BAR %d "
170 "(0x%llx-0x%llx), disabling\n",
171 (unsigned long long) pnp_start,
172 (unsigned long long) pnp_end,
173 pci_name(pdev), i,
174 (unsigned long long) pci_start,
175 (unsigned long long) pci_end);
176 pnp_mem_flags(dev, j) = 0;
177 }
148 } 178 }
149 } 179 }
150} 180}
@@ -169,8 +199,8 @@ static struct pnp_fixup pnp_fixups[] = {
169 {"CTL0043", quirk_sb16audio_resources}, 199 {"CTL0043", quirk_sb16audio_resources},
170 {"CTL0044", quirk_sb16audio_resources}, 200 {"CTL0044", quirk_sb16audio_resources},
171 {"CTL0045", quirk_sb16audio_resources}, 201 {"CTL0045", quirk_sb16audio_resources},
172 {"PNP0c01", quirk_supermicro_h8dce_system}, 202 {"PNP0c01", quirk_system_pci_resources},
173 {"PNP0c02", quirk_supermicro_h8dce_system}, 203 {"PNP0c02", quirk_system_pci_resources},
174 {""} 204 {""}
175}; 205};
176 206
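The comments in the new quirk_system_pci_resources() spell out three cases for a PNP memory region versus a PCI BAR: no overlap, the PNP region enclosing the BAR (for example a bridge window), and a partial overlap; only the last case is disabled. A small self-contained sketch of that classification over inclusive [start, end] ranges:

/* Stand-alone sketch of the PNP-vs-PCI range classification used by
 * quirk_system_pci_resources(); ranges are inclusive [start, end]. */
#include <stdio.h>

enum overlap { NO_OVERLAP, ENCLOSES, PARTIAL_OVERLAP };

static enum overlap classify(unsigned long long pnp_start, unsigned long long pnp_end,
			     unsigned long long pci_start, unsigned long long pci_end)
{
	if (pnp_end < pci_start || pnp_start > pci_end)
		return NO_OVERLAP;	/* disjoint: nothing to do            */
	if (pnp_start <= pci_start && pnp_end >= pci_end)
		return ENCLOSES;	/* e.g. a bridge window: also fine    */
	return PARTIAL_OVERLAP;		/* only this case gets disabled       */
}

int main(void)
{
	/* PNP region clips the front half of a BAR: partial overlap. */
	printf("%d\n", classify(0xd0000000ULL, 0xd0003fffULL,
				0xd0002000ULL, 0xd0005fffULL)); /* prints 2 */
	return 0;
}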
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c
index 5480119ff9d3..3ce9f3defc12 100644
--- a/drivers/rapidio/rio-driver.c
+++ b/drivers/rapidio/rio-driver.c
@@ -78,8 +78,7 @@ void rio_dev_put(struct rio_dev *rdev)
78} 78}
79 79
80/** 80/**
81 * rio_device_probe - Tell if a RIO device structure has a matching RIO 81 * rio_device_probe - Tell if a RIO device structure has a matching RIO device id structure
82 * device id structure
83 * @id: the RIO device id structure to match against 82 * @id: the RIO device id structure to match against
84 * @dev: the RIO device structure to match against 83 * @dev: the RIO device structure to match against
85 * 84 *
@@ -137,7 +136,7 @@ static int rio_device_remove(struct device *dev)
137 * rio_register_driver - register a new RIO driver 136 * rio_register_driver - register a new RIO driver
138 * @rdrv: the RIO driver structure to register 137 * @rdrv: the RIO driver structure to register
139 * 138 *
140 * Adds a &struct rio_driver to the list of registered drivers 139 * Adds a &struct rio_driver to the list of registered drivers.
141 * Returns a negative value on error, otherwise 0. If no error 140 * Returns a negative value on error, otherwise 0. If no error
142 * occurred, the driver remains registered even if no device 141 * occurred, the driver remains registered even if no device
143 * was claimed during registration. 142 * was claimed during registration.
@@ -167,8 +166,7 @@ void rio_unregister_driver(struct rio_driver *rdrv)
167} 166}
168 167
169/** 168/**
170 * rio_match_bus - Tell if a RIO device structure has a matching RIO 169 * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure
171 * driver device id structure
172 * @dev: the standard device structure to match against 170 * @dev: the standard device structure to match against
173 * @drv: the standard driver structure containing the ids to match against 171 * @drv: the standard driver structure containing the ids to match against
174 * 172 *
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 6402d699072b..82f5ad9c3af4 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -250,6 +250,15 @@ config RTC_DRV_TWL92330
250 platforms. The support is integrated with the rest of 250 platforms. The support is integrated with the rest of
251 the Menelaus driver; it's not a separate module. 251 the Menelaus driver; it's not a separate module.
252 252
253config RTC_DRV_S35390A
254 tristate "Seiko Instruments S-35390A"
255 help
256 If you say yes here you will get support for the Seiko
257 Instruments S-35390A.
258
259 This driver can also be built as a module. If so the module
260 will be called rtc-s35390a.
261
253endif # I2C 262endif # I2C
254 263
255comment "SPI RTC drivers" 264comment "SPI RTC drivers"
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index ec703f34ab86..872f1218ff9f 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
45obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o 45obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
46obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o 46obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
47obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o 47obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
48obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o
48obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o 49obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
49obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o 50obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o
50obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o 51obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index e059f94c79eb..f3ee2ad566b4 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -388,6 +388,7 @@ static int cmos_procfs(struct device *dev, struct seq_file *seq)
388 return seq_printf(seq, 388 return seq_printf(seq,
389 "periodic_IRQ\t: %s\n" 389 "periodic_IRQ\t: %s\n"
390 "update_IRQ\t: %s\n" 390 "update_IRQ\t: %s\n"
391 "HPET_emulated\t: %s\n"
391 // "square_wave\t: %s\n" 392 // "square_wave\t: %s\n"
392 // "BCD\t\t: %s\n" 393 // "BCD\t\t: %s\n"
393 "DST_enable\t: %s\n" 394 "DST_enable\t: %s\n"
@@ -395,6 +396,7 @@ static int cmos_procfs(struct device *dev, struct seq_file *seq)
395 "batt_status\t: %s\n", 396 "batt_status\t: %s\n",
396 (rtc_control & RTC_PIE) ? "yes" : "no", 397 (rtc_control & RTC_PIE) ? "yes" : "no",
397 (rtc_control & RTC_UIE) ? "yes" : "no", 398 (rtc_control & RTC_UIE) ? "yes" : "no",
399 is_hpet_enabled() ? "yes" : "no",
398 // (rtc_control & RTC_SQWE) ? "yes" : "no", 400 // (rtc_control & RTC_SQWE) ? "yes" : "no",
399 // (rtc_control & RTC_DM_BINARY) ? "no" : "yes", 401 // (rtc_control & RTC_DM_BINARY) ? "no" : "yes",
400 (rtc_control & RTC_DST_EN) ? "yes" : "no", 402 (rtc_control & RTC_DST_EN) ? "yes" : "no",
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
new file mode 100644
index 000000000000..e8abc90c32c5
--- /dev/null
+++ b/drivers/rtc/rtc-s35390a.c
@@ -0,0 +1,316 @@
1/*
2 * Seiko Instruments S-35390A RTC Driver
3 *
4 * Copyright (c) 2007 Byron Bradley
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/rtc.h>
14#include <linux/i2c.h>
15#include <linux/bitrev.h>
16#include <linux/bcd.h>
17#include <linux/slab.h>
18
19#define S35390A_CMD_STATUS1 0
20#define S35390A_CMD_STATUS2 1
21#define S35390A_CMD_TIME1 2
22
23#define S35390A_BYTE_YEAR 0
24#define S35390A_BYTE_MONTH 1
25#define S35390A_BYTE_DAY 2
26#define S35390A_BYTE_WDAY 3
27#define S35390A_BYTE_HOURS 4
28#define S35390A_BYTE_MINS 5
29#define S35390A_BYTE_SECS 6
30
31#define S35390A_FLAG_POC 0x01
32#define S35390A_FLAG_BLD 0x02
33#define S35390A_FLAG_24H 0x40
34#define S35390A_FLAG_RESET 0x80
35#define S35390A_FLAG_TEST 0x01
36
37struct s35390a {
38 struct i2c_client *client[8];
39 struct rtc_device *rtc;
40 int twentyfourhour;
41};
42
43static int s35390a_set_reg(struct s35390a *s35390a, int reg, char *buf, int len)
44{
45 struct i2c_client *client = s35390a->client[reg];
46 struct i2c_msg msg[] = {
47 { client->addr, 0, len, buf },
48 };
49
50 if ((i2c_transfer(client->adapter, msg, 1)) != 1)
51 return -EIO;
52
53 return 0;
54}
55
56static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len)
57{
58 struct i2c_client *client = s35390a->client[reg];
59 struct i2c_msg msg[] = {
60 { client->addr, I2C_M_RD, len, buf },
61 };
62
63 if ((i2c_transfer(client->adapter, msg, 1)) != 1)
64 return -EIO;
65
66 return 0;
67}
68
69static int s35390a_reset(struct s35390a *s35390a)
70{
71 char buf[1];
72
73 if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0)
74 return -EIO;
75
76 if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD)))
77 return 0;
78
79 buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H);
80 buf[0] &= 0xf0;
81 return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
82}
83
84static int s35390a_disable_test_mode(struct s35390a *s35390a)
85{
86 char buf[1];
87
88 if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)) < 0)
89 return -EIO;
90
91 if (!(buf[0] & S35390A_FLAG_TEST))
92 return 0;
93
94 buf[0] &= ~S35390A_FLAG_TEST;
95 return s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf));
96}
97
98static char s35390a_hr2reg(struct s35390a *s35390a, int hour)
99{
100 if (s35390a->twentyfourhour)
101 return BIN2BCD(hour);
102
103 if (hour < 12)
104 return BIN2BCD(hour);
105
106 return 0x40 | BIN2BCD(hour - 12);
107}
108
109static int s35390a_reg2hr(struct s35390a *s35390a, char reg)
110{
111 unsigned hour;
112
113 if (s35390a->twentyfourhour)
114 return BCD2BIN(reg & 0x3f);
115
116 hour = BCD2BIN(reg & 0x3f);
117 if (reg & 0x40)
118 hour += 12;
119
120 return hour;
121}
122
123static int s35390a_set_datetime(struct i2c_client *client, struct rtc_time *tm)
124{
125 struct s35390a *s35390a = i2c_get_clientdata(client);
126 int i, err;
127 char buf[7];
128
129 dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d mday=%d, "
130 "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec,
131 tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year,
132 tm->tm_wday);
133
134 buf[S35390A_BYTE_YEAR] = BIN2BCD(tm->tm_year - 100);
135 buf[S35390A_BYTE_MONTH] = BIN2BCD(tm->tm_mon + 1);
136 buf[S35390A_BYTE_DAY] = BIN2BCD(tm->tm_mday);
137 buf[S35390A_BYTE_WDAY] = BIN2BCD(tm->tm_wday);
138 buf[S35390A_BYTE_HOURS] = s35390a_hr2reg(s35390a, tm->tm_hour);
139 buf[S35390A_BYTE_MINS] = BIN2BCD(tm->tm_min);
140 buf[S35390A_BYTE_SECS] = BIN2BCD(tm->tm_sec);
141
142 /* This chip expects the bits of each byte to be in reverse order */
143 for (i = 0; i < 7; ++i)
144 buf[i] = bitrev8(buf[i]);
145
146 err = s35390a_set_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf));
147
148 return err;
149}
150
151static int s35390a_get_datetime(struct i2c_client *client, struct rtc_time *tm)
152{
153 struct s35390a *s35390a = i2c_get_clientdata(client);
154 char buf[7];
155 int i, err;
156
157 err = s35390a_get_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf));
158 if (err < 0)
159 return err;
160
161 /* This chip returns the bits of each byte in reverse order */
162 for (i = 0; i < 7; ++i)
163 buf[i] = bitrev8(buf[i]);
164
165 tm->tm_sec = BCD2BIN(buf[S35390A_BYTE_SECS]);
166 tm->tm_min = BCD2BIN(buf[S35390A_BYTE_MINS]);
167 tm->tm_hour = s35390a_reg2hr(s35390a, buf[S35390A_BYTE_HOURS]);
168 tm->tm_wday = BCD2BIN(buf[S35390A_BYTE_WDAY]);
169 tm->tm_mday = BCD2BIN(buf[S35390A_BYTE_DAY]);
170 tm->tm_mon = BCD2BIN(buf[S35390A_BYTE_MONTH]) - 1;
171 tm->tm_year = BCD2BIN(buf[S35390A_BYTE_YEAR]) + 100;
172
173 dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, mday=%d, "
174 "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec,
175 tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year,
176 tm->tm_wday);
177
178 return rtc_valid_tm(tm);
179}
180
181static int s35390a_rtc_read_time(struct device *dev, struct rtc_time *tm)
182{
183 return s35390a_get_datetime(to_i2c_client(dev), tm);
184}
185
186static int s35390a_rtc_set_time(struct device *dev, struct rtc_time *tm)
187{
188 return s35390a_set_datetime(to_i2c_client(dev), tm);
189}
190
191static const struct rtc_class_ops s35390a_rtc_ops = {
192 .read_time = s35390a_rtc_read_time,
193 .set_time = s35390a_rtc_set_time,
194};
195
196static struct i2c_driver s35390a_driver;
197
198static int s35390a_probe(struct i2c_client *client)
199{
200 int err;
201 unsigned int i;
202 struct s35390a *s35390a;
203 struct rtc_time tm;
204 char buf[1];
205
206 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
207 err = -ENODEV;
208 goto exit;
209 }
210
211 s35390a = kzalloc(sizeof(struct s35390a), GFP_KERNEL);
212 if (!s35390a) {
213 err = -ENOMEM;
214 goto exit;
215 }
216
217 s35390a->client[0] = client;
218 i2c_set_clientdata(client, s35390a);
219
220 /* This chip uses multiple addresses, use dummy devices for them */
221 for (i = 1; i < 8; ++i) {
222 s35390a->client[i] = i2c_new_dummy(client->adapter,
223 client->addr + i, "rtc-s35390a");
224 if (!s35390a->client[i]) {
225 dev_err(&client->dev, "Address %02x unavailable\n",
226 client->addr + i);
227 err = -EBUSY;
228 goto exit_dummy;
229 }
230 }
231
232 err = s35390a_reset(s35390a);
233 if (err < 0) {
234 dev_err(&client->dev, "error resetting chip\n");
235 goto exit_dummy;
236 }
237
238 err = s35390a_disable_test_mode(s35390a);
239 if (err < 0) {
240 dev_err(&client->dev, "error disabling test mode\n");
241 goto exit_dummy;
242 }
243
244 err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
245 if (err < 0) {
246 dev_err(&client->dev, "error checking 12/24 hour mode\n");
247 goto exit_dummy;
248 }
249 if (buf[0] & S35390A_FLAG_24H)
250 s35390a->twentyfourhour = 1;
251 else
252 s35390a->twentyfourhour = 0;
253
254 if (s35390a_get_datetime(client, &tm) < 0)
255 dev_warn(&client->dev, "clock needs to be set\n");
256
257 s35390a->rtc = rtc_device_register(s35390a_driver.driver.name,
258 &client->dev, &s35390a_rtc_ops, THIS_MODULE);
259
260 if (IS_ERR(s35390a->rtc)) {
261 err = PTR_ERR(s35390a->rtc);
262 goto exit_dummy;
263 }
264 return 0;
265
266exit_dummy:
267 for (i = 1; i < 8; ++i)
268 if (s35390a->client[i])
269 i2c_unregister_device(s35390a->client[i]);
270 kfree(s35390a);
271 i2c_set_clientdata(client, NULL);
272
273exit:
274 return err;
275}
276
277static int s35390a_remove(struct i2c_client *client)
278{
279 unsigned int i;
280
281 struct s35390a *s35390a = i2c_get_clientdata(client);
282 for (i = 1; i < 8; ++i)
283 if (s35390a->client[i])
284 i2c_unregister_device(s35390a->client[i]);
285
286 rtc_device_unregister(s35390a->rtc);
287 kfree(s35390a);
288 i2c_set_clientdata(client, NULL);
289
290 return 0;
291}
292
293static struct i2c_driver s35390a_driver = {
294 .driver = {
295 .name = "rtc-s35390a",
296 },
297 .probe = s35390a_probe,
298 .remove = s35390a_remove,
299};
300
301static int __init s35390a_rtc_init(void)
302{
303 return i2c_add_driver(&s35390a_driver);
304}
305
306static void __exit s35390a_rtc_exit(void)
307{
308 i2c_del_driver(&s35390a_driver);
309}
310
311MODULE_AUTHOR("Byron Bradley <byron.bbradley@gmail.com>");
312MODULE_DESCRIPTION("S35390A RTC driver");
313MODULE_LICENSE("GPL");
314
315module_init(s35390a_rtc_init);
316module_exit(s35390a_rtc_exit);
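The new rtc-s35390a driver stores each time byte in BCD, reverses the bit order of every byte on the wire, and in 12-hour mode flags PM hours by setting bit 6 of the hour register. A hedged sketch of the hour conversion round-trip, with local bcd and bit-reverse helpers standing in for the kernel's BIN2BCD/BCD2BIN/bitrev8:

/* Stand-alone sketch of the S-35390A hour encoding; bin2bcd(), bcd2bin()
 * and bitrev8() below are simplified stand-ins for the kernel helpers. */
#include <stdio.h>

static unsigned char bin2bcd(unsigned v) { return ((v / 10) << 4) | (v % 10); }
static unsigned bcd2bin(unsigned char v) { return (v >> 4) * 10 + (v & 0x0f); }

static unsigned char bitrev8(unsigned char b)
{
	unsigned char r = 0;
	int i;

	for (i = 0; i < 8; i++)
		r |= ((b >> i) & 1) << (7 - i);
	return r;
}

/* Encode an hour (0-23) for a chip running in 12-hour mode. */
static unsigned char hour_to_reg_12h(unsigned hour)
{
	unsigned char reg = (hour < 12) ? bin2bcd(hour)
					: (0x40 | bin2bcd(hour - 12));

	return bitrev8(reg);		/* chip wants bits in reverse order */
}

static unsigned reg_to_hour_12h(unsigned char wire)
{
	unsigned char reg = bitrev8(wire);
	unsigned hour = bcd2bin(reg & 0x3f);

	if (reg & 0x40)			/* PM flag in 12-hour mode */
		hour += 12;
	return hour;
}

int main(void)
{
	printf("%u\n", reg_to_hour_12h(hour_to_reg_12h(17))); /* prints 17 */
	return 0;
}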
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index f69714a0e9e7..b19db20a0bef 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2310,10 +2310,8 @@ static int
2310dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2) 2310dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)
2311{ 2311{
2312 2312
2313 /* check failed CCW */ 2313 if (cqr1->startdev != cqr2->startdev)
2314 if (cqr1->irb.scsw.cpa != cqr2->irb.scsw.cpa) { 2314 return 0;
2315 // return 0; /* CCW doesn't match */
2316 }
2317 2315
2318 if (cqr1->irb.esw.esw0.erw.cons != cqr2->irb.esw.esw0.erw.cons) 2316 if (cqr1->irb.esw.esw0.erw.cons != cqr2->irb.esw.esw0.erw.cons)
2319 return 0; 2317 return 0;
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 28a86f070048..556063e8f7a9 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -62,8 +62,10 @@ dasd_devices_show(struct seq_file *m, void *v)
62 return 0; 62 return 0;
63 if (device->block) 63 if (device->block)
64 block = device->block; 64 block = device->block;
65 else 65 else {
66 dasd_put_device(device);
66 return 0; 67 return 0;
68 }
67 /* Print device number. */ 69 /* Print device number. */
68 seq_printf(m, "%s", device->cdev->dev.bus_id); 70 seq_printf(m, "%s", device->cdev->dev.bus_id);
69 /* Print discipline string. */ 71 /* Print discipline string. */
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
index 389346cda6c8..07c7f31081bc 100644
--- a/drivers/s390/char/defkeymap.c
+++ b/drivers/s390/char/defkeymap.c
@@ -151,8 +151,8 @@ char *func_table[MAX_NR_FUNC] = {
151}; 151};
152 152
153struct kbdiacruc accent_table[MAX_DIACR] = { 153struct kbdiacruc accent_table[MAX_DIACR] = {
154 {'^', 'c', '\003'}, {'^', 'd', '\004'}, 154 {'^', 'c', 0003}, {'^', 'd', 0004},
155 {'^', 'z', '\032'}, {'^', '\012', '\000'}, 155 {'^', 'z', 0032}, {'^', 0012, 0000},
156}; 156};
157 157
158unsigned int accent_table_size = 4; 158unsigned int accent_table_size = 4;
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 92f527201792..f7b258dfd52c 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -367,7 +367,7 @@ sclp_vt220_timeout(unsigned long data)
367 sclp_vt220_emit_current(); 367 sclp_vt220_emit_current();
368} 368}
369 369
370#define BUFFER_MAX_DELAY HZ/2 370#define BUFFER_MAX_DELAY HZ/20
371 371
372/* 372/*
373 * Internal implementation of the write function. Write COUNT bytes of data 373 * Internal implementation of the write function. Write COUNT bytes of data
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index d0c6fd3b1c19..7b0b81901297 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -490,10 +490,12 @@ static int ap_device_probe(struct device *dev)
490 int rc; 490 int rc;
491 491
492 ap_dev->drv = ap_drv; 492 ap_dev->drv = ap_drv;
493 spin_lock_bh(&ap_device_lock);
494 list_add(&ap_dev->list, &ap_device_list);
495 spin_unlock_bh(&ap_device_lock);
496 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; 493 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
494 if (!rc) {
495 spin_lock_bh(&ap_device_lock);
496 list_add(&ap_dev->list, &ap_device_list);
497 spin_unlock_bh(&ap_device_lock);
498 }
497 return rc; 499 return rc;
498} 500}
499 501
@@ -532,11 +534,11 @@ static int ap_device_remove(struct device *dev)
532 534
533 ap_flush_queue(ap_dev); 535 ap_flush_queue(ap_dev);
534 del_timer_sync(&ap_dev->timeout); 536 del_timer_sync(&ap_dev->timeout);
535 if (ap_drv->remove)
536 ap_drv->remove(ap_dev);
537 spin_lock_bh(&ap_device_lock); 537 spin_lock_bh(&ap_device_lock);
538 list_del_init(&ap_dev->list); 538 list_del_init(&ap_dev->list);
539 spin_unlock_bh(&ap_device_lock); 539 spin_unlock_bh(&ap_device_lock);
540 if (ap_drv->remove)
541 ap_drv->remove(ap_dev);
540 spin_lock_bh(&ap_dev->lock); 542 spin_lock_bh(&ap_dev->lock);
541 atomic_sub(ap_dev->queue_count, &ap_poll_requests); 543 atomic_sub(ap_dev->queue_count, &ap_poll_requests);
542 spin_unlock_bh(&ap_dev->lock); 544 spin_unlock_bh(&ap_dev->lock);
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index c3076217871e..d8a5c229c5a7 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -1851,8 +1851,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1851 } 1851 }
1852 } 1852 }
1853 /* See how many write buffers are required to hold this data */ 1853 /* See how many write buffers are required to hold this data */
1854 numBuffers= ( skb->len + privptr->p_env->write_size - 1) / 1854 numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
1855 ( privptr->p_env->write_size);
1856 1855
1857 /* If that number of buffers isn't available, give up for now */ 1856 /* If that number of buffers isn't available, give up for now */
1858 if (privptr->write_free_count < numBuffers || 1857 if (privptr->write_free_count < numBuffers ||
@@ -2114,8 +2113,7 @@ init_ccw_bk(struct net_device *dev)
2114 */ 2113 */
2115 ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE; 2114 ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
2116 ccw_pages_required= 2115 ccw_pages_required=
2117 (ccw_blocks_required+ccw_blocks_perpage -1) / 2116 DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
2118 ccw_blocks_perpage;
2119 2117
2120#ifdef DEBUGMSG 2118#ifdef DEBUGMSG
2121 printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n", 2119 printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n",
@@ -2131,30 +2129,29 @@ init_ccw_bk(struct net_device *dev)
2131 * provide good performance. With packing buffers support 32k 2129 * provide good performance. With packing buffers support 32k
2132 * buffers are used. 2130 * buffers are used.
2133 */ 2131 */
2134 if (privptr->p_env->read_size < PAGE_SIZE) { 2132 if (privptr->p_env->read_size < PAGE_SIZE) {
2135 claw_reads_perpage= PAGE_SIZE / privptr->p_env->read_size; 2133 claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
2136 claw_read_pages= (privptr->p_env->read_buffers + 2134 claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
2137 claw_reads_perpage -1) / claw_reads_perpage; 2135 claw_reads_perpage);
2138 } 2136 }
2139 else { /* > or equal */ 2137 else { /* > or equal */
2140 privptr->p_buff_pages_perread= 2138 privptr->p_buff_pages_perread =
2141 (privptr->p_env->read_size + PAGE_SIZE - 1) / PAGE_SIZE; 2139 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
2142 claw_read_pages= 2140 claw_read_pages = privptr->p_env->read_buffers *
2143 privptr->p_env->read_buffers * privptr->p_buff_pages_perread; 2141 privptr->p_buff_pages_perread;
2144 } 2142 }
2145 if (privptr->p_env->write_size < PAGE_SIZE) { 2143 if (privptr->p_env->write_size < PAGE_SIZE) {
2146 claw_writes_perpage= 2144 claw_writes_perpage =
2147 PAGE_SIZE / privptr->p_env->write_size; 2145 PAGE_SIZE / privptr->p_env->write_size;
2148 claw_write_pages= 2146 claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
2149 (privptr->p_env->write_buffers + claw_writes_perpage -1) / 2147 claw_writes_perpage);
2150 claw_writes_perpage;
2151 2148
2152 } 2149 }
2153 else { /* > or equal */ 2150 else { /* > or equal */
2154 privptr->p_buff_pages_perwrite= 2151 privptr->p_buff_pages_perwrite =
2155 (privptr->p_env->read_size + PAGE_SIZE - 1) / PAGE_SIZE; 2152 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
2156 claw_write_pages= 2153 claw_write_pages = privptr->p_env->write_buffers *
2157 privptr->p_env->write_buffers * privptr->p_buff_pages_perwrite; 2154 privptr->p_buff_pages_perwrite;
2158 } 2155 }
2159#ifdef DEBUGMSG 2156#ifdef DEBUGMSG
2160 if (privptr->p_env->read_size < PAGE_SIZE) { 2157 if (privptr->p_env->read_size < PAGE_SIZE) {
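The claw.c hunks above replace several open-coded "(a + b - 1) / b" expressions with DIV_ROUND_UP(). The kernel macro lives in kernel.h; the local copy below is only an illustration of the ceiling-division equivalence.

/* Illustration of DIV_ROUND_UP; defined locally here, mirroring the
 * usual "(n + d - 1) / d" ceiling-division idiom. */
#include <assert.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* e.g. 70000 bytes of skb data over 32 KiB write buffers -> 3 buffers */
	assert(DIV_ROUND_UP(70000, 32768) == 3);
	assert(DIV_ROUND_UP(70000, 32768) == (70000 + 32768 - 1) / 32768);
	return 0;
}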
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a7a0813b24cb..c46666a24809 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -992,6 +992,16 @@ config SCSI_IZIP_SLOW_CTR
992 992
993 Generally, saying N is fine. 993 Generally, saying N is fine.
994 994
995config SCSI_MVSAS
996 tristate "Marvell 88SE6440 SAS/SATA support"
997 depends on PCI && SCSI
998 select SCSI_SAS_LIBSAS
999 help
1000 This driver supports Marvell SAS/SATA PCI devices.
1001
1002 To compile this driver as a module, choose M here: the module
1003 will be called mvsas.
1004
995config SCSI_NCR53C406A 1005config SCSI_NCR53C406A
996 tristate "NCR53c406a SCSI support" 1006 tristate "NCR53c406a SCSI support"
997 depends on ISA && SCSI 1007 depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 925c26b4fff9..23e6ecbd4778 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -119,6 +119,7 @@ obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
119obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/ 119obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
120obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o 120obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
121obj-$(CONFIG_SCSI_STEX) += stex.o 121obj-$(CONFIG_SCSI_STEX) += stex.o
122obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
122obj-$(CONFIG_PS3_ROM) += ps3rom.o 123obj-$(CONFIG_PS3_ROM) += ps3rom.o
123 124
124obj-$(CONFIG_ARM) += arm/ 125obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 4150c8a8fdc2..dfaaae5e73ae 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -89,7 +89,7 @@ ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
89 pci_save_state(pdev); 89 pci_save_state(pdev);
90 pci_disable_device(pdev); 90 pci_disable_device(pdev);
91 91
92 if (mesg.event == PM_EVENT_SUSPEND) 92 if (mesg.event & PM_EVENT_SLEEP)
93 pci_set_power_state(pdev, PCI_D3hot); 93 pci_set_power_state(pdev, PCI_D3hot);
94 94
95 return rc; 95 return rc;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 6d2ae641273c..64e62ce59c15 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -695,15 +695,16 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
695 scb_index = ahc_inb(ahc, SCB_TAG); 695 scb_index = ahc_inb(ahc, SCB_TAG);
696 scb = ahc_lookup_scb(ahc, scb_index); 696 scb = ahc_lookup_scb(ahc, scb_index);
697 if (devinfo.role == ROLE_INITIATOR) { 697 if (devinfo.role == ROLE_INITIATOR) {
698 if (scb == NULL) 698 if (bus_phase == P_MESGOUT) {
699 panic("HOST_MSG_LOOP with " 699 if (scb == NULL)
700 "invalid SCB %x\n", scb_index); 700 panic("HOST_MSG_LOOP with "
701 "invalid SCB %x\n",
702 scb_index);
701 703
702 if (bus_phase == P_MESGOUT)
703 ahc_setup_initiator_msgout(ahc, 704 ahc_setup_initiator_msgout(ahc,
704 &devinfo, 705 &devinfo,
705 scb); 706 scb);
706 else { 707 } else {
707 ahc->msg_type = 708 ahc->msg_type =
708 MSG_TYPE_INITIATOR_MSGIN; 709 MSG_TYPE_INITIATOR_MSGIN;
709 ahc->msgin_index = 0; 710 ahc->msgin_index = 0;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index dd6e21d6f1dd..3d3eaef65fb3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -134,7 +134,7 @@ ahc_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
134 pci_save_state(pdev); 134 pci_save_state(pdev);
135 pci_disable_device(pdev); 135 pci_disable_device(pdev);
136 136
137 if (mesg.event == PM_EVENT_SUSPEND) 137 if (mesg.event & PM_EVENT_SLEEP)
138 pci_set_power_state(pdev, PCI_D3hot); 138 pci_set_power_state(pdev, PCI_D3hot);
139 139
140 return rc; 140 return rc;
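Both ahd_linux_pci_dev_suspend and ahc_linux_pci_dev_suspend now test the suspend message with a bitmask rather than an equality check, so the controller is also dropped to D3hot when entering hibernation. A minimal sketch of the intent, assuming PM_EVENT_SLEEP is the mask covering both suspend and hibernate as in <linux/pm.h> of this era (the function name is illustrative):

    #include <linux/pm.h>
    #include <linux/pci.h>

    static void set_low_power(struct pci_dev *pdev, pm_message_t mesg)
    {
            /* PM_EVENT_SLEEP == PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE, so the
             * bitwise test matches both paths, not just plain suspend */
            if (mesg.event & PM_EVENT_SLEEP)
                    pci_set_power_state(pdev, PCI_D3hot);
    }
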
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 32f513b1b78a..eb8efdcefe48 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -102,6 +102,7 @@ int asd_abort_task_set(struct domain_device *, u8 *lun);
102int asd_clear_aca(struct domain_device *, u8 *lun); 102int asd_clear_aca(struct domain_device *, u8 *lun);
103int asd_clear_task_set(struct domain_device *, u8 *lun); 103int asd_clear_task_set(struct domain_device *, u8 *lun);
104int asd_lu_reset(struct domain_device *, u8 *lun); 104int asd_lu_reset(struct domain_device *, u8 *lun);
105int asd_I_T_nexus_reset(struct domain_device *dev);
105int asd_query_task(struct sas_task *); 106int asd_query_task(struct sas_task *);
106 107
107/* ---------- Adapter and Port management ---------- */ 108/* ---------- Adapter and Port management ---------- */
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h
index 150f6706d23f..abc757559c1a 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.h
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.h
@@ -140,7 +140,7 @@ struct asd_ascb {
140 140
141 /* internally generated command */ 141 /* internally generated command */
142 struct timer_list timer; 142 struct timer_list timer;
143 struct completion completion; 143 struct completion *completion;
144 u8 tag_valid:1; 144 u8 tag_valid:1;
145 __be16 tag; /* error recovery only */ 145 __be16 tag; /* error recovery only */
146 146
@@ -294,7 +294,6 @@ static inline void asd_init_ascb(struct asd_ha_struct *asd_ha,
294 ascb->timer.function = NULL; 294 ascb->timer.function = NULL;
295 init_timer(&ascb->timer); 295 init_timer(&ascb->timer);
296 ascb->tc_index = -1; 296 ascb->tc_index = -1;
297 init_completion(&ascb->completion);
298} 297}
299 298
300/* Must be called with the tc_index_lock held! 299/* Must be called with the tc_index_lock held!
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 5d761eb67442..88d1e731b65e 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -1003,7 +1003,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
1003 .lldd_abort_task_set = asd_abort_task_set, 1003 .lldd_abort_task_set = asd_abort_task_set,
1004 .lldd_clear_aca = asd_clear_aca, 1004 .lldd_clear_aca = asd_clear_aca,
1005 .lldd_clear_task_set = asd_clear_task_set, 1005 .lldd_clear_task_set = asd_clear_task_set,
1006 .lldd_I_T_nexus_reset = NULL, 1006 .lldd_I_T_nexus_reset = asd_I_T_nexus_reset,
1007 .lldd_lu_reset = asd_lu_reset, 1007 .lldd_lu_reset = asd_lu_reset,
1008 .lldd_query_task = asd_query_task, 1008 .lldd_query_task = asd_query_task,
1009 1009
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 0febad4dd75f..ab350504ca5a 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -458,13 +458,19 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
458 tc_abort = le16_to_cpu(tc_abort); 458 tc_abort = le16_to_cpu(tc_abort);
459 459
460 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) { 460 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
461 struct sas_task *task = ascb->uldd_task; 461 struct sas_task *task = a->uldd_task;
462
463 if (a->tc_index != tc_abort)
464 continue;
462 465
463 if (task && a->tc_index == tc_abort) { 466 if (task) {
464 failed_dev = task->dev; 467 failed_dev = task->dev;
465 sas_task_abort(task); 468 sas_task_abort(task);
466 break; 469 } else {
470 ASD_DPRINTK("R_T_A for non TASK scb 0x%x\n",
471 a->scb->header.opcode);
467 } 472 }
473 break;
468 } 474 }
469 475
470 if (!failed_dev) { 476 if (!failed_dev) {
@@ -478,7 +484,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
478 * that the EH will wake up and do something. 484 * that the EH will wake up and do something.
479 */ 485 */
480 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) { 486 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
481 struct sas_task *task = ascb->uldd_task; 487 struct sas_task *task = a->uldd_task;
482 488
483 if (task && 489 if (task &&
484 task->dev == failed_dev && 490 task->dev == failed_dev &&
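The escb_tasklet_complete fixes above correct which task the loops inspect: both walks iterate with a as the list_for_each_entry_safe cursor but previously dereferenced ascb->uldd_task, i.e. the ESCB being completed rather than the pending entry under the cursor. A minimal sketch of the corrected shape of the first walk:

    struct asd_ascb *a, *b;

    list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
            struct sas_task *task = a->uldd_task;  /* the cursor entry, not the escb */

            if (a->tc_index != tc_abort)
                    continue;
            if (task)
                    sas_task_abort(task);
            break;
    }
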
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 965d4bb999d9..008df9ab92a5 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -343,11 +343,13 @@ Again:
343 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; 343 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
344 task->task_state_flags |= SAS_TASK_STATE_DONE; 344 task->task_state_flags |= SAS_TASK_STATE_DONE;
345 if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { 345 if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) {
346 struct completion *completion = ascb->completion;
346 spin_unlock_irqrestore(&task->task_state_lock, flags); 347 spin_unlock_irqrestore(&task->task_state_lock, flags);
347 ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x " 348 ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x "
348 "stat 0x%x but aborted by upper layer!\n", 349 "stat 0x%x but aborted by upper layer!\n",
349 task, opcode, ts->resp, ts->stat); 350 task, opcode, ts->resp, ts->stat);
350 complete(&ascb->completion); 351 if (completion)
352 complete(completion);
351 } else { 353 } else {
352 spin_unlock_irqrestore(&task->task_state_lock, flags); 354 spin_unlock_irqrestore(&task->task_state_lock, flags);
353 task->lldd_task = NULL; 355 task->lldd_task = NULL;
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index b52124f3d3ac..b9ac8f703a1d 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -53,50 +53,64 @@ static int asd_enqueue_internal(struct asd_ascb *ascb,
53 return res; 53 return res;
54} 54}
55 55
56static inline void asd_timedout_common(unsigned long data) 56/* ---------- CLEAR NEXUS ---------- */
57{
58 struct asd_ascb *ascb = (void *) data;
59 struct asd_seq_data *seq = &ascb->ha->seq;
60 unsigned long flags;
61 57
62 spin_lock_irqsave(&seq->pend_q_lock, flags); 58struct tasklet_completion_status {
63 seq->pending--; 59 int dl_opcode;
64 list_del_init(&ascb->list); 60 int tmf_state;
65 spin_unlock_irqrestore(&seq->pend_q_lock, flags); 61 u8 tag_valid:1;
66} 62 __be16 tag;
63};
64
65#define DECLARE_TCS(tcs) \
66 struct tasklet_completion_status tcs = { \
67 .dl_opcode = 0, \
68 .tmf_state = 0, \
69 .tag_valid = 0, \
70 .tag = 0, \
71 }
67 72
68/* ---------- CLEAR NEXUS ---------- */
69 73
70static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb, 74static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
71 struct done_list_struct *dl) 75 struct done_list_struct *dl)
72{ 76{
77 struct tasklet_completion_status *tcs = ascb->uldd_task;
73 ASD_DPRINTK("%s: here\n", __FUNCTION__); 78 ASD_DPRINTK("%s: here\n", __FUNCTION__);
74 if (!del_timer(&ascb->timer)) { 79 if (!del_timer(&ascb->timer)) {
75 ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__); 80 ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__);
76 return; 81 return;
77 } 82 }
78 ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode); 83 ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode);
79 ascb->uldd_task = (void *) (unsigned long) dl->opcode; 84 tcs->dl_opcode = dl->opcode;
80 complete(&ascb->completion); 85 complete(ascb->completion);
86 asd_ascb_free(ascb);
81} 87}
82 88
83static void asd_clear_nexus_timedout(unsigned long data) 89static void asd_clear_nexus_timedout(unsigned long data)
84{ 90{
85 struct asd_ascb *ascb = (void *) data; 91 struct asd_ascb *ascb = (void *)data;
92 struct tasklet_completion_status *tcs = ascb->uldd_task;
86 93
87 ASD_DPRINTK("%s: here\n", __FUNCTION__); 94 ASD_DPRINTK("%s: here\n", __FUNCTION__);
88 asd_timedout_common(data); 95 tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
89 ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED; 96 complete(ascb->completion);
90 complete(&ascb->completion);
91} 97}
92 98
93#define CLEAR_NEXUS_PRE \ 99#define CLEAR_NEXUS_PRE \
100 struct asd_ascb *ascb; \
101 struct scb *scb; \
102 int res; \
103 DECLARE_COMPLETION_ONSTACK(completion); \
104 DECLARE_TCS(tcs); \
105 \
94 ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \ 106 ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \
95 res = 1; \ 107 res = 1; \
96 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \ 108 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
97 if (!ascb) \ 109 if (!ascb) \
98 return -ENOMEM; \ 110 return -ENOMEM; \
99 \ 111 \
112 ascb->completion = &completion; \
113 ascb->uldd_task = &tcs; \
100 scb = ascb->scb; \ 114 scb = ascb->scb; \
101 scb->header.opcode = CLEAR_NEXUS 115 scb->header.opcode = CLEAR_NEXUS
102 116
@@ -107,10 +121,11 @@ static void asd_clear_nexus_timedout(unsigned long data)
107 if (res) \ 121 if (res) \
108 goto out_err; \ 122 goto out_err; \
109 ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \ 123 ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \
110 wait_for_completion(&ascb->completion); \ 124 wait_for_completion(&completion); \
111 res = (int) (unsigned long) ascb->uldd_task; \ 125 res = tcs.dl_opcode; \
112 if (res == TC_NO_ERROR) \ 126 if (res == TC_NO_ERROR) \
113 res = TMF_RESP_FUNC_COMPLETE; \ 127 res = TMF_RESP_FUNC_COMPLETE; \
128 return res; \
114out_err: \ 129out_err: \
115 asd_ascb_free(ascb); \ 130 asd_ascb_free(ascb); \
116 return res 131 return res
@@ -118,9 +133,6 @@ out_err: \
118int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha) 133int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
119{ 134{
120 struct asd_ha_struct *asd_ha = sas_ha->lldd_ha; 135 struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;
121 struct asd_ascb *ascb;
122 struct scb *scb;
123 int res;
124 136
125 CLEAR_NEXUS_PRE; 137 CLEAR_NEXUS_PRE;
126 scb->clear_nexus.nexus = NEXUS_ADAPTER; 138 scb->clear_nexus.nexus = NEXUS_ADAPTER;
@@ -130,9 +142,6 @@ int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
130int asd_clear_nexus_port(struct asd_sas_port *port) 142int asd_clear_nexus_port(struct asd_sas_port *port)
131{ 143{
132 struct asd_ha_struct *asd_ha = port->ha->lldd_ha; 144 struct asd_ha_struct *asd_ha = port->ha->lldd_ha;
133 struct asd_ascb *ascb;
134 struct scb *scb;
135 int res;
136 145
137 CLEAR_NEXUS_PRE; 146 CLEAR_NEXUS_PRE;
138 scb->clear_nexus.nexus = NEXUS_PORT; 147 scb->clear_nexus.nexus = NEXUS_PORT;
@@ -140,37 +149,77 @@ int asd_clear_nexus_port(struct asd_sas_port *port)
140 CLEAR_NEXUS_POST; 149 CLEAR_NEXUS_POST;
141} 150}
142 151
143#if 0 152enum clear_nexus_phase {
144static int asd_clear_nexus_I_T(struct domain_device *dev) 153 NEXUS_PHASE_PRE,
154 NEXUS_PHASE_POST,
155 NEXUS_PHASE_RESUME,
156};
157
158static int asd_clear_nexus_I_T(struct domain_device *dev,
159 enum clear_nexus_phase phase)
145{ 160{
146 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; 161 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
147 struct asd_ascb *ascb;
148 struct scb *scb;
149 int res;
150 162
151 CLEAR_NEXUS_PRE; 163 CLEAR_NEXUS_PRE;
152 scb->clear_nexus.nexus = NEXUS_I_T; 164 scb->clear_nexus.nexus = NEXUS_I_T;
153 scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; 165 switch (phase) {
154 if (dev->tproto) 166 case NEXUS_PHASE_PRE:
155 scb->clear_nexus.flags |= SUSPEND_TX; 167 scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
168 break;
169 case NEXUS_PHASE_POST:
170 scb->clear_nexus.flags = SEND_Q | NOTINQ;
171 break;
172 case NEXUS_PHASE_RESUME:
173 scb->clear_nexus.flags = RESUME_TX;
174 }
156 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) 175 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
157 dev->lldd_dev); 176 dev->lldd_dev);
158 CLEAR_NEXUS_POST; 177 CLEAR_NEXUS_POST;
159} 178}
160#endif 179
180int asd_I_T_nexus_reset(struct domain_device *dev)
181{
182 int res, tmp_res, i;
183 struct sas_phy *phy = sas_find_local_phy(dev);
184 /* Standard mandates link reset for ATA (type 0) and
185 * hard reset for SSP (type 1) */
186 int reset_type = (dev->dev_type == SATA_DEV ||
187 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
188
189 asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
190 /* send a hard reset */
191 ASD_DPRINTK("sending %s reset to %s\n",
192 reset_type ? "hard" : "soft", phy->dev.bus_id);
193 res = sas_phy_reset(phy, reset_type);
194 if (res == TMF_RESP_FUNC_COMPLETE) {
195 /* wait for the maximum settle time */
196 msleep(500);
197 /* clear all outstanding commands (keep nexus suspended) */
198 asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
199 }
200 for (i = 0 ; i < 3; i++) {
201 tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
202 if (tmp_res == TC_RESUME)
203 return res;
204 msleep(500);
205 }
206
207 /* This is a bit of a problem: the sequencer is still suspended
208 * and is refusing to resume. Hope it will resume on a bigger hammer
209 * or the disk is lost */
210 dev_printk(KERN_ERR, &phy->dev,
211 "Failed to resume nexus after reset 0x%x\n", tmp_res);
212
213 return TMF_RESP_FUNC_FAILED;
214}
161 215
162static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun) 216static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
163{ 217{
164 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; 218 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
165 struct asd_ascb *ascb;
166 struct scb *scb;
167 int res;
168 219
169 CLEAR_NEXUS_PRE; 220 CLEAR_NEXUS_PRE;
170 scb->clear_nexus.nexus = NEXUS_I_T_L; 221 scb->clear_nexus.nexus = NEXUS_I_T_L;
171 scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; 222 scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
172 if (dev->tproto)
173 scb->clear_nexus.flags |= SUSPEND_TX;
174 memcpy(scb->clear_nexus.ssp_task.lun, lun, 8); 223 memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
175 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) 224 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
176 dev->lldd_dev); 225 dev->lldd_dev);
@@ -181,9 +230,6 @@ static int asd_clear_nexus_tag(struct sas_task *task)
181{ 230{
182 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; 231 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
183 struct asd_ascb *tascb = task->lldd_task; 232 struct asd_ascb *tascb = task->lldd_task;
184 struct asd_ascb *ascb;
185 struct scb *scb;
186 int res;
187 233
188 CLEAR_NEXUS_PRE; 234 CLEAR_NEXUS_PRE;
189 scb->clear_nexus.nexus = NEXUS_TAG; 235 scb->clear_nexus.nexus = NEXUS_TAG;
@@ -199,9 +245,6 @@ static int asd_clear_nexus_index(struct sas_task *task)
199{ 245{
200 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; 246 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
201 struct asd_ascb *tascb = task->lldd_task; 247 struct asd_ascb *tascb = task->lldd_task;
202 struct asd_ascb *ascb;
203 struct scb *scb;
204 int res;
205 248
206 CLEAR_NEXUS_PRE; 249 CLEAR_NEXUS_PRE;
207 scb->clear_nexus.nexus = NEXUS_TRANS_CX; 250 scb->clear_nexus.nexus = NEXUS_TRANS_CX;
@@ -217,11 +260,11 @@ static int asd_clear_nexus_index(struct sas_task *task)
217static void asd_tmf_timedout(unsigned long data) 260static void asd_tmf_timedout(unsigned long data)
218{ 261{
219 struct asd_ascb *ascb = (void *) data; 262 struct asd_ascb *ascb = (void *) data;
263 struct tasklet_completion_status *tcs = ascb->uldd_task;
220 264
221 ASD_DPRINTK("tmf timed out\n"); 265 ASD_DPRINTK("tmf timed out\n");
222 asd_timedout_common(data); 266 tcs->tmf_state = TMF_RESP_FUNC_FAILED;
223 ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED; 267 complete(ascb->completion);
224 complete(&ascb->completion);
225} 268}
226 269
227static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb, 270static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
@@ -273,18 +316,24 @@ static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
273static void asd_tmf_tasklet_complete(struct asd_ascb *ascb, 316static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
274 struct done_list_struct *dl) 317 struct done_list_struct *dl)
275{ 318{
319 struct tasklet_completion_status *tcs;
320
276 if (!del_timer(&ascb->timer)) 321 if (!del_timer(&ascb->timer))
277 return; 322 return;
278 323
324 tcs = ascb->uldd_task;
279 ASD_DPRINTK("tmf tasklet complete\n"); 325 ASD_DPRINTK("tmf tasklet complete\n");
280 326
281 if (dl->opcode == TC_SSP_RESP) 327 tcs->dl_opcode = dl->opcode;
282 ascb->uldd_task = (void *) (unsigned long) 328
283 asd_get_tmf_resp_tasklet(ascb, dl); 329 if (dl->opcode == TC_SSP_RESP) {
284 else 330 tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
285 ascb->uldd_task = (void *) 0xFF00 + (unsigned long) dl->opcode; 331 tcs->tag_valid = ascb->tag_valid;
332 tcs->tag = ascb->tag;
333 }
286 334
287 complete(&ascb->completion); 335 complete(ascb->completion);
336 asd_ascb_free(ascb);
288} 337}
289 338
290static inline int asd_clear_nexus(struct sas_task *task) 339static inline int asd_clear_nexus(struct sas_task *task)
@@ -292,15 +341,19 @@ static inline int asd_clear_nexus(struct sas_task *task)
292 int res = TMF_RESP_FUNC_FAILED; 341 int res = TMF_RESP_FUNC_FAILED;
293 int leftover; 342 int leftover;
294 struct asd_ascb *tascb = task->lldd_task; 343 struct asd_ascb *tascb = task->lldd_task;
344 DECLARE_COMPLETION_ONSTACK(completion);
295 unsigned long flags; 345 unsigned long flags;
296 346
347 tascb->completion = &completion;
348
297 ASD_DPRINTK("task not done, clearing nexus\n"); 349 ASD_DPRINTK("task not done, clearing nexus\n");
298 if (tascb->tag_valid) 350 if (tascb->tag_valid)
299 res = asd_clear_nexus_tag(task); 351 res = asd_clear_nexus_tag(task);
300 else 352 else
301 res = asd_clear_nexus_index(task); 353 res = asd_clear_nexus_index(task);
302 leftover = wait_for_completion_timeout(&tascb->completion, 354 leftover = wait_for_completion_timeout(&completion,
303 AIC94XX_SCB_TIMEOUT); 355 AIC94XX_SCB_TIMEOUT);
356 tascb->completion = NULL;
304 ASD_DPRINTK("came back from clear nexus\n"); 357 ASD_DPRINTK("came back from clear nexus\n");
305 spin_lock_irqsave(&task->task_state_lock, flags); 358 spin_lock_irqsave(&task->task_state_lock, flags);
306 if (leftover < 1) 359 if (leftover < 1)
@@ -354,6 +407,11 @@ int asd_abort_task(struct sas_task *task)
354 struct asd_ascb *ascb = NULL; 407 struct asd_ascb *ascb = NULL;
355 struct scb *scb; 408 struct scb *scb;
356 int leftover; 409 int leftover;
410 DECLARE_TCS(tcs);
411 DECLARE_COMPLETION_ONSTACK(completion);
412 DECLARE_COMPLETION_ONSTACK(tascb_completion);
413
414 tascb->completion = &tascb_completion;
357 415
358 spin_lock_irqsave(&task->task_state_lock, flags); 416 spin_lock_irqsave(&task->task_state_lock, flags);
359 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 417 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
@@ -367,8 +425,10 @@ int asd_abort_task(struct sas_task *task)
367 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); 425 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
368 if (!ascb) 426 if (!ascb)
369 return -ENOMEM; 427 return -ENOMEM;
370 scb = ascb->scb;
371 428
429 ascb->uldd_task = &tcs;
430 ascb->completion = &completion;
431 scb = ascb->scb;
372 scb->header.opcode = SCB_ABORT_TASK; 432 scb->header.opcode = SCB_ABORT_TASK;
373 433
374 switch (task->task_proto) { 434 switch (task->task_proto) {
@@ -410,13 +470,12 @@ int asd_abort_task(struct sas_task *task)
410 res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, 470 res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
411 asd_tmf_timedout); 471 asd_tmf_timedout);
412 if (res) 472 if (res)
413 goto out; 473 goto out_free;
414 wait_for_completion(&ascb->completion); 474 wait_for_completion(&completion);
415 ASD_DPRINTK("tmf came back\n"); 475 ASD_DPRINTK("tmf came back\n");
416 476
417 res = (int) (unsigned long) ascb->uldd_task; 477 tascb->tag = tcs.tag;
418 tascb->tag = ascb->tag; 478 tascb->tag_valid = tcs.tag_valid;
419 tascb->tag_valid = ascb->tag_valid;
420 479
421 spin_lock_irqsave(&task->task_state_lock, flags); 480 spin_lock_irqsave(&task->task_state_lock, flags);
422 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 481 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
@@ -427,63 +486,68 @@ int asd_abort_task(struct sas_task *task)
427 } 486 }
428 spin_unlock_irqrestore(&task->task_state_lock, flags); 487 spin_unlock_irqrestore(&task->task_state_lock, flags);
429 488
430 switch (res) { 489 if (tcs.dl_opcode == TC_SSP_RESP) {
431 /* The task to be aborted has been sent to the device. 490 /* The task to be aborted has been sent to the device.
432 * We got a Response IU for the ABORT TASK TMF. */ 491 * We got a Response IU for the ABORT TASK TMF. */
433 case TC_NO_ERROR + 0xFF00: 492 if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
434 case TMF_RESP_FUNC_COMPLETE: 493 res = asd_clear_nexus(task);
435 case TMF_RESP_FUNC_FAILED: 494 else
436 res = asd_clear_nexus(task); 495 res = tcs.tmf_state;
437 break; 496 } else if (tcs.dl_opcode == TC_NO_ERROR &&
438 case TMF_RESP_INVALID_FRAME: 497 tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
439 case TMF_RESP_OVERLAPPED_TAG: 498 /* timeout */
440 case TMF_RESP_FUNC_ESUPP:
441 case TMF_RESP_NO_LUN:
442 goto out_done; break;
443 }
444 /* In the following we assume that the managing layer
445 * will _never_ make a mistake, when issuing ABORT TASK.
446 */
447 switch (res) {
448 default:
449 res = asd_clear_nexus(task);
450 /* fallthrough */
451 case TC_NO_ERROR + 0xFF00:
452 case TMF_RESP_FUNC_COMPLETE:
453 break;
454 /* The task hasn't been sent to the device xor we never got
455 * a (sane) Response IU for the ABORT TASK TMF.
456 */
457 case TF_NAK_RECV + 0xFF00:
458 res = TMF_RESP_INVALID_FRAME;
459 break;
460 case TF_TMF_TASK_DONE + 0xFF00: /* done but not reported yet */
461 res = TMF_RESP_FUNC_FAILED; 499 res = TMF_RESP_FUNC_FAILED;
462 leftover = wait_for_completion_timeout(&tascb->completion, 500 } else {
463 AIC94XX_SCB_TIMEOUT); 501 /* In the following we assume that the managing layer
464 spin_lock_irqsave(&task->task_state_lock, flags); 502 * will _never_ make a mistake, when issuing ABORT
465 if (leftover < 1) 503 * TASK.
504 */
505 switch (tcs.dl_opcode) {
506 default:
507 res = asd_clear_nexus(task);
508 /* fallthrough */
509 case TC_NO_ERROR:
510 break;
511 /* The task hasn't been sent to the device xor
512 * we never got a (sane) Response IU for the
513 * ABORT TASK TMF.
514 */
515 case TF_NAK_RECV:
516 res = TMF_RESP_INVALID_FRAME;
517 break;
518 case TF_TMF_TASK_DONE: /* done but not reported yet */
466 res = TMF_RESP_FUNC_FAILED; 519 res = TMF_RESP_FUNC_FAILED;
467 if (task->task_state_flags & SAS_TASK_STATE_DONE) 520 leftover =
521 wait_for_completion_timeout(&tascb_completion,
522 AIC94XX_SCB_TIMEOUT);
523 spin_lock_irqsave(&task->task_state_lock, flags);
524 if (leftover < 1)
525 res = TMF_RESP_FUNC_FAILED;
526 if (task->task_state_flags & SAS_TASK_STATE_DONE)
527 res = TMF_RESP_FUNC_COMPLETE;
528 spin_unlock_irqrestore(&task->task_state_lock, flags);
529 break;
530 case TF_TMF_NO_TAG:
531 case TF_TMF_TAG_FREE: /* the tag is in the free list */
532 case TF_TMF_NO_CONN_HANDLE: /* no such device */
468 res = TMF_RESP_FUNC_COMPLETE; 533 res = TMF_RESP_FUNC_COMPLETE;
469 spin_unlock_irqrestore(&task->task_state_lock, flags); 534 break;
470 goto out_done; 535 case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
471 case TF_TMF_NO_TAG + 0xFF00: 536 res = TMF_RESP_FUNC_ESUPP;
472 case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */ 537 break;
473 case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */ 538 }
474 res = TMF_RESP_FUNC_COMPLETE;
475 goto out_done;
476 case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */
477 res = TMF_RESP_FUNC_ESUPP;
478 goto out;
479 } 539 }
480out_done: 540 out_done:
541 tascb->completion = NULL;
481 if (res == TMF_RESP_FUNC_COMPLETE) { 542 if (res == TMF_RESP_FUNC_COMPLETE) {
482 task->lldd_task = NULL; 543 task->lldd_task = NULL;
483 mb(); 544 mb();
484 asd_ascb_free(tascb); 545 asd_ascb_free(tascb);
485 } 546 }
486out: 547 ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
548 return res;
549
550 out_free:
487 asd_ascb_free(ascb); 551 asd_ascb_free(ascb);
488 ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); 552 ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
489 return res; 553 return res;
@@ -511,6 +575,8 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
511 struct asd_ascb *ascb; 575 struct asd_ascb *ascb;
512 int res = 1; 576 int res = 1;
513 struct scb *scb; 577 struct scb *scb;
578 DECLARE_COMPLETION_ONSTACK(completion);
579 DECLARE_TCS(tcs);
514 580
515 if (!(dev->tproto & SAS_PROTOCOL_SSP)) 581 if (!(dev->tproto & SAS_PROTOCOL_SSP))
516 return TMF_RESP_FUNC_ESUPP; 582 return TMF_RESP_FUNC_ESUPP;
@@ -518,6 +584,9 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
518 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); 584 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
519 if (!ascb) 585 if (!ascb)
520 return -ENOMEM; 586 return -ENOMEM;
587
588 ascb->completion = &completion;
589 ascb->uldd_task = &tcs;
521 scb = ascb->scb; 590 scb = ascb->scb;
522 591
523 if (tmf == TMF_QUERY_TASK) 592 if (tmf == TMF_QUERY_TASK)
@@ -550,31 +619,32 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
550 asd_tmf_timedout); 619 asd_tmf_timedout);
551 if (res) 620 if (res)
552 goto out_err; 621 goto out_err;
553 wait_for_completion(&ascb->completion); 622 wait_for_completion(&completion);
554 res = (int) (unsigned long) ascb->uldd_task;
555 623
556 switch (res) { 624 switch (tcs.dl_opcode) {
557 case TC_NO_ERROR + 0xFF00: 625 case TC_NO_ERROR:
558 res = TMF_RESP_FUNC_COMPLETE; 626 res = TMF_RESP_FUNC_COMPLETE;
559 break; 627 break;
560 case TF_NAK_RECV + 0xFF00: 628 case TF_NAK_RECV:
561 res = TMF_RESP_INVALID_FRAME; 629 res = TMF_RESP_INVALID_FRAME;
562 break; 630 break;
563 case TF_TMF_TASK_DONE + 0xFF00: 631 case TF_TMF_TASK_DONE:
564 res = TMF_RESP_FUNC_FAILED; 632 res = TMF_RESP_FUNC_FAILED;
565 break; 633 break;
566 case TF_TMF_NO_TAG + 0xFF00: 634 case TF_TMF_NO_TAG:
567 case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */ 635 case TF_TMF_TAG_FREE: /* the tag is in the free list */
568 case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */ 636 case TF_TMF_NO_CONN_HANDLE: /* no such device */
569 res = TMF_RESP_FUNC_COMPLETE; 637 res = TMF_RESP_FUNC_COMPLETE;
570 break; 638 break;
571 case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */ 639 case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
572 res = TMF_RESP_FUNC_ESUPP; 640 res = TMF_RESP_FUNC_ESUPP;
573 break; 641 break;
574 default: 642 default:
575 /* Allow TMF response codes to propagate upwards */ 643 /* Allow TMF response codes to propagate upwards */
644 res = tcs.dl_opcode;
576 break; 645 break;
577 } 646 }
647 return res;
578out_err: 648out_err:
579 asd_ascb_free(ascb); 649 asd_ascb_free(ascb);
580 return res; 650 return res;
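The aic94xx_tmf.c rework above drops the completion embedded in every ascb in favour of an on-stack completion plus a small tasklet_completion_status that carries the result back to the sleeping caller, which is what allows the tasklet to free the ascb itself. A minimal sketch of the pattern, with illustrative names (tcs_example, issue_and_wait):

    #include <linux/completion.h>

    struct tcs_example {                     /* result filled in by the tasklet */
            int dl_opcode;
            int tmf_state;
    };

    static int issue_and_wait(struct asd_ascb *ascb)
    {
            DECLARE_COMPLETION_ONSTACK(completion);
            struct tcs_example tcs = { 0, 0 };

            ascb->completion = &completion;  /* tasklet/timeout will complete() this */
            ascb->uldd_task  = &tcs;         /* tasklet writes its status here */

            /* post the SCB here, then sleep until the completion fires */
            wait_for_completion(&completion);
            return tcs.dl_opcode;
    }
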
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 57786502e3ec..0393707bdfce 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -48,7 +48,7 @@ struct class_device_attribute;
48/*The limit of outstanding scsi command that firmware can handle*/ 48/*The limit of outstanding scsi command that firmware can handle*/
49#define ARCMSR_MAX_OUTSTANDING_CMD 256 49#define ARCMSR_MAX_OUTSTANDING_CMD 256
50#define ARCMSR_MAX_FREECCB_NUM 320 50#define ARCMSR_MAX_FREECCB_NUM 320
51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/12/24" 51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/02/27"
52#define ARCMSR_SCSI_INITIATOR_ID 255 52#define ARCMSR_SCSI_INITIATOR_ID 255
53#define ARCMSR_MAX_XFER_SECTORS 512 53#define ARCMSR_MAX_XFER_SECTORS 512
54#define ARCMSR_MAX_XFER_SECTORS_B 4096 54#define ARCMSR_MAX_XFER_SECTORS_B 4096
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 4f9ff32cfed0..f91f79c8007d 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1387,18 +1387,16 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1387 switch(controlcode) { 1387 switch(controlcode) {
1388 1388
1389 case ARCMSR_MESSAGE_READ_RQBUFFER: { 1389 case ARCMSR_MESSAGE_READ_RQBUFFER: {
1390 unsigned long *ver_addr; 1390 unsigned char *ver_addr;
1391 uint8_t *pQbuffer, *ptmpQbuffer; 1391 uint8_t *pQbuffer, *ptmpQbuffer;
1392 int32_t allxfer_len = 0; 1392 int32_t allxfer_len = 0;
1393 void *tmp;
1394 1393
1395 tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA); 1394 ver_addr = kmalloc(1032, GFP_ATOMIC);
1396 ver_addr = (unsigned long *)tmp; 1395 if (!ver_addr) {
1397 if (!tmp) {
1398 retvalue = ARCMSR_MESSAGE_FAIL; 1396 retvalue = ARCMSR_MESSAGE_FAIL;
1399 goto message_out; 1397 goto message_out;
1400 } 1398 }
1401 ptmpQbuffer = (uint8_t *) ver_addr; 1399 ptmpQbuffer = ver_addr;
1402 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) 1400 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
1403 && (allxfer_len < 1031)) { 1401 && (allxfer_len < 1031)) {
1404 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; 1402 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
@@ -1427,26 +1425,24 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1427 } 1425 }
1428 arcmsr_iop_message_read(acb); 1426 arcmsr_iop_message_read(acb);
1429 } 1427 }
1430 memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len); 1428 memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
1431 pcmdmessagefld->cmdmessage.Length = allxfer_len; 1429 pcmdmessagefld->cmdmessage.Length = allxfer_len;
1432 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1430 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1433 kfree(tmp); 1431 kfree(ver_addr);
1434 } 1432 }
1435 break; 1433 break;
1436 1434
1437 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 1435 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1438 unsigned long *ver_addr; 1436 unsigned char *ver_addr;
1439 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 1437 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1440 uint8_t *pQbuffer, *ptmpuserbuffer; 1438 uint8_t *pQbuffer, *ptmpuserbuffer;
1441 void *tmp;
1442 1439
1443 tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA); 1440 ver_addr = kmalloc(1032, GFP_ATOMIC);
1444 ver_addr = (unsigned long *)tmp; 1441 if (!ver_addr) {
1445 if (!tmp) {
1446 retvalue = ARCMSR_MESSAGE_FAIL; 1442 retvalue = ARCMSR_MESSAGE_FAIL;
1447 goto message_out; 1443 goto message_out;
1448 } 1444 }
1449 ptmpuserbuffer = (uint8_t *)ver_addr; 1445 ptmpuserbuffer = ver_addr;
1450 user_len = pcmdmessagefld->cmdmessage.Length; 1446 user_len = pcmdmessagefld->cmdmessage.Length;
1451 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); 1447 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
1452 wqbuf_lastindex = acb->wqbuf_lastindex; 1448 wqbuf_lastindex = acb->wqbuf_lastindex;
@@ -1492,7 +1488,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1492 retvalue = ARCMSR_MESSAGE_FAIL; 1488 retvalue = ARCMSR_MESSAGE_FAIL;
1493 } 1489 }
1494 } 1490 }
1495 kfree(tmp); 1491 kfree(ver_addr);
1496 } 1492 }
1497 break; 1493 break;
1498 1494
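The arcmsr hunks simplify the 1032-byte bounce-buffer handling: the buffer is allocated directly as unsigned char * (dropping the void *tmp and the unsigned long * cast) and with GFP_ATOMIC, presumably because this message path can be reached in atomic context. A minimal sketch of the allocation pattern (the function name is illustrative):

    #include <linux/slab.h>

    static int read_rqbuffer_example(void)
    {
            unsigned char *ver_addr;

            ver_addr = kmalloc(1032, GFP_ATOMIC);   /* non-sleeping allocation */
            if (!ver_addr)
                    return ARCMSR_MESSAGE_FAIL;     /* always check the result */

            /* use ver_addr as a plain byte buffer; no cast needed */
            kfree(ver_addr);
            return 0;                               /* success */
    }
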
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index 3e73e264972e..b65f4cf0eec9 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -313,7 +313,7 @@ typedef struct {
313 313
314 /* miscellaneous */ 314 /* miscellaneous */
315 int internal_done; /* flag to indicate request done */ 315 int internal_done; /* flag to indicate request done */
316 struct scsi_eh_save *ses; /* holds request sense restore info */ 316 struct scsi_eh_save ses; /* holds request sense restore info */
317 unsigned long magic_end; 317 unsigned long magic_end;
318} FAS216_Info; 318} FAS216_Info;
319 319
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 6d67f5c0eb8e..27ebd336409b 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -160,7 +160,7 @@ static void gdth_readapp_event(gdth_ha_str *ha, unchar application,
160static void gdth_clear_events(void); 160static void gdth_clear_events(void);
161 161
162static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, 162static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
163 char *buffer, ushort count, int to_buffer); 163 char *buffer, ushort count);
164static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); 164static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
165static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive); 165static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive);
166 166
@@ -182,7 +182,6 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
182 unsigned int cmd, unsigned long arg); 182 unsigned int cmd, unsigned long arg);
183 183
184static void gdth_flush(gdth_ha_str *ha); 184static void gdth_flush(gdth_ha_str *ha);
185static int gdth_halt(struct notifier_block *nb, ulong event, void *buf);
186static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)); 185static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *));
187static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, 186static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
188 struct gdth_cmndinfo *cmndinfo); 187 struct gdth_cmndinfo *cmndinfo);
@@ -417,12 +416,6 @@ static inline void gdth_set_sglist(struct scsi_cmnd *cmd,
417#include "gdth_proc.h" 416#include "gdth_proc.h"
418#include "gdth_proc.c" 417#include "gdth_proc.c"
419 418
420/* notifier block to get a notify on system shutdown/halt/reboot */
421static struct notifier_block gdth_notifier = {
422 gdth_halt, NULL, 0
423};
424static int notifier_disabled = 0;
425
426static gdth_ha_str *gdth_find_ha(int hanum) 419static gdth_ha_str *gdth_find_ha(int hanum)
427{ 420{
428 gdth_ha_str *ha; 421 gdth_ha_str *ha;
@@ -445,8 +438,8 @@ static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha)
445 for (i=0; i<GDTH_MAXCMDS; ++i) { 438 for (i=0; i<GDTH_MAXCMDS; ++i) {
446 if (ha->cmndinfo[i].index == 0) { 439 if (ha->cmndinfo[i].index == 0) {
447 priv = &ha->cmndinfo[i]; 440 priv = &ha->cmndinfo[i];
448 priv->index = i+1;
449 memset(priv, 0, sizeof(*priv)); 441 memset(priv, 0, sizeof(*priv));
442 priv->index = i+1;
450 break; 443 break;
451 } 444 }
452 } 445 }
@@ -493,7 +486,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
493 gdth_ha_str *ha = shost_priv(sdev->host); 486 gdth_ha_str *ha = shost_priv(sdev->host);
494 Scsi_Cmnd *scp; 487 Scsi_Cmnd *scp;
495 struct gdth_cmndinfo cmndinfo; 488 struct gdth_cmndinfo cmndinfo;
496 struct scatterlist one_sg;
497 DECLARE_COMPLETION_ONSTACK(wait); 489 DECLARE_COMPLETION_ONSTACK(wait);
498 int rval; 490 int rval;
499 491
@@ -507,13 +499,10 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
507 /* use request field to save the ptr. to completion struct. */ 499 /* use request field to save the ptr. to completion struct. */
508 scp->request = (struct request *)&wait; 500 scp->request = (struct request *)&wait;
509 scp->timeout_per_command = timeout*HZ; 501 scp->timeout_per_command = timeout*HZ;
510 sg_init_one(&one_sg, gdtcmd, sizeof(*gdtcmd));
511 gdth_set_sglist(scp, &one_sg);
512 gdth_set_sg_count(scp, 1);
513 gdth_set_bufflen(scp, sizeof(*gdtcmd));
514 scp->cmd_len = 12; 502 scp->cmd_len = 12;
515 memcpy(scp->cmnd, cmnd, 12); 503 memcpy(scp->cmnd, cmnd, 12);
516 cmndinfo.priority = IOCTL_PRI; 504 cmndinfo.priority = IOCTL_PRI;
505 cmndinfo.internal_cmd_str = gdtcmd;
517 cmndinfo.internal_command = 1; 506 cmndinfo.internal_command = 1;
518 507
519 TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0])); 508 TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0]));
@@ -2355,7 +2344,7 @@ static void gdth_next(gdth_ha_str *ha)
2355 * buffers, kmap_atomic() as needed. 2344 * buffers, kmap_atomic() as needed.
2356 */ 2345 */
2357static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, 2346static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
2358 char *buffer, ushort count, int to_buffer) 2347 char *buffer, ushort count)
2359{ 2348{
2360 ushort cpcount,i, max_sg = gdth_sg_count(scp); 2349 ushort cpcount,i, max_sg = gdth_sg_count(scp);
2361 ushort cpsum,cpnow; 2350 ushort cpsum,cpnow;
@@ -2381,10 +2370,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
2381 } 2370 }
2382 local_irq_save(flags); 2371 local_irq_save(flags);
2383 address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset; 2372 address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset;
2384 if (to_buffer) 2373 memcpy(address, buffer, cpnow);
2385 memcpy(buffer, address, cpnow);
2386 else
2387 memcpy(address, buffer, cpnow);
2388 flush_dcache_page(sg_page(sl)); 2374 flush_dcache_page(sg_page(sl));
2389 kunmap_atomic(address, KM_BIO_SRC_IRQ); 2375 kunmap_atomic(address, KM_BIO_SRC_IRQ);
2390 local_irq_restore(flags); 2376 local_irq_restore(flags);
@@ -2438,7 +2424,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2438 strcpy(inq.vendor,ha->oem_name); 2424 strcpy(inq.vendor,ha->oem_name);
2439 sprintf(inq.product,"Host Drive #%02d",t); 2425 sprintf(inq.product,"Host Drive #%02d",t);
2440 strcpy(inq.revision," "); 2426 strcpy(inq.revision," ");
2441 gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data), 0); 2427 gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
2442 break; 2428 break;
2443 2429
2444 case REQUEST_SENSE: 2430 case REQUEST_SENSE:
@@ -2448,7 +2434,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2448 sd.key = NO_SENSE; 2434 sd.key = NO_SENSE;
2449 sd.info = 0; 2435 sd.info = 0;
2450 sd.add_length= 0; 2436 sd.add_length= 0;
2451 gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data), 0); 2437 gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data));
2452 break; 2438 break;
2453 2439
2454 case MODE_SENSE: 2440 case MODE_SENSE:
@@ -2460,7 +2446,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2460 mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16; 2446 mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
2461 mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8; 2447 mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
2462 mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff); 2448 mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
2463 gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data), 0); 2449 gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data));
2464 break; 2450 break;
2465 2451
2466 case READ_CAPACITY: 2452 case READ_CAPACITY:
@@ -2470,7 +2456,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2470 else 2456 else
2471 rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1); 2457 rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
2472 rdc.block_length = cpu_to_be32(SECTOR_SIZE); 2458 rdc.block_length = cpu_to_be32(SECTOR_SIZE);
2473 gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data), 0); 2459 gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data));
2474 break; 2460 break;
2475 2461
2476 case SERVICE_ACTION_IN: 2462 case SERVICE_ACTION_IN:
@@ -2482,7 +2468,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2482 rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1); 2468 rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1);
2483 rdc16.block_length = cpu_to_be32(SECTOR_SIZE); 2469 rdc16.block_length = cpu_to_be32(SECTOR_SIZE);
2484 gdth_copy_internal_data(ha, scp, (char*)&rdc16, 2470 gdth_copy_internal_data(ha, scp, (char*)&rdc16,
2485 sizeof(gdth_rdcap16_data), 0); 2471 sizeof(gdth_rdcap16_data));
2486 } else { 2472 } else {
2487 scp->result = DID_ABORT << 16; 2473 scp->result = DID_ABORT << 16;
2488 } 2474 }
@@ -2852,6 +2838,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b)
2852static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) 2838static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2853{ 2839{
2854 register gdth_cmd_str *cmdp; 2840 register gdth_cmd_str *cmdp;
2841 struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
2855 int cmd_index; 2842 int cmd_index;
2856 2843
2857 cmdp= ha->pccb; 2844 cmdp= ha->pccb;
@@ -2860,7 +2847,7 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
2860 if (ha->type==GDT_EISA && ha->cmd_cnt>0) 2847 if (ha->type==GDT_EISA && ha->cmd_cnt>0)
2861 return 0; 2848 return 0;
2862 2849
2863 gdth_copy_internal_data(ha, scp, (char *)cmdp, sizeof(gdth_cmd_str), 1); 2850 *cmdp = *cmndinfo->internal_cmd_str;
2864 cmdp->RequestBuffer = scp; 2851 cmdp->RequestBuffer = scp;
2865 2852
2866 /* search free command index */ 2853 /* search free command index */
@@ -3794,6 +3781,8 @@ static void gdth_timeout(ulong data)
3794 gdth_ha_str *ha; 3781 gdth_ha_str *ha;
3795 ulong flags; 3782 ulong flags;
3796 3783
3784 BUG_ON(list_empty(&gdth_instances));
3785
3797 ha = list_first_entry(&gdth_instances, gdth_ha_str, list); 3786 ha = list_first_entry(&gdth_instances, gdth_ha_str, list);
3798 spin_lock_irqsave(&ha->smp_lock, flags); 3787 spin_lock_irqsave(&ha->smp_lock, flags);
3799 3788
@@ -4669,45 +4658,6 @@ static void gdth_flush(gdth_ha_str *ha)
4669 } 4658 }
4670} 4659}
4671 4660
4672/* shutdown routine */
4673static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
4674{
4675 gdth_ha_str *ha;
4676#ifndef __alpha__
4677 gdth_cmd_str gdtcmd;
4678 char cmnd[MAX_COMMAND_SIZE];
4679#endif
4680
4681 if (notifier_disabled)
4682 return NOTIFY_OK;
4683
4684 TRACE2(("gdth_halt() event %d\n",(int)event));
4685 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
4686 return NOTIFY_DONE;
4687
4688 notifier_disabled = 1;
4689 printk("GDT-HA: Flushing all host drives .. ");
4690 list_for_each_entry(ha, &gdth_instances, list) {
4691 gdth_flush(ha);
4692
4693#ifndef __alpha__
4694 /* controller reset */
4695 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
4696 gdtcmd.BoardNode = LOCALBOARD;
4697 gdtcmd.Service = CACHESERVICE;
4698 gdtcmd.OpCode = GDT_RESET;
4699 TRACE2(("gdth_halt(): reset controller %d\n", ha->hanum));
4700 gdth_execute(ha->shost, &gdtcmd, cmnd, 10, NULL);
4701#endif
4702 }
4703 printk("Done.\n");
4704
4705#ifdef GDTH_STATISTICS
4706 del_timer(&gdth_timer);
4707#endif
4708 return NOTIFY_OK;
4709}
4710
4711/* configure lun */ 4661/* configure lun */
4712static int gdth_slave_configure(struct scsi_device *sdev) 4662static int gdth_slave_configure(struct scsi_device *sdev)
4713{ 4663{
@@ -5142,13 +5092,13 @@ static void gdth_remove_one(gdth_ha_str *ha)
5142 5092
5143 scsi_remove_host(shp); 5093 scsi_remove_host(shp);
5144 5094
5095 gdth_flush(ha);
5096
5145 if (ha->sdev) { 5097 if (ha->sdev) {
5146 scsi_free_host_dev(ha->sdev); 5098 scsi_free_host_dev(ha->sdev);
5147 ha->sdev = NULL; 5099 ha->sdev = NULL;
5148 } 5100 }
5149 5101
5150 gdth_flush(ha);
5151
5152 if (shp->irq) 5102 if (shp->irq)
5153 free_irq(shp->irq,ha); 5103 free_irq(shp->irq,ha);
5154 5104
@@ -5174,6 +5124,24 @@ static void gdth_remove_one(gdth_ha_str *ha)
5174 scsi_host_put(shp); 5124 scsi_host_put(shp);
5175} 5125}
5176 5126
5127static int gdth_halt(struct notifier_block *nb, ulong event, void *buf)
5128{
5129 gdth_ha_str *ha;
5130
5131 TRACE2(("gdth_halt() event %d\n", (int)event));
5132 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
5133 return NOTIFY_DONE;
5134
5135 list_for_each_entry(ha, &gdth_instances, list)
5136 gdth_flush(ha);
5137
5138 return NOTIFY_OK;
5139}
5140
5141static struct notifier_block gdth_notifier = {
5142 gdth_halt, NULL, 0
5143};
5144
5177static int __init gdth_init(void) 5145static int __init gdth_init(void)
5178{ 5146{
5179 if (disable) { 5147 if (disable) {
@@ -5236,7 +5204,6 @@ static int __init gdth_init(void)
5236 add_timer(&gdth_timer); 5204 add_timer(&gdth_timer);
5237#endif 5205#endif
5238 major = register_chrdev(0,"gdth", &gdth_fops); 5206 major = register_chrdev(0,"gdth", &gdth_fops);
5239 notifier_disabled = 0;
5240 register_reboot_notifier(&gdth_notifier); 5207 register_reboot_notifier(&gdth_notifier);
5241 gdth_polling = FALSE; 5208 gdth_polling = FALSE;
5242 return 0; 5209 return 0;
@@ -5246,14 +5213,15 @@ static void __exit gdth_exit(void)
5246{ 5213{
5247 gdth_ha_str *ha; 5214 gdth_ha_str *ha;
5248 5215
5249 list_for_each_entry(ha, &gdth_instances, list) 5216 unregister_chrdev(major, "gdth");
5250 gdth_remove_one(ha); 5217 unregister_reboot_notifier(&gdth_notifier);
5251 5218
5252#ifdef GDTH_STATISTICS 5219#ifdef GDTH_STATISTICS
5253 del_timer(&gdth_timer); 5220 del_timer_sync(&gdth_timer);
5254#endif 5221#endif
5255 unregister_chrdev(major,"gdth"); 5222
5256 unregister_reboot_notifier(&gdth_notifier); 5223 list_for_each_entry(ha, &gdth_instances, list)
5224 gdth_remove_one(ha);
5257} 5225}
5258 5226
5259module_init(gdth_init); 5227module_init(gdth_init);
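The gdth changes above slim down the shutdown path: gdth_halt no longer resets the controller or juggles a notifier_disabled flag, it simply flushes every host adapter, and the notifier_block definition moves next to it; gdth_exit now unregisters the chrdev and the notifier before tearing down the instances. A minimal sketch of the reboot-notifier pattern involved (names are illustrative):

    #include <linux/notifier.h>
    #include <linux/reboot.h>

    static int example_halt(struct notifier_block *nb, unsigned long event, void *buf)
    {
            if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
                    return NOTIFY_DONE;
            /* flush adapter state here */
            return NOTIFY_OK;
    }

    static struct notifier_block example_notifier = {
            .notifier_call = example_halt,
    };

    /* register_reboot_notifier(&example_notifier) in module init,
     * unregister_reboot_notifier(&example_notifier) in module exit */
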
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index 1434c6b0297c..26e4e92515e0 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -915,6 +915,7 @@ typedef struct {
915 struct gdth_cmndinfo { /* per-command private info */ 915 struct gdth_cmndinfo { /* per-command private info */
916 int index; 916 int index;
917 int internal_command; /* don't call scsi_done */ 917 int internal_command; /* don't call scsi_done */
918 gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/
918 dma_addr_t sense_paddr; /* sense dma-addr */ 919 dma_addr_t sense_paddr; /* sense dma-addr */
919 unchar priority; 920 unchar priority;
920 int timeout; 921 int timeout;
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index de5773443c62..ce0228e26aec 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -694,15 +694,13 @@ static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr)
694{ 694{
695 ulong flags; 695 ulong flags;
696 696
697 spin_lock_irqsave(&ha->smp_lock, flags);
698
699 if (buf == ha->pscratch) { 697 if (buf == ha->pscratch) {
698 spin_lock_irqsave(&ha->smp_lock, flags);
700 ha->scratch_busy = FALSE; 699 ha->scratch_busy = FALSE;
700 spin_unlock_irqrestore(&ha->smp_lock, flags);
701 } else { 701 } else {
702 pci_free_consistent(ha->pdev, size, buf, paddr); 702 pci_free_consistent(ha->pdev, size, buf, paddr);
703 } 703 }
704
705 spin_unlock_irqrestore(&ha->smp_lock, flags);
706} 704}
707 705
708#ifdef GDTH_IOCTL_PROC 706#ifdef GDTH_IOCTL_PROC
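The gdth_ioctl_free hunk narrows the critical section: only the scratch_busy flag update stays under smp_lock, so pci_free_consistent() no longer runs with the adapter spinlock held and interrupts disabled. A minimal sketch of the resulting shape:

    if (buf == ha->pscratch) {
            spin_lock_irqsave(&ha->smp_lock, flags);
            ha->scratch_busy = FALSE;               /* only the flag needs the lock */
            spin_unlock_irqrestore(&ha->smp_lock, flags);
    } else {
            pci_free_consistent(ha->pdev, size, buf, paddr);  /* lock not held */
    }
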
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index bd62131b97a1..e5881e92d0fb 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -290,7 +290,7 @@ static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
290 int err = 0; 290 int err = 0;
291 291
292 dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0], 292 dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
293 cmd->usg_sg); 293 scsi_sg_count(sc));
294 294
295 if (scsi_sg_count(sc)) 295 if (scsi_sg_count(sc))
296 err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1); 296 err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
@@ -838,9 +838,6 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
838 if (!shost) 838 if (!shost)
839 goto free_vport; 839 goto free_vport;
840 shost->transportt = ibmvstgt_transport_template; 840 shost->transportt = ibmvstgt_transport_template;
841 err = scsi_tgt_alloc_queue(shost);
842 if (err)
843 goto put_host;
844 841
845 target = host_to_srp_target(shost); 842 target = host_to_srp_target(shost);
846 target->shost = shost; 843 target->shost = shost;
@@ -872,6 +869,10 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
872 if (err) 869 if (err)
873 goto destroy_queue; 870 goto destroy_queue;
874 871
872 err = scsi_tgt_alloc_queue(shost);
873 if (err)
874 goto destroy_queue;
875
875 return 0; 876 return 0;
876destroy_queue: 877destroy_queue:
877 crq_queue_destroy(target); 878 crq_queue_destroy(target);
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bb152fb9fec7..7ed568f180ae 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1576,7 +1576,7 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
1576 METHOD_TRACE("ips_make_passthru", 1); 1576 METHOD_TRACE("ips_make_passthru", 1);
1577 1577
1578 scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i) 1578 scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
1579 length += sg[i].length; 1579 length += sg->length;
1580 1580
1581 if (length < sizeof (ips_passthru_t)) { 1581 if (length < sizeof (ips_passthru_t)) {
1582 /* wrong size */ 1582 /* wrong size */
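The ips.c hunk fixes a scatterlist walk: scsi_for_each_sg() advances sg itself on every iteration, so indexing sg[i] stepped off the current element and summed the wrong lengths. A minimal sketch of the correct idiom:

    #include <scsi/scsi_cmnd.h>

    struct scatterlist *sg;
    unsigned int length = 0;
    int i;

    /* 'sg' is the iteration cursor; dereference it directly on each pass */
    scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
            length += sg->length;
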
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 59f8445eab0d..bdd7de7da39a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1708,8 +1708,8 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
1708 qdepth = ISCSI_DEF_CMD_PER_LUN; 1708 qdepth = ISCSI_DEF_CMD_PER_LUN;
1709 } 1709 }
1710 1710
1711 if (!is_power_of_2(cmds_max) || 1711 if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
1712 cmds_max >= ISCSI_MGMT_ITT_OFFSET) { 1712 cmds_max < 2) {
1713 if (cmds_max != 0) 1713 if (cmds_max != 0)
1714 printk(KERN_ERR "iscsi: invalid can_queue of %d. " 1714 printk(KERN_ERR "iscsi: invalid can_queue of %d. "
1715 "can_queue must be a power of 2 and between " 1715 "can_queue must be a power of 2 and between "
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 7cd05b599a12..b0e5ac372a32 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -236,12 +236,12 @@ static void sas_ata_phy_reset(struct ata_port *ap)
236 struct domain_device *dev = ap->private_data; 236 struct domain_device *dev = ap->private_data;
237 struct sas_internal *i = 237 struct sas_internal *i =
238 to_sas_internal(dev->port->ha->core.shost->transportt); 238 to_sas_internal(dev->port->ha->core.shost->transportt);
239 int res = 0; 239 int res = TMF_RESP_FUNC_FAILED;
240 240
241 if (i->dft->lldd_I_T_nexus_reset) 241 if (i->dft->lldd_I_T_nexus_reset)
242 res = i->dft->lldd_I_T_nexus_reset(dev); 242 res = i->dft->lldd_I_T_nexus_reset(dev);
243 243
244 if (res) 244 if (res != TMF_RESP_FUNC_COMPLETE)
245 SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__); 245 SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__);
246 246
247 switch (dev->sata_dev.command_set) { 247 switch (dev->sata_dev.command_set) {
@@ -656,21 +656,6 @@ out:
656 return res; 656 return res;
657} 657}
658 658
659static void sas_sata_propagate_sas_addr(struct domain_device *dev)
660{
661 unsigned long flags;
662 struct asd_sas_port *port = dev->port;
663 struct asd_sas_phy *phy;
664
665 BUG_ON(dev->parent);
666
667 memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
668 spin_lock_irqsave(&port->phy_list_lock, flags);
669 list_for_each_entry(phy, &port->phy_list, port_phy_el)
670 memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE);
671 spin_unlock_irqrestore(&port->phy_list_lock, flags);
672}
673
674#define ATA_IDENTIFY_DEV 0xEC 659#define ATA_IDENTIFY_DEV 0xEC
675#define ATA_IDENTIFY_PACKET_DEV 0xA1 660#define ATA_IDENTIFY_PACKET_DEV 0xA1
676#define ATA_SET_FEATURES 0xEF 661#define ATA_SET_FEATURES 0xEF
@@ -728,26 +713,6 @@ static int sas_discover_sata_dev(struct domain_device *dev)
728 goto out_err; 713 goto out_err;
729 } 714 }
730cont1: 715cont1:
731 /* Get WWN */
732 if (dev->port->oob_mode != SATA_OOB_MODE) {
733 memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr,
734 SAS_ADDR_SIZE);
735 } else if (dev->sata_dev.command_set == ATA_COMMAND_SET &&
736 (le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000)
737 == 0x5000) {
738 int i;
739
740 for (i = 0; i < 4; i++) {
741 dev->sas_addr[2*i] =
742 (le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8;
743 dev->sas_addr[2*i+1] =
744 le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF;
745 }
746 }
747 sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
748 if (!dev->parent)
749 sas_sata_propagate_sas_addr(dev);
750
751 /* XXX Hint: register this SATA device with SATL. 716 /* XXX Hint: register this SATA device with SATL.
752 When this returns, dev->sata_dev->lu is alive and 717 When this returns, dev->sata_dev->lu is alive and
753 present. 718 present.
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index e1e2d085c920..39ae68a3b0ef 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -92,9 +92,6 @@ static void sas_form_port(struct asd_sas_phy *phy)
92 if (!port->phy) 92 if (!port->phy)
93 port->phy = phy->phy; 93 port->phy = phy->phy;
94 94
95 SAS_DPRINTK("phy%d added to port%d, phy_mask:0x%x\n", phy->id,
96 port->id, port->phy_mask);
97
98 if (*(u64 *)port->attached_sas_addr == 0) { 95 if (*(u64 *)port->attached_sas_addr == 0) {
99 port->class = phy->class; 96 port->class = phy->class;
100 memcpy(port->attached_sas_addr, phy->attached_sas_addr, 97 memcpy(port->attached_sas_addr, phy->attached_sas_addr,
@@ -115,6 +112,11 @@ static void sas_form_port(struct asd_sas_phy *phy)
115 } 112 }
116 sas_port_add_phy(port->port, phy->phy); 113 sas_port_add_phy(port->port, phy->phy);
117 114
115 SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n",
 116 phy->phy->dev.bus_id, port->port->dev.bus_id,
117 port->phy_mask,
118 SAS_ADDR(port->attached_sas_addr));
119
118 if (port->port_dev) 120 if (port->port_dev)
119 port->port_dev->pathways = port->num_phys; 121 port->port_dev->pathways = port->num_phys;
120 122
@@ -255,12 +257,11 @@ void sas_porte_hard_reset(struct work_struct *work)
255static void sas_init_port(struct asd_sas_port *port, 257static void sas_init_port(struct asd_sas_port *port,
256 struct sas_ha_struct *sas_ha, int i) 258 struct sas_ha_struct *sas_ha, int i)
257{ 259{
260 memset(port, 0, sizeof(*port));
258 port->id = i; 261 port->id = i;
259 INIT_LIST_HEAD(&port->dev_list); 262 INIT_LIST_HEAD(&port->dev_list);
260 spin_lock_init(&port->phy_list_lock); 263 spin_lock_init(&port->phy_list_lock);
261 INIT_LIST_HEAD(&port->phy_list); 264 INIT_LIST_HEAD(&port->phy_list);
262 port->num_phys = 0;
263 port->phy_mask = 0;
264 port->ha = sas_ha; 265 port->ha = sas_ha;
265 266
266 spin_lock_init(&port->dev_list_lock); 267 spin_lock_init(&port->dev_list_lock);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f869fba86807..1f8241563c6c 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -51,10 +51,14 @@ static void sas_scsi_task_done(struct sas_task *task)
51{ 51{
52 struct task_status_struct *ts = &task->task_status; 52 struct task_status_struct *ts = &task->task_status;
53 struct scsi_cmnd *sc = task->uldd_task; 53 struct scsi_cmnd *sc = task->uldd_task;
54 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(sc->device->host);
55 unsigned ts_flags = task->task_state_flags;
56 int hs = 0, stat = 0; 54 int hs = 0, stat = 0;
57 55
56 if (unlikely(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
57 /* Aborted tasks will be completed by the error handler */
58 SAS_DPRINTK("task done but aborted\n");
59 return;
60 }
61
58 if (unlikely(!sc)) { 62 if (unlikely(!sc)) {
59 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); 63 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
60 list_del_init(&task->list); 64 list_del_init(&task->list);
@@ -120,11 +124,7 @@ static void sas_scsi_task_done(struct sas_task *task)
120 sc->result = (hs << 16) | stat; 124 sc->result = (hs << 16) | stat;
121 list_del_init(&task->list); 125 list_del_init(&task->list);
122 sas_free_task(task); 126 sas_free_task(task);
123 /* This is very ugly but this is how SCSI Core works. */ 127 sc->scsi_done(sc);
124 if (ts_flags & SAS_TASK_STATE_ABORTED)
125 scsi_eh_finish_cmd(sc, &sas_ha->eh_done_q);
126 else
127 sc->scsi_done(sc);
128} 128}
129 129
130static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd) 130static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd)
@@ -255,13 +255,34 @@ out:
255 return res; 255 return res;
256} 256}
257 257
258static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
259{
260 struct sas_task *task = TO_SAS_TASK(cmd);
261 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
262
263 /* remove the aborted task flag to allow the task to be
264 * completed now. At this point, we only get called following
265 * an actual abort of the task, so we should be guaranteed not
266 * to be racing with any completions from the LLD (hence we
267 * don't need the task state lock to clear the flag) */
268 task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
269 /* Now call task_done. However, task will be free'd after
270 * this */
271 task->task_done(task);
272 /* now finish the command and move it on to the error
273 * handler done list, this also takes it off the
274 * error handler pending list */
275 scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
276}
277
258static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) 278static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
259{ 279{
260 struct scsi_cmnd *cmd, *n; 280 struct scsi_cmnd *cmd, *n;
261 281
262 list_for_each_entry_safe(cmd, n, error_q, eh_entry) { 282 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
263 if (cmd == my_cmd) 283 if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
264 list_del_init(&cmd->eh_entry); 284 cmd->device->lun == my_cmd->device->lun)
285 sas_eh_finish_cmd(cmd);
265 } 286 }
266} 287}
267 288
@@ -274,7 +295,7 @@ static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
274 struct domain_device *x = cmd_to_domain_dev(cmd); 295 struct domain_device *x = cmd_to_domain_dev(cmd);
275 296
276 if (x == dev) 297 if (x == dev)
277 list_del_init(&cmd->eh_entry); 298 sas_eh_finish_cmd(cmd);
278 } 299 }
279} 300}
280 301
@@ -288,7 +309,7 @@ static void sas_scsi_clear_queue_port(struct list_head *error_q,
288 struct asd_sas_port *x = dev->port; 309 struct asd_sas_port *x = dev->port;
289 310
290 if (x == port) 311 if (x == port)
291 list_del_init(&cmd->eh_entry); 312 sas_eh_finish_cmd(cmd);
292 } 313 }
293} 314}
294 315
@@ -413,7 +434,7 @@ static int sas_recover_I_T(struct domain_device *dev)
413} 434}
414 435
415/* Find the sas_phy that's attached to this device */ 436/* Find the sas_phy that's attached to this device */
416static struct sas_phy *find_local_sas_phy(struct domain_device *dev) 437struct sas_phy *sas_find_local_phy(struct domain_device *dev)
417{ 438{
418 struct domain_device *pdev = dev->parent; 439 struct domain_device *pdev = dev->parent;
419 struct ex_phy *exphy = NULL; 440 struct ex_phy *exphy = NULL;
@@ -435,6 +456,7 @@ static struct sas_phy *find_local_sas_phy(struct domain_device *dev)
435 BUG_ON(!exphy); 456 BUG_ON(!exphy);
436 return exphy->phy; 457 return exphy->phy;
437} 458}
459EXPORT_SYMBOL_GPL(sas_find_local_phy);
438 460
439/* Attempt to send a LUN reset message to a device */ 461/* Attempt to send a LUN reset message to a device */
440int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) 462int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
@@ -461,7 +483,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
461int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) 483int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
462{ 484{
463 struct domain_device *dev = cmd_to_domain_dev(cmd); 485 struct domain_device *dev = cmd_to_domain_dev(cmd);
464 struct sas_phy *phy = find_local_sas_phy(dev); 486 struct sas_phy *phy = sas_find_local_phy(dev);
465 int res; 487 int res;
466 488
467 res = sas_phy_reset(phy, 1); 489 res = sas_phy_reset(phy, 1);
@@ -476,10 +498,10 @@ int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
476} 498}
477 499
478/* Try to reset a device */ 500/* Try to reset a device */
479static int try_to_reset_cmd_device(struct Scsi_Host *shost, 501static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
480 struct scsi_cmnd *cmd)
481{ 502{
482 int res; 503 int res;
504 struct Scsi_Host *shost = cmd->device->host;
483 505
484 if (!shost->hostt->eh_device_reset_handler) 506 if (!shost->hostt->eh_device_reset_handler)
485 goto try_bus_reset; 507 goto try_bus_reset;
@@ -519,6 +541,12 @@ Again:
519 need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET; 541 need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
520 spin_unlock_irqrestore(&task->task_state_lock, flags); 542 spin_unlock_irqrestore(&task->task_state_lock, flags);
521 543
544 if (need_reset) {
545 SAS_DPRINTK("%s: task 0x%p requests reset\n",
546 __FUNCTION__, task);
547 goto reset;
548 }
549
522 SAS_DPRINTK("trying to find task 0x%p\n", task); 550 SAS_DPRINTK("trying to find task 0x%p\n", task);
523 res = sas_scsi_find_task(task); 551 res = sas_scsi_find_task(task);
524 552
@@ -528,28 +556,23 @@ Again:
528 case TASK_IS_DONE: 556 case TASK_IS_DONE:
529 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, 557 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
530 task); 558 task);
531 task->task_done(task); 559 sas_eh_finish_cmd(cmd);
532 if (need_reset)
533 try_to_reset_cmd_device(shost, cmd);
534 continue; 560 continue;
535 case TASK_IS_ABORTED: 561 case TASK_IS_ABORTED:
536 SAS_DPRINTK("%s: task 0x%p is aborted\n", 562 SAS_DPRINTK("%s: task 0x%p is aborted\n",
537 __FUNCTION__, task); 563 __FUNCTION__, task);
538 task->task_done(task); 564 sas_eh_finish_cmd(cmd);
539 if (need_reset)
540 try_to_reset_cmd_device(shost, cmd);
541 continue; 565 continue;
542 case TASK_IS_AT_LU: 566 case TASK_IS_AT_LU:
543 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); 567 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
568 reset:
544 tmf_resp = sas_recover_lu(task->dev, cmd); 569 tmf_resp = sas_recover_lu(task->dev, cmd);
545 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { 570 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
546 SAS_DPRINTK("dev %016llx LU %x is " 571 SAS_DPRINTK("dev %016llx LU %x is "
547 "recovered\n", 572 "recovered\n",
548 SAS_ADDR(task->dev), 573 SAS_ADDR(task->dev),
549 cmd->device->lun); 574 cmd->device->lun);
550 task->task_done(task); 575 sas_eh_finish_cmd(cmd);
551 if (need_reset)
552 try_to_reset_cmd_device(shost, cmd);
553 sas_scsi_clear_queue_lu(work_q, cmd); 576 sas_scsi_clear_queue_lu(work_q, cmd);
554 goto Again; 577 goto Again;
555 } 578 }
@@ -560,15 +583,15 @@ Again:
560 task); 583 task);
561 tmf_resp = sas_recover_I_T(task->dev); 584 tmf_resp = sas_recover_I_T(task->dev);
562 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { 585 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
586 struct domain_device *dev = task->dev;
563 SAS_DPRINTK("I_T %016llx recovered\n", 587 SAS_DPRINTK("I_T %016llx recovered\n",
564 SAS_ADDR(task->dev->sas_addr)); 588 SAS_ADDR(task->dev->sas_addr));
565 task->task_done(task); 589 sas_eh_finish_cmd(cmd);
566 if (need_reset) 590 sas_scsi_clear_queue_I_T(work_q, dev);
567 try_to_reset_cmd_device(shost, cmd);
568 sas_scsi_clear_queue_I_T(work_q, task->dev);
569 goto Again; 591 goto Again;
570 } 592 }
571 /* Hammer time :-) */ 593 /* Hammer time :-) */
594 try_to_reset_cmd_device(cmd);
572 if (i->dft->lldd_clear_nexus_port) { 595 if (i->dft->lldd_clear_nexus_port) {
573 struct asd_sas_port *port = task->dev->port; 596 struct asd_sas_port *port = task->dev->port;
574 SAS_DPRINTK("clearing nexus for port:%d\n", 597 SAS_DPRINTK("clearing nexus for port:%d\n",
@@ -577,9 +600,7 @@ Again:
577 if (res == TMF_RESP_FUNC_COMPLETE) { 600 if (res == TMF_RESP_FUNC_COMPLETE) {
578 SAS_DPRINTK("clear nexus port:%d " 601 SAS_DPRINTK("clear nexus port:%d "
579 "succeeded\n", port->id); 602 "succeeded\n", port->id);
580 task->task_done(task); 603 sas_eh_finish_cmd(cmd);
581 if (need_reset)
582 try_to_reset_cmd_device(shost, cmd);
583 sas_scsi_clear_queue_port(work_q, 604 sas_scsi_clear_queue_port(work_q,
584 port); 605 port);
585 goto Again; 606 goto Again;
@@ -591,10 +612,8 @@ Again:
591 if (res == TMF_RESP_FUNC_COMPLETE) { 612 if (res == TMF_RESP_FUNC_COMPLETE) {
592 SAS_DPRINTK("clear nexus ha " 613 SAS_DPRINTK("clear nexus ha "
593 "succeeded\n"); 614 "succeeded\n");
594 task->task_done(task); 615 sas_eh_finish_cmd(cmd);
595 if (need_reset) 616 goto clear_q;
596 try_to_reset_cmd_device(shost, cmd);
597 goto out;
598 } 617 }
599 } 618 }
600 /* If we are here -- this means that no amount 619 /* If we are here -- this means that no amount
@@ -606,21 +625,16 @@ Again:
606 SAS_ADDR(task->dev->sas_addr), 625 SAS_ADDR(task->dev->sas_addr),
607 cmd->device->lun); 626 cmd->device->lun);
608 627
609 task->task_done(task); 628 sas_eh_finish_cmd(cmd);
610 if (need_reset)
611 try_to_reset_cmd_device(shost, cmd);
612 goto clear_q; 629 goto clear_q;
613 } 630 }
614 } 631 }
615out:
616 return list_empty(work_q); 632 return list_empty(work_q);
617clear_q: 633clear_q:
618 SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__); 634 SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__);
619 list_for_each_entry_safe(cmd, n, work_q, eh_entry) { 635 list_for_each_entry_safe(cmd, n, work_q, eh_entry)
620 struct sas_task *task = TO_SAS_TASK(cmd); 636 sas_eh_finish_cmd(cmd);
621 list_del_init(&cmd->eh_entry); 637
622 task->task_done(task);
623 }
624 return list_empty(work_q); 638 return list_empty(work_q);
625} 639}
626 640
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 848d97744b4d..0819f5f39de5 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -55,7 +55,6 @@ void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
55void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 55void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
56void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); 56void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
57void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); 57void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
58void lpfc_disable_node(struct lpfc_vport *, struct lpfc_nodelist *);
59struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, 58struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
60 struct lpfc_nodelist *, int); 59 struct lpfc_nodelist *, int);
61void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int); 60void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bd572d6b60af..976653440fba 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1694,7 +1694,7 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1694 NLP_STE_UNUSED_NODE); 1694 NLP_STE_UNUSED_NODE);
1695} 1695}
1696 1696
1697void 1697static void
1698lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 1698lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1699{ 1699{
1700 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) 1700 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f53206411cd8..fc0d9501aba6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -648,28 +648,24 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
648 unsigned long flags; 648 unsigned long flags;
649 struct hbq_dmabuf *hbq_buffer; 649 struct hbq_dmabuf *hbq_buffer;
650 650
651 if (!phba->hbqs[hbqno].hbq_alloc_buffer) { 651 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
652 return 0; 652 return 0;
653 }
654 653
655 start = phba->hbqs[hbqno].buffer_count; 654 start = phba->hbqs[hbqno].buffer_count;
656 end = count + start; 655 end = count + start;
657 if (end > lpfc_hbq_defs[hbqno]->entry_count) { 656 if (end > lpfc_hbq_defs[hbqno]->entry_count)
658 end = lpfc_hbq_defs[hbqno]->entry_count; 657 end = lpfc_hbq_defs[hbqno]->entry_count;
659 }
660 658
661 /* Check whether HBQ is still in use */ 659 /* Check whether HBQ is still in use */
662 spin_lock_irqsave(&phba->hbalock, flags); 660 spin_lock_irqsave(&phba->hbalock, flags);
663 if (!phba->hbq_in_use) { 661 if (!phba->hbq_in_use)
664 spin_unlock_irqrestore(&phba->hbalock, flags); 662 goto out;
665 return 0;
666 }
667 663
668 /* Populate HBQ entries */ 664 /* Populate HBQ entries */
669 for (i = start; i < end; i++) { 665 for (i = start; i < end; i++) {
670 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 666 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
671 if (!hbq_buffer) 667 if (!hbq_buffer)
672 return 1; 668 goto err;
673 hbq_buffer->tag = (i | (hbqno << 16)); 669 hbq_buffer->tag = (i | (hbqno << 16));
674 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 670 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
675 phba->hbqs[hbqno].buffer_count++; 671 phba->hbqs[hbqno].buffer_count++;
@@ -677,8 +673,12 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
677 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 673 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
678 } 674 }
679 675
676 out:
680 spin_unlock_irqrestore(&phba->hbalock, flags); 677 spin_unlock_irqrestore(&phba->hbalock, flags);
681 return 0; 678 return 0;
679 err:
680 spin_unlock_irqrestore(&phba->hbalock, flags);
681 return 1;
682} 682}
683 683
684int 684int
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 4d59ae8491a4..b135a1ed4b2c 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -151,19 +151,19 @@ mega_setup_mailbox(adapter_t *adapter)
151 */ 151 */
152 if( adapter->flag & BOARD_IOMAP ) { 152 if( adapter->flag & BOARD_IOMAP ) {
153 153
154 outb_p(adapter->mbox_dma & 0xFF, 154 outb(adapter->mbox_dma & 0xFF,
155 adapter->host->io_port + MBOX_PORT0); 155 adapter->host->io_port + MBOX_PORT0);
156 156
157 outb_p((adapter->mbox_dma >> 8) & 0xFF, 157 outb((adapter->mbox_dma >> 8) & 0xFF,
158 adapter->host->io_port + MBOX_PORT1); 158 adapter->host->io_port + MBOX_PORT1);
159 159
160 outb_p((adapter->mbox_dma >> 16) & 0xFF, 160 outb((adapter->mbox_dma >> 16) & 0xFF,
161 adapter->host->io_port + MBOX_PORT2); 161 adapter->host->io_port + MBOX_PORT2);
162 162
163 outb_p((adapter->mbox_dma >> 24) & 0xFF, 163 outb((adapter->mbox_dma >> 24) & 0xFF,
164 adapter->host->io_port + MBOX_PORT3); 164 adapter->host->io_port + MBOX_PORT3);
165 165
166 outb_p(ENABLE_MBOX_BYTE, 166 outb(ENABLE_MBOX_BYTE,
167 adapter->host->io_port + ENABLE_MBOX_REGION); 167 adapter->host->io_port + ENABLE_MBOX_REGION);
168 168
169 irq_ack(adapter); 169 irq_ack(adapter);
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 651d09b08f2a..fd63b06d9ef1 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1759,6 +1759,7 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
1759 1759
1760 switch (mesg.event) { 1760 switch (mesg.event) {
1761 case PM_EVENT_SUSPEND: 1761 case PM_EVENT_SUSPEND:
1762 case PM_EVENT_HIBERNATE:
1762 case PM_EVENT_FREEZE: 1763 case PM_EVENT_FREEZE:
1763 break; 1764 break;
1764 default: 1765 default:
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
new file mode 100644
index 000000000000..5ec0665b3a3d
--- /dev/null
+++ b/drivers/scsi/mvsas.c
@@ -0,0 +1,2969 @@
1/*
2 mvsas.c - Marvell 88SE6440 SAS/SATA support
3
4 Copyright 2007 Red Hat, Inc.
5 Copyright 2008 Marvell. <kewei@marvell.com>
6
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2,
10 or (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty
14 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 See the GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public
18 License along with this program; see the file COPYING. If not,
19 write to the Free Software Foundation, 675 Mass Ave, Cambridge,
20 MA 02139, USA.
21
22 ---------------------------------------------------------------
23
24 Random notes:
25 * hardware supports controlling the endian-ness of data
 26 structures. This permits elimination of all the le32_to_cpu()
27 and cpu_to_le32() conversions.
28
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/interrupt.h>
35#include <linux/spinlock.h>
36#include <linux/delay.h>
37#include <linux/dma-mapping.h>
38#include <linux/ctype.h>
39#include <scsi/libsas.h>
40#include <asm/io.h>
41
42#define DRV_NAME "mvsas"
43#define DRV_VERSION "0.5.1"
44#define _MV_DUMP 0
45#define MVS_DISABLE_NVRAM
46#define MVS_DISABLE_MSI
47
48#define mr32(reg) readl(regs + MVS_##reg)
49#define mw32(reg,val) writel((val), regs + MVS_##reg)
50#define mw32_f(reg,val) do { \
51 writel((val), regs + MVS_##reg); \
52 readl(regs + MVS_##reg); \
53 } while (0)
54
55#define MVS_ID_NOT_MAPPED 0xff
56#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
57
58/* offset for D2H FIS in the Received FIS List Structure */
59#define SATA_RECEIVED_D2H_FIS(reg_set) \
60 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
61#define SATA_RECEIVED_PIO_FIS(reg_set) \
62 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
63#define UNASSOC_D2H_FIS(id) \
64 ((void *) mvi->rx_fis + 0x100 * id)
65
66#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \
67 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
68 (__mc) != 0 && __rest; \
69 (++__lseq), (__mc) >>= 1)
70
71/* driver compile-time configuration */
72enum driver_configuration {
73 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
74 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
75 /* software requires power-of-2
76 ring size */
77
78 MVS_SLOTS = 512, /* command slots */
79 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
80 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
81 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
82 MVS_OAF_SZ = 64, /* Open address frame buffer size */
83
84 MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */
85
86 MVS_QUEUE_SIZE = 30, /* Support Queue depth */
87};
88
89/* unchangeable hardware details */
90enum hardware_details {
91 MVS_MAX_PHYS = 8, /* max. possible phys */
92 MVS_MAX_PORTS = 8, /* max. possible ports */
93 MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100),
94};
95
96/* peripheral registers (BAR2) */
97enum peripheral_registers {
98 SPI_CTL = 0x10, /* EEPROM control */
99 SPI_CMD = 0x14, /* EEPROM command */
100 SPI_DATA = 0x18, /* EEPROM data */
101};
102
103enum peripheral_register_bits {
104 TWSI_RDY = (1U << 7), /* EEPROM interface ready */
105 TWSI_RD = (1U << 4), /* EEPROM read access */
106
107 SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
108};
109
110/* enhanced mode registers (BAR4) */
111enum hw_registers {
112 MVS_GBL_CTL = 0x04, /* global control */
113 MVS_GBL_INT_STAT = 0x08, /* global irq status */
114 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
115 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
116
117 MVS_CTL = 0x100, /* SAS/SATA port configuration */
118 MVS_PCS = 0x104, /* SAS/SATA port control/status */
119 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
120 MVS_CMD_LIST_HI = 0x10C,
121 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
122 MVS_RX_FIS_HI = 0x114,
123
124 MVS_TX_CFG = 0x120, /* TX configuration */
125 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
126 MVS_TX_HI = 0x128,
127
128 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
129 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
130 MVS_RX_CFG = 0x134, /* RX configuration */
131 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
132 MVS_RX_HI = 0x13C,
133 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
134
135 MVS_INT_COAL = 0x148, /* Int coalescing config */
136 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
137 MVS_INT_STAT = 0x150, /* Central int status */
138 MVS_INT_MASK = 0x154, /* Central int enable */
139 MVS_INT_STAT_SRS = 0x158, /* SATA register set status */
140 MVS_INT_MASK_SRS = 0x15C,
141
142 /* ports 1-3 follow after this */
143 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
144 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
145 MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */
146 MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */
147
148 /* ports 1-3 follow after this */
149 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
150 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
151
152 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
153 MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
154
155 /* ports 1-3 follow after this */
156 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
157 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
158 MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */
159 MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */
160
161 /* ports 1-3 follow after this */
162 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
163 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
164 MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */
165 MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */
166};
167
168enum hw_register_bits {
169 /* MVS_GBL_CTL */
170 INT_EN = (1U << 1), /* Global int enable */
171 HBA_RST = (1U << 0), /* HBA reset */
172
173 /* MVS_GBL_INT_STAT */
174 INT_XOR = (1U << 4), /* XOR engine event */
175 INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
176
177 /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
178 SATA_TARGET = (1U << 16), /* port0 SATA target enable */
179 MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
180 MODE_AUTO_DET_PORT6 = (1U << 14),
181 MODE_AUTO_DET_PORT5 = (1U << 13),
182 MODE_AUTO_DET_PORT4 = (1U << 12),
183 MODE_AUTO_DET_PORT3 = (1U << 11),
184 MODE_AUTO_DET_PORT2 = (1U << 10),
185 MODE_AUTO_DET_PORT1 = (1U << 9),
186 MODE_AUTO_DET_PORT0 = (1U << 8),
187 MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
188 MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
189 MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
190 MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
191 MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
192 MODE_SAS_PORT6_MASK = (1U << 6),
193 MODE_SAS_PORT5_MASK = (1U << 5),
194 MODE_SAS_PORT4_MASK = (1U << 4),
195 MODE_SAS_PORT3_MASK = (1U << 3),
196 MODE_SAS_PORT2_MASK = (1U << 2),
197 MODE_SAS_PORT1_MASK = (1U << 1),
198 MODE_SAS_PORT0_MASK = (1U << 0),
199 MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
200 MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
201 MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
202 MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
203
204 /* SAS_MODE value may be
205 * dictated (in hw) by values
206 * of SATA_TARGET & AUTO_DET
207 */
208
209 /* MVS_TX_CFG */
210 TX_EN = (1U << 16), /* Enable TX */
211 TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
212
213 /* MVS_RX_CFG */
214 RX_EN = (1U << 16), /* Enable RX */
215 RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
216
217 /* MVS_INT_COAL */
218 COAL_EN = (1U << 16), /* Enable int coalescing */
219
220 /* MVS_INT_STAT, MVS_INT_MASK */
221 CINT_I2C = (1U << 31), /* I2C event */
222 CINT_SW0 = (1U << 30), /* software event 0 */
223 CINT_SW1 = (1U << 29), /* software event 1 */
224 CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
225 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
226 CINT_MEM = (1U << 26), /* int mem parity err */
227 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
228 CINT_SRS = (1U << 3), /* SRS event */
229 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
230 CINT_DONE = (1U << 0), /* cmd completion */
231
232 /* shl for ports 1-3 */
233 CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
234 CINT_PORT = (1U << 8), /* port0 event */
235 CINT_PORT_MASK_OFFSET = 8,
236 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
237
238 /* TX (delivery) ring bits */
239 TXQ_CMD_SHIFT = 29,
240 TXQ_CMD_SSP = 1, /* SSP protocol */
241 TXQ_CMD_SMP = 2, /* SMP protocol */
242 TXQ_CMD_STP = 3, /* STP/SATA protocol */
243 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
244 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
245 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
246 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
247 TXQ_SRS_SHIFT = 20, /* SATA register set */
248 TXQ_SRS_MASK = 0x7f,
249 TXQ_PHY_SHIFT = 12, /* PHY bitmap */
250 TXQ_PHY_MASK = 0xff,
251 TXQ_SLOT_MASK = 0xfff, /* slot number */
252
253 /* RX (completion) ring bits */
254 RXQ_GOOD = (1U << 23), /* Response good */
255 RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
256 RXQ_CMD_RX = (1U << 20), /* target cmd received */
257 RXQ_ATTN = (1U << 19), /* attention */
258 RXQ_RSP = (1U << 18), /* response frame xfer'd */
259 RXQ_ERR = (1U << 17), /* err info rec xfer'd */
260 RXQ_DONE = (1U << 16), /* cmd complete */
261 RXQ_SLOT_MASK = 0xfff, /* slot number */
262
263 /* mvs_cmd_hdr bits */
264 MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
265 MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
266
267 /* SSP initiator only */
268 MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
269
270 /* SSP initiator or target */
271 MCH_SSP_FR_TASK = 0x1, /* TASK frame */
272
273 /* SSP target only */
274 MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
275 MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
276 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
277 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
278
279 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
280 MCH_FBURST = (1U << 11), /* first burst (SSP) */
281 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
282 MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
283 MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
284 MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
285 MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
286 MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
287 MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
288 MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
289
290 CCTL_RST = (1U << 5), /* port logic reset */
291
292 /* 0(LSB first), 1(MSB first) */
293 CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
294 CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
295 CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
296 CCTL_ENDIAN_CMD = (1U << 0), /* command table */
297
298 /* MVS_Px_SER_CTLSTAT (per-phy control) */
299 PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
300 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
301 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
302 PHY_RST = (1U << 0), /* phy reset */
303 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
304 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
305 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
306 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
307 (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
308 PHY_READY_MASK = (1U << 20),
309
310 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
311 PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
312 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
313 PHYEV_AN = (1U << 18), /* SATA async notification */
314 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
315 PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
316 PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
317 PHYEV_IU_BIG = (1U << 11), /* IU too long err */
318 PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
319 PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
320 PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
321 PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
322 PHYEV_PORT_SEL = (1U << 6), /* port selector present */
323 PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
324 PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
325 PHYEV_ID_FAIL = (1U << 3), /* identify failed */
326 PHYEV_ID_DONE = (1U << 2), /* identify done */
327 PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
328 PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
329
330 /* MVS_PCS */
331 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
332 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
333 PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */
334 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
335 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
336 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
337 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
338 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
339 PCS_CMD_RST = (1U << 1), /* reset cmd issue */
340 PCS_CMD_EN = (1U << 0), /* enable cmd issue */
341
342 /* Port n Attached Device Info */
343 PORT_DEV_SSP_TRGT = (1U << 19),
344 PORT_DEV_SMP_TRGT = (1U << 18),
345 PORT_DEV_STP_TRGT = (1U << 17),
346 PORT_DEV_SSP_INIT = (1U << 11),
347 PORT_DEV_SMP_INIT = (1U << 10),
348 PORT_DEV_STP_INIT = (1U << 9),
349 PORT_PHY_ID_MASK = (0xFFU << 24),
350 PORT_DEV_TRGT_MASK = (0x7U << 17),
351 PORT_DEV_INIT_MASK = (0x7U << 9),
352 PORT_DEV_TYPE_MASK = (0x7U << 0),
353
354 /* Port n PHY Status */
355 PHY_RDY = (1U << 2),
356 PHY_DW_SYNC = (1U << 1),
357 PHY_OOB_DTCTD = (1U << 0),
358
359 /* VSR */
360 /* PHYMODE 6 (CDB) */
361 PHY_MODE6_DTL_SPEED = (1U << 27),
362};
363
364enum mvs_info_flags {
365 MVF_MSI = (1U << 0), /* MSI is enabled */
366 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
367};
368
369enum sas_cmd_port_registers {
370 CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
371 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
372 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
373 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
374 CMD_OOB_SPACE = 0x110, /* OOB space control register */
375 CMD_OOB_BURST = 0x114, /* OOB burst control register */
376 CMD_PHY_TIMER = 0x118, /* PHY timer control register */
377 CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
378 CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
379 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
380 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
381 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
382 CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
383 CMD_ID_TEST = 0x134, /* ID test register */
384 CMD_PL_TIMER = 0x138, /* PL timer register */
385 CMD_WD_TIMER = 0x13c, /* WD timer register */
386 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
387 CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
388 CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
389 CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
390 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
391 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
392 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
393 CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
394 CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
 395 CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memory BIST Status */
396 CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
397 CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
398 CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
399 CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
400 CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
401 CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
402 CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
403 CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
404 CMD_RESET_COUNT = 0x188, /* Reset Count */
405 CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
406 CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
407 CMD_PHY_CTL = 0x194, /* PHY Control and Status */
408 CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
409 CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
410 CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
411 CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
412 CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
413 CMD_HOST_CTL = 0x1AC, /* Host Control Status */
414 CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
415 CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
416 CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
417 CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
418 CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
419 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
420};
421
422/* SAS/SATA configuration port registers, aka phy registers */
423enum sas_sata_config_port_regs {
424 PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
425 PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
426 PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
427 PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
428 PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
429 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
430 PHYR_SATA_CTL = 0x18, /* SATA control */
431 PHYR_PHY_STAT = 0x1C, /* PHY status */
 432 PHYR_SATA_SIG0 = 0x20, /* port SATA signature FIS (Byte 0-3) */
 433 PHYR_SATA_SIG1 = 0x24, /* port SATA signature FIS (Byte 4-7) */
 434 PHYR_SATA_SIG2 = 0x28, /* port SATA signature FIS (Byte 8-11) */
 435 PHYR_SATA_SIG3 = 0x2c, /* port SATA signature FIS (Byte 12-15) */
436 PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
437 PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
438 PHYR_WIDE_PORT = 0x38, /* wide port participating */
439 PHYR_CURRENT0 = 0x80, /* current connection info 0 */
440 PHYR_CURRENT1 = 0x84, /* current connection info 1 */
441 PHYR_CURRENT2 = 0x88, /* current connection info 2 */
442};
443
444/* SAS/SATA Vendor Specific Port Registers */
445enum sas_sata_vsp_regs {
446 VSR_PHY_STAT = 0x00, /* Phy Status */
447 VSR_PHY_MODE1 = 0x01, /* phy tx */
448 VSR_PHY_MODE2 = 0x02, /* tx scc */
449 VSR_PHY_MODE3 = 0x03, /* pll */
450 VSR_PHY_MODE4 = 0x04, /* VCO */
451 VSR_PHY_MODE5 = 0x05, /* Rx */
452 VSR_PHY_MODE6 = 0x06, /* CDR */
453 VSR_PHY_MODE7 = 0x07, /* Impedance */
454 VSR_PHY_MODE8 = 0x08, /* Voltage */
455 VSR_PHY_MODE9 = 0x09, /* Test */
456 VSR_PHY_MODE10 = 0x0A, /* Power */
457 VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
 458 VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */
 459 VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */
460};
461
462enum pci_cfg_registers {
463 PCR_PHY_CTL = 0x40,
464 PCR_PHY_CTL2 = 0x90,
465 PCR_DEV_CTRL = 0xE8,
466};
467
468enum pci_cfg_register_bits {
469 PCTL_PWR_ON = (0xFU << 24),
470 PCTL_OFF = (0xFU << 12),
471 PRD_REQ_SIZE = (0x4000),
472 PRD_REQ_MASK = (0x00007000),
473};
474
475enum nvram_layout_offsets {
476 NVR_SIG = 0x00, /* 0xAA, 0x55 */
477 NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */
478};
479
480enum chip_flavors {
481 chip_6320,
482 chip_6440,
483 chip_6480,
484};
485
486enum port_type {
487 PORT_TYPE_SAS = (1L << 1),
488 PORT_TYPE_SATA = (1L << 0),
489};
490
491/* Command Table Format */
492enum ct_format {
493 /* SSP */
494 SSP_F_H = 0x00,
495 SSP_F_IU = 0x18,
496 SSP_F_MAX = 0x4D,
497 /* STP */
498 STP_CMD_FIS = 0x00,
499 STP_ATAPI_CMD = 0x40,
500 STP_F_MAX = 0x10,
501 /* SMP */
502 SMP_F_T = 0x00,
503 SMP_F_DEP = 0x01,
504 SMP_F_MAX = 0x101,
505};
506
507enum status_buffer {
508 SB_EIR_OFF = 0x00, /* Error Information Record */
509 SB_RFB_OFF = 0x08, /* Response Frame Buffer */
510 SB_RFB_MAX = 0x400, /* RFB size*/
511};
512
513enum error_info_rec {
514 CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
515};
516
517struct mvs_chip_info {
518 u32 n_phy;
519 u32 srs_sz;
520 u32 slot_width;
521};
522
523struct mvs_err_info {
524 __le32 flags;
525 __le32 flags2;
526};
527
528struct mvs_prd {
529 __le64 addr; /* 64-bit buffer address */
530 __le32 reserved;
531 __le32 len; /* 16-bit length */
532};
533
534struct mvs_cmd_hdr {
535 __le32 flags; /* PRD tbl len; SAS, SATA ctl */
536 __le32 lens; /* cmd, max resp frame len */
537 __le32 tags; /* targ port xfer tag; tag */
538 __le32 data_len; /* data xfer len */
539 __le64 cmd_tbl; /* command table address */
540 __le64 open_frame; /* open addr frame address */
541 __le64 status_buf; /* status buffer address */
542 __le64 prd_tbl; /* PRD tbl address */
543 __le32 reserved[4];
544};
545
546struct mvs_slot_info {
547 struct sas_task *task;
548 u32 n_elem;
549 u32 tx;
550
551 /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
552 * and PRD table
553 */
554 void *buf;
555 dma_addr_t buf_dma;
556#if _MV_DUMP
557 u32 cmd_size;
558#endif
559
560 void *response;
561};
562
563struct mvs_port {
564 struct asd_sas_port sas_port;
565 u8 port_attached;
566 u8 taskfileset;
567 u8 wide_port_phymap;
568};
569
570struct mvs_phy {
571 struct mvs_port *port;
572 struct asd_sas_phy sas_phy;
573 struct sas_identify identify;
574 struct scsi_device *sdev;
575 u64 dev_sas_addr;
576 u64 att_dev_sas_addr;
577 u32 att_dev_info;
578 u32 dev_info;
579 u32 phy_type;
580 u32 phy_status;
581 u32 irq_status;
582 u32 frame_rcvd_size;
583 u8 frame_rcvd[32];
584 u8 phy_attached;
585};
586
587struct mvs_info {
588 unsigned long flags;
589
590 spinlock_t lock; /* host-wide lock */
591 struct pci_dev *pdev; /* our device */
592 void __iomem *regs; /* enhanced mode registers */
593 void __iomem *peri_regs; /* peripheral registers */
594
595 u8 sas_addr[SAS_ADDR_SIZE];
596 struct sas_ha_struct sas; /* SCSI/SAS glue */
597 struct Scsi_Host *shost;
598
599 __le32 *tx; /* TX (delivery) DMA ring */
600 dma_addr_t tx_dma;
601 u32 tx_prod; /* cached next-producer idx */
602
603 __le32 *rx; /* RX (completion) DMA ring */
604 dma_addr_t rx_dma;
605 u32 rx_cons; /* RX consumer idx */
606
607 __le32 *rx_fis; /* RX'd FIS area */
608 dma_addr_t rx_fis_dma;
609
610 struct mvs_cmd_hdr *slot; /* DMA command header slots */
611 dma_addr_t slot_dma;
612
613 const struct mvs_chip_info *chip;
614
615 unsigned long tags[MVS_SLOTS];
616 struct mvs_slot_info slot_info[MVS_SLOTS];
617 /* further per-slot information */
618 struct mvs_phy phy[MVS_MAX_PHYS];
619 struct mvs_port port[MVS_MAX_PHYS];
620
621 u32 can_queue; /* per adapter */
622 u32 tag_out; /*Get*/
623 u32 tag_in; /*Give*/
624};
625
626struct mvs_queue_task {
627 struct list_head list;
628
629 void *uldd_task;
630};
631
632static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
633 void *funcdata);
634static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
635static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
636static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
637static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
638static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
639static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);
640
641static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
642static void mvs_detect_porttype(struct mvs_info *mvi, int i);
643static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
644
645static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
646static void mvs_scan_start(struct Scsi_Host *);
647static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev);
648
649static struct scsi_transport_template *mvs_stt;
650
651static const struct mvs_chip_info mvs_chips[] = {
652 [chip_6320] = { 2, 16, 9 },
653 [chip_6440] = { 4, 16, 9 },
654 [chip_6480] = { 8, 32, 10 },
655};
656
657static struct scsi_host_template mvs_sht = {
658 .module = THIS_MODULE,
659 .name = DRV_NAME,
660 .queuecommand = sas_queuecommand,
661 .target_alloc = sas_target_alloc,
662 .slave_configure = sas_slave_configure,
663 .slave_destroy = sas_slave_destroy,
664 .scan_finished = mvs_scan_finished,
665 .scan_start = mvs_scan_start,
666 .change_queue_depth = sas_change_queue_depth,
667 .change_queue_type = sas_change_queue_type,
668 .bios_param = sas_bios_param,
669 .can_queue = 1,
670 .cmd_per_lun = 1,
671 .this_id = -1,
672 .sg_tablesize = SG_ALL,
673 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
674 .use_clustering = ENABLE_CLUSTERING,
675 .eh_device_reset_handler = sas_eh_device_reset_handler,
676 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
677 .slave_alloc = mvs_sas_slave_alloc,
678 .target_destroy = sas_target_destroy,
679 .ioctl = sas_ioctl,
680};
681
682static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
683{
684 u32 i;
685 u32 run;
686 u32 offset;
687
688 offset = 0;
689 while (size) {
690 printk("%08X : ", baseaddr + offset);
691 if (size >= 16)
692 run = 16;
693 else
694 run = size;
695 size -= run;
696 for (i = 0; i < 16; i++) {
697 if (i < run)
698 printk("%02X ", (u32)data[i]);
699 else
700 printk(" ");
701 }
702 printk(": ");
703 for (i = 0; i < run; i++)
704 printk("%c", isalnum(data[i]) ? data[i] : '.');
705 printk("\n");
706 data = &data[16];
707 offset += run;
708 }
709 printk("\n");
710}
711
712static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
713 enum sas_protocol proto)
714{
715#if _MV_DUMP
716 u32 offset;
717 struct pci_dev *pdev = mvi->pdev;
718 struct mvs_slot_info *slot = &mvi->slot_info[tag];
719
720 offset = slot->cmd_size + MVS_OAF_SZ +
721 sizeof(struct mvs_prd) * slot->n_elem;
722 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
723 tag);
724 mvs_hexdump(32, (u8 *) slot->response,
725 (u32) slot->buf_dma + offset);
726#endif
727}
728
729static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
730 enum sas_protocol proto)
731{
732#if _MV_DUMP
733 u32 sz, w_ptr, r_ptr;
734 u64 addr;
735 void __iomem *regs = mvi->regs;
736 struct pci_dev *pdev = mvi->pdev;
737 struct mvs_slot_info *slot = &mvi->slot_info[tag];
738
739 /*Delivery Queue */
740 sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
741 w_ptr = mr32(TX_PROD_IDX) & TX_RING_SZ_MASK;
742 r_ptr = mr32(TX_CONS_IDX) & TX_RING_SZ_MASK;
743 addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
744 dev_printk(KERN_DEBUG, &pdev->dev,
745 "Delivery Queue Size=%04d , WRT_PTR=%04X , RD_PTR=%04X\n",
746 sz, w_ptr, r_ptr);
747 dev_printk(KERN_DEBUG, &pdev->dev,
748 "Delivery Queue Base Address=0x%llX (PA)"
749 "(tx_dma=0x%llX), Entry=%04d\n",
750 addr, mvi->tx_dma, w_ptr);
751 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
752 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
753 /*Command List */
754 addr = mr32(CMD_LIST_HI) << 16 << 16 | mr32(CMD_LIST_LO);
755 dev_printk(KERN_DEBUG, &pdev->dev,
756 "Command List Base Address=0x%llX (PA)"
757 "(slot_dma=0x%llX), Header=%03d\n",
758 addr, mvi->slot_dma, tag);
759 dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
760 /*mvs_cmd_hdr */
761 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
762 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
763 /*1.command table area */
764 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
765 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
766 /*2.open address frame area */
767 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
768 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
769 (u32) slot->buf_dma + slot->cmd_size);
770 /*3.status buffer */
771 mvs_hba_sb_dump(mvi, tag, proto);
772 /*4.PRD table */
773 dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
774 mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
775 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
776 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
777#endif
778}
779
780static void mvs_hba_cq_dump(struct mvs_info *mvi)
781{
782#if _MV_DUMP
783 u64 addr;
784 void __iomem *regs = mvi->regs;
785 struct pci_dev *pdev = mvi->pdev;
786 u32 entry = mvi->rx_cons + 1;
787 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
788
789 /*Completion Queue */
790 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
791 dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%08X\n",
792 (u32) mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
793 dev_printk(KERN_DEBUG, &pdev->dev,
794 "Completion List Base Address=0x%llX (PA), "
795 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
796 addr, entry - 1, mvi->rx[0]);
797 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
798 mvi->rx_dma + sizeof(u32) * entry);
799#endif
800}
801
802static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
803{
804 void __iomem *regs = mvi->regs;
805 u32 tmp;
806
807 tmp = mr32(GBL_CTL);
808
809 mw32(GBL_CTL, tmp | INT_EN);
810}
811
812static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
813{
814 void __iomem *regs = mvi->regs;
815 u32 tmp;
816
817 tmp = mr32(GBL_CTL);
818
819 mw32(GBL_CTL, tmp & ~INT_EN);
820}
821
822static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
823
824/* move to PCI layer or libata core? */
825static int pci_go_64(struct pci_dev *pdev)
826{
827 int rc;
828
829 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
830 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
831 if (rc) {
832 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
833 if (rc) {
834 dev_printk(KERN_ERR, &pdev->dev,
835 "64-bit DMA enable failed\n");
836 return rc;
837 }
838 }
839 } else {
840 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
841 if (rc) {
842 dev_printk(KERN_ERR, &pdev->dev,
843 "32-bit DMA enable failed\n");
844 return rc;
845 }
846 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
847 if (rc) {
848 dev_printk(KERN_ERR, &pdev->dev,
849 "32-bit consistent DMA enable failed\n");
850 return rc;
851 }
852 }
853
854 return rc;
855}
856
857static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
858{
859 mvi->tag_in = (mvi->tag_in + 1) & (MVS_SLOTS - 1);
860 mvi->tags[mvi->tag_in] = tag;
861}
862
863static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
864{
865 mvi->tag_out = (mvi->tag_out - 1) & (MVS_SLOTS - 1);
866}
867
868static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
869{
870 if (mvi->tag_out != mvi->tag_in) {
871 *tag_out = mvi->tags[mvi->tag_out];
872 mvi->tag_out = (mvi->tag_out + 1) & (MVS_SLOTS - 1);
873 return 0;
874 }
875 return -EBUSY;
876}
877
878static void mvs_tag_init(struct mvs_info *mvi)
879{
880 int i;
881 for (i = 0; i < MVS_SLOTS; ++i)
882 mvi->tags[i] = i;
883 mvi->tag_out = 0;
884 mvi->tag_in = MVS_SLOTS - 1;
885}
886
887#ifndef MVS_DISABLE_NVRAM
888static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
889{
890 int timeout = 1000;
891
892 if (addr & ~SPI_ADDR_MASK)
893 return -EINVAL;
894
895 writel(addr, regs + SPI_CMD);
896 writel(TWSI_RD, regs + SPI_CTL);
897
898 while (timeout-- > 0) {
899 if (readl(regs + SPI_CTL) & TWSI_RDY) {
900 *data = readl(regs + SPI_DATA);
901 return 0;
902 }
903
904 udelay(10);
905 }
906
907 return -EBUSY;
908}
909
910static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
911 void *buf, u32 buflen)
912{
913 u32 addr_end, tmp_addr, i, j;
914 u32 tmp = 0;
915 int rc;
916 u8 *tmp8, *buf8 = buf;
917
918 addr_end = addr + buflen;
919 tmp_addr = ALIGN(addr, 4);
920 if (addr > 0xff)
921 return -EINVAL;
922
923 j = addr & 0x3;
924 if (j) {
925 rc = mvs_eep_read(regs, tmp_addr, &tmp);
926 if (rc)
927 return rc;
928
929 tmp8 = (u8 *)&tmp;
930 for (i = j; i < 4; i++)
931 *buf8++ = tmp8[i];
932
933 tmp_addr += 4;
934 }
935
936 for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
937 rc = mvs_eep_read(regs, tmp_addr, &tmp);
938 if (rc)
939 return rc;
940
941 memcpy(buf8, &tmp, 4);
942 buf8 += 4;
943 }
944
945 if (tmp_addr < addr_end) {
946 rc = mvs_eep_read(regs, tmp_addr, &tmp);
947 if (rc)
948 return rc;
949
950 tmp8 = (u8 *)&tmp;
951 j = addr_end - tmp_addr;
952 for (i = 0; i < j; i++)
953 *buf8++ = tmp8[i];
954
955 tmp_addr += 4;
956 }
957
958 return 0;
959}
960#endif
961
962static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
963 void *buf, u32 buflen)
964{
965#ifndef MVS_DISABLE_NVRAM
966 void __iomem *regs = mvi->regs;
967 int rc, i;
968 u32 sum;
969 u8 hdr[2], *tmp;
970 const char *msg;
971
972 rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
973 if (rc) {
974 msg = "nvram hdr read failed";
975 goto err_out;
976 }
977 rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
978 if (rc) {
979 msg = "nvram read failed";
980 goto err_out;
981 }
982
983 if (hdr[0] != 0x5A) {
984 /* entry id */
985 msg = "invalid nvram entry id";
986 rc = -ENOENT;
987 goto err_out;
988 }
989
990 tmp = buf;
991 sum = ((u32)hdr[0]) + ((u32)hdr[1]);
992 for (i = 0; i < buflen; i++)
993 sum += ((u32)tmp[i]);
994
995 if (sum) {
996 msg = "nvram checksum failure";
997 rc = -EILSEQ;
998 goto err_out;
999 }
1000
1001 return 0;
1002
1003err_out:
1004 dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
1005 return rc;
1006#else
1007 /* FIXME , For SAS target mode */
1008 memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
1009 return 0;
1010#endif
1011}
1012
1013static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
1014{
1015 struct mvs_phy *phy = &mvi->phy[i];
1016
1017 if (!phy->phy_attached)
1018 return;
1019
1020 if (phy->phy_type & PORT_TYPE_SAS) {
1021 struct sas_identify_frame *id;
1022
1023 id = (struct sas_identify_frame *)phy->frame_rcvd;
1024 id->dev_type = phy->identify.device_type;
1025 id->initiator_bits = SAS_PROTOCOL_ALL;
1026 id->target_bits = phy->identify.target_port_protocols;
1027 } else if (phy->phy_type & PORT_TYPE_SATA) {
1028 /* TODO */
1029 }
1030 mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
1031 mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
1032 PORTE_BYTES_DMAED);
1033}
1034
1035static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
1036{
1037 /* give the phy enabling interrupt event time to come in (1s
1038 * is empirically about all it takes) */
1039 if (time < HZ)
1040 return 0;
1041 /* Wait for discovery to finish */
1042 scsi_flush_work(shost);
1043 return 1;
1044}
1045
1046static void mvs_scan_start(struct Scsi_Host *shost)
1047{
1048 int i;
1049 struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
1050
1051 for (i = 0; i < mvi->chip->n_phy; ++i) {
1052 mvs_bytes_dmaed(mvi, i);
1053 }
1054}
1055
1056static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev)
1057{
1058 int rc;
1059
1060 rc = sas_slave_alloc(scsi_dev);
1061
1062 return rc;
1063}
1064
1065static void mvs_int_port(struct mvs_info *mvi, int port_no, u32 events)
1066{
1067 struct pci_dev *pdev = mvi->pdev;
1068 struct sas_ha_struct *sas_ha = &mvi->sas;
1069 struct mvs_phy *phy = &mvi->phy[port_no];
1070 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1071
1072 phy->irq_status = mvs_read_port_irq_stat(mvi, port_no);
1073 /*
 1074 * events now holds the port event;
 1075 * we need to check the per-port interrupt status.
1076 */
1077 dev_printk(KERN_DEBUG, &pdev->dev,
1078 "Port %d Event = %X\n",
1079 port_no, phy->irq_status);
1080
1081 if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
1082 if (!mvs_is_phy_ready(mvi, port_no)) {
1083 sas_phy_disconnected(sas_phy);
1084 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1085 } else
1086 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
1087 }
1088 if (!(phy->irq_status & PHYEV_DEC_ERR)) {
1089 if (phy->irq_status & PHYEV_COMWAKE) {
1090 u32 tmp = mvs_read_port_irq_mask(mvi, port_no);
1091 mvs_write_port_irq_mask(mvi, port_no,
1092 tmp | PHYEV_SIG_FIS);
1093 }
1094 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
1095 phy->phy_status = mvs_is_phy_ready(mvi, port_no);
1096 if (phy->phy_status) {
1097 mvs_detect_porttype(mvi, port_no);
1098
1099 if (phy->phy_type & PORT_TYPE_SATA) {
1100 u32 tmp = mvs_read_port_irq_mask(mvi,
1101 port_no);
1102 tmp &= ~PHYEV_SIG_FIS;
1103 mvs_write_port_irq_mask(mvi,
1104 port_no, tmp);
1105 }
1106
1107 mvs_update_phyinfo(mvi, port_no, 0);
1108 sas_ha->notify_phy_event(sas_phy,
1109 PHYE_OOB_DONE);
1110 mvs_bytes_dmaed(mvi, port_no);
1111 } else {
1112 dev_printk(KERN_DEBUG, &pdev->dev,
1113 "plugin interrupt but phy is gone\n");
1114 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
1115 NULL);
1116 }
1117 } else if (phy->irq_status & PHYEV_BROAD_CH)
1118 sas_ha->notify_port_event(sas_phy,
1119 PORTE_BROADCAST_RCVD);
1120 }
1121 mvs_write_port_irq_stat(mvi, port_no, phy->irq_status);
1122}
1123
1124static void mvs_int_sata(struct mvs_info *mvi)
1125{
1126 /* FIXME */
1127}
1128
1129static void mvs_slot_free(struct mvs_info *mvi, struct sas_task *task,
1130 struct mvs_slot_info *slot, u32 slot_idx)
1131{
1132 if (!sas_protocol_ata(task->task_proto))
1133 if (slot->n_elem)
1134 pci_unmap_sg(mvi->pdev, task->scatter,
1135 slot->n_elem, task->data_dir);
1136
1137 switch (task->task_proto) {
1138 case SAS_PROTOCOL_SMP:
1139 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
1140 PCI_DMA_FROMDEVICE);
1141 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
1142 PCI_DMA_TODEVICE);
1143 break;
1144
1145 case SAS_PROTOCOL_SATA:
1146 case SAS_PROTOCOL_STP:
1147 case SAS_PROTOCOL_SSP:
1148 default:
1149 /* do nothing */
1150 break;
1151 }
1152
1153 slot->task = NULL;
1154 mvs_tag_clear(mvi, slot_idx);
1155}
1156
1157static void mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1158 u32 slot_idx)
1159{
1160 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1161 u64 err_dw0 = *(u32 *) slot->response;
1162 void __iomem *regs = mvi->regs;
1163 u32 tmp;
1164
1165 if (err_dw0 & CMD_ISS_STPD)
1166 if (sas_protocol_ata(task->task_proto)) {
1167 tmp = mr32(INT_STAT_SRS);
1168 mw32(INT_STAT_SRS, tmp & 0xFFFF);
1169 }
1170
1171 mvs_hba_sb_dump(mvi, slot_idx, task->task_proto);
1172}
1173
1174static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
1175{
1176 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1177 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1178 struct sas_task *task = slot->task;
1179 struct task_status_struct *tstat = &task->task_status;
1180 struct mvs_port *port = &mvi->port[task->dev->port->id];
1181 bool aborted;
1182 void *to;
1183
1184 spin_lock(&task->task_state_lock);
1185 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1186 if (!aborted) {
1187 task->task_state_flags &=
1188 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1189 task->task_state_flags |= SAS_TASK_STATE_DONE;
1190 }
1191 spin_unlock(&task->task_state_lock);
1192
1193 if (aborted)
1194 return -1;
1195
1196 memset(tstat, 0, sizeof(*tstat));
1197 tstat->resp = SAS_TASK_COMPLETE;
1198
1199
1200 if (unlikely(!port->port_attached)) {
1201 tstat->stat = SAS_PHY_DOWN;
1202 goto out;
1203 }
1204
1205 /* error info record present */
1206 if ((rx_desc & RXQ_ERR) && (*(u64 *) slot->response)) {
1207 tstat->stat = SAM_CHECK_COND;
1208 mvs_slot_err(mvi, task, slot_idx);
1209 goto out;
1210 }
1211
1212 switch (task->task_proto) {
1213 case SAS_PROTOCOL_SSP:
1214 /* hw says status == 0, datapres == 0 */
1215 if (rx_desc & RXQ_GOOD) {
1216 tstat->stat = SAM_GOOD;
1217 tstat->resp = SAS_TASK_COMPLETE;
1218 }
1219 /* response frame present */
1220 else if (rx_desc & RXQ_RSP) {
1221 struct ssp_response_iu *iu =
1222 slot->response + sizeof(struct mvs_err_info);
1223 sas_ssp_task_response(&mvi->pdev->dev, task, iu);
1224 }
1225
1226 /* should never happen? */
1227 else
1228 tstat->stat = SAM_CHECK_COND;
1229 break;
1230
1231 case SAS_PROTOCOL_SMP: {
1232 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1233 tstat->stat = SAM_GOOD;
1234 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
1235 memcpy(to + sg_resp->offset,
1236 slot->response + sizeof(struct mvs_err_info),
1237 sg_dma_len(sg_resp));
1238 kunmap_atomic(to, KM_IRQ0);
1239 break;
1240 }
1241
1242 case SAS_PROTOCOL_SATA:
1243 case SAS_PROTOCOL_STP:
1244 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
1245 struct ata_task_resp *resp =
1246 (struct ata_task_resp *)tstat->buf;
1247
1248 if ((rx_desc & (RXQ_DONE | RXQ_ERR | RXQ_ATTN)) ==
1249 RXQ_DONE)
1250 tstat->stat = SAM_GOOD;
1251 else
1252 tstat->stat = SAM_CHECK_COND;
1253
1254 resp->frame_len = sizeof(struct dev_to_host_fis);
1255 memcpy(&resp->ending_fis[0],
1256 SATA_RECEIVED_D2H_FIS(port->taskfileset),
1257 sizeof(struct dev_to_host_fis));
1258 if (resp->ending_fis[2] & ATA_ERR)
1259 mvs_hexdump(16, resp->ending_fis, 0);
1260 break;
1261 }
1262
1263 default:
1264 tstat->stat = SAM_CHECK_COND;
1265 break;
1266 }
1267
1268out:
1269 mvs_slot_free(mvi, task, slot, slot_idx);
1270 task->task_done(task);
1271 return tstat->stat;
1272}
1273
1274static void mvs_int_full(struct mvs_info *mvi)
1275{
1276 void __iomem *regs = mvi->regs;
1277 u32 tmp, stat;
1278 int i;
1279
1280 stat = mr32(INT_STAT);
1281
1282 mvs_int_rx(mvi, false);
1283
1284 for (i = 0; i < MVS_MAX_PORTS; i++) {
1285 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
1286 if (tmp)
1287 mvs_int_port(mvi, i, tmp);
1288 }
1289
1290 if (stat & CINT_SRS)
1291 mvs_int_sata(mvi);
1292
1293 mw32(INT_STAT, stat);
1294}
1295
1296static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
1297{
1298 void __iomem *regs = mvi->regs;
1299 u32 rx_prod_idx, rx_desc;
1300 bool attn = false;
1301 struct pci_dev *pdev = mvi->pdev;
1302
1303 /* the first dword in the RX ring is special: it contains
1304 * a mirror of the hardware's RX producer index, so that
1305 * we don't have to stall the CPU reading that register.
1306 * The actual RX ring is offset by one dword, due to this.
1307 */
1308 rx_prod_idx = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
1309 if (rx_prod_idx == 0xfff) { /* h/w hasn't touched RX ring yet */
1310 mvi->rx_cons = 0xfff;
1311 return 0;
1312 }
1313
1314 /* The CMPL_Q update may come late; read the index from the register
1315 * and try again.  Note: if interrupt coalescing is enabled, the
1316 * register has to be read every time.
1317 */
1318 if (mvi->rx_cons == rx_prod_idx)
1319 return 0;
1320
1321 if (mvi->rx_cons == 0xfff)
1322 mvi->rx_cons = MVS_RX_RING_SZ - 1;
1323
1324 while (mvi->rx_cons != rx_prod_idx) {
1325
1326 /* increment our internal RX consumer pointer */
1327 mvi->rx_cons = (mvi->rx_cons + 1) & (MVS_RX_RING_SZ - 1);
1328
1329 rx_desc = le32_to_cpu(mvi->rx[mvi->rx_cons + 1]);
1330
1331 mvs_hba_cq_dump(mvi);
1332
1333 if (likely(rx_desc & RXQ_DONE))
1334 mvs_slot_complete(mvi, rx_desc);
1335 if (rx_desc & RXQ_ATTN) {
1336 attn = true;
1337 dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
1338 rx_desc);
1339 } else if (rx_desc & RXQ_ERR) {
1340 dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
1341 rx_desc);
1342 }
1343 }
1344
1345 if (attn && self_clear)
1346 mvs_int_full(mvi);
1347
1348 return 0;
1349}
1350
1351static irqreturn_t mvs_interrupt(int irq, void *opaque)
1352{
1353 struct mvs_info *mvi = opaque;
1354 void __iomem *regs = mvi->regs;
1355 u32 stat;
1356
1357 stat = mr32(GBL_INT_STAT);
1358
1359 /* clear CMD_CMPLT ASAP */
1360 mw32_f(INT_STAT, CINT_DONE);
1361
1362 if (stat == 0 || stat == 0xffffffff)
1363 return IRQ_NONE;
1364
1365 spin_lock(&mvi->lock);
1366
1367 mvs_int_full(mvi);
1368
1369 spin_unlock(&mvi->lock);
1370
1371 return IRQ_HANDLED;
1372}
1373
1374#ifndef MVS_DISABLE_MSI
1375static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
1376{
1377 struct mvs_info *mvi = opaque;
1378
1379 spin_lock(&mvi->lock);
1380
1381 mvs_int_rx(mvi, true);
1382
1383 spin_unlock(&mvi->lock);
1384
1385 return IRQ_HANDLED;
1386}
1387#endif
1388
1389struct mvs_task_exec_info {
1390 struct sas_task *task;
1391 struct mvs_cmd_hdr *hdr;
1392 struct mvs_port *port;
1393 u32 tag;
1394 int n_elem;
1395};
1396
1397static int mvs_task_prep_smp(struct mvs_info *mvi,
1398 struct mvs_task_exec_info *tei)
1399{
1400 int elem, rc, i;
1401 struct sas_task *task = tei->task;
1402 struct mvs_cmd_hdr *hdr = tei->hdr;
1403 struct scatterlist *sg_req, *sg_resp;
1404 u32 req_len, resp_len, tag = tei->tag;
1405 void *buf_tmp;
1406 u8 *buf_oaf;
1407 dma_addr_t buf_tmp_dma;
1408 struct mvs_prd *buf_prd;
1409 struct scatterlist *sg;
1410 struct mvs_slot_info *slot = &mvi->slot_info[tag];
1411 struct asd_sas_port *sas_port = task->dev->port;
1412 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
1413#if _MV_DUMP
1414 u8 *buf_cmd;
1415 void *from;
1416#endif
1417 /*
1418 * DMA-map SMP request, response buffers
1419 */
1420 sg_req = &task->smp_task.smp_req;
1421 elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
1422 if (!elem)
1423 return -ENOMEM;
1424 req_len = sg_dma_len(sg_req);
1425
1426 sg_resp = &task->smp_task.smp_resp;
1427 elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
1428 if (!elem) {
1429 rc = -ENOMEM;
1430 goto err_out;
1431 }
1432 resp_len = sg_dma_len(sg_resp);
1433
1434 /* must be in dwords */
1435 if ((req_len & 0x3) || (resp_len & 0x3)) {
1436 rc = -EINVAL;
1437 goto err_out_2;
1438 }
1439
1440 /*
1441 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1442 */
1443
1444 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
1445 buf_tmp = slot->buf;
1446 buf_tmp_dma = slot->buf_dma;
1447
1448#if _MV_DUMP
1449 buf_cmd = buf_tmp;
1450 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1451 buf_tmp += req_len;
1452 buf_tmp_dma += req_len;
1453 slot->cmd_size = req_len;
1454#else
1455 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
1456#endif
1457
1458 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1459 buf_oaf = buf_tmp;
1460 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1461
1462 buf_tmp += MVS_OAF_SZ;
1463 buf_tmp_dma += MVS_OAF_SZ;
1464
1465 /* region 3: PRD table ********************************************* */
1466 buf_prd = buf_tmp;
1467 if (tei->n_elem)
1468 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1469 else
1470 hdr->prd_tbl = 0;
1471
1472 i = sizeof(struct mvs_prd) * tei->n_elem;
1473 buf_tmp += i;
1474 buf_tmp_dma += i;
1475
1476 /* region 4: status buffer (the larger the PRD, the smaller this buf) ** */
1477 slot->response = buf_tmp;
1478 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1479
1480 /*
1481 * Fill in TX ring and command slot header
1482 */
1483 slot->tx = mvi->tx_prod;
1484 mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
1485 TXQ_MODE_I | tag |
1486 (sas_port->phy_mask << TXQ_PHY_SHIFT));
1487
1488 hdr->flags |= flags;
1489 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
1490 hdr->tags = cpu_to_le32(tag);
1491 hdr->data_len = 0;
1492
1493 /* generate open address frame hdr (first 12 bytes) */
1494 buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
1495 buf_oaf[1] = task->dev->linkrate & 0xf;
1496 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
1497 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1498
1499 /* fill in PRD (scatter/gather) table, if any */
1500 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1501 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1502 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1503 buf_prd++;
1504 }
1505
1506#if _MV_DUMP
1507 /* copy cmd table */
1508 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
1509 memcpy(buf_cmd, from + sg_req->offset, req_len);
1510 kunmap_atomic(from, KM_IRQ0);
1511#endif
1512 return 0;
1513
1514err_out_2:
1515 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
1516 PCI_DMA_FROMDEVICE);
1517err_out:
1518 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
1519 PCI_DMA_TODEVICE);
1520 return rc;
1521}
1522
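/* Release the SATA register set held by this port: clear its enable bit
 * (sets 0-15 live in PCS, 16 and up in CTL), ack any pending SRS interrupt
 * status for it, and mark the port's taskfileset as unmapped.
 */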
1523static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
1524{
1525 void __iomem *regs = mvi->regs;
1526 u32 tmp, offs;
1527 u8 *tfs = &port->taskfileset;
1528
1529 if (*tfs == MVS_ID_NOT_MAPPED)
1530 return;
1531
1532 offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
1533 if (*tfs < 16) {
1534 tmp = mr32(PCS);
1535 mw32(PCS, tmp & ~offs);
1536 } else {
1537 tmp = mr32(CTL);
1538 mw32(CTL, tmp & ~offs);
1539 }
1540
1541 tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
1542 if (tmp)
1543 mw32(INT_STAT_SRS, tmp);
1544
1545 *tfs = MVS_ID_NOT_MAPPED;
1546}
1547
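/* Claim a free SATA register set for this port; sets 0-15 are enabled via
 * the PCS register, higher ones via CTL.  Returns MVS_ID_NOT_MAPPED when
 * none is available.
 */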
1548static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
1549{
1550 int i;
1551 u32 tmp, offs;
1552 void __iomem *regs = mvi->regs;
1553
1554 if (port->taskfileset != MVS_ID_NOT_MAPPED)
1555 return 0;
1556
1557 tmp = mr32(PCS);
1558
1559 for (i = 0; i < mvi->chip->srs_sz; i++) {
1560 if (i == 16)
1561 tmp = mr32(CTL);
1562 offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
1563 if (!(tmp & offs)) {
1564 port->taskfileset = i;
1565
1566 if (i < 16)
1567 mw32(PCS, tmp | offs);
1568 else
1569 mw32(CTL, tmp | offs);
1570 tmp = mr32(INT_STAT_SRS) & (1U << i);
1571 if (tmp)
1572 mw32(INT_STAT_SRS, tmp);
1573 return 0;
1574 }
1575 }
1576 return MVS_ID_NOT_MAPPED;
1577}
1578
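/* Return the NCQ tag of the libata queued command attached to this task,
 * or 0 if there is none.
 */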
1579static u32 mvs_get_ncq_tag(struct sas_task *task)
1580{
1581 u32 tag = 0;
1582 struct ata_queued_cmd *qc = task->uldd_task;
1583
1584 if (qc)
1585 tag = qc->tag;
1586
1587 return tag;
1588}
1589
1590static int mvs_task_prep_ata(struct mvs_info *mvi,
1591 struct mvs_task_exec_info *tei)
1592{
1593 struct sas_task *task = tei->task;
1594 struct domain_device *dev = task->dev;
1595 struct mvs_cmd_hdr *hdr = tei->hdr;
1596 struct asd_sas_port *sas_port = dev->port;
1597 struct mvs_slot_info *slot;
1598 struct scatterlist *sg;
1599 struct mvs_prd *buf_prd;
1600 struct mvs_port *port = tei->port;
1601 u32 tag = tei->tag;
1602 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
1603 void *buf_tmp;
1604 u8 *buf_cmd, *buf_oaf;
1605 dma_addr_t buf_tmp_dma;
1606 u32 i, req_len, resp_len;
1607 const u32 max_resp_len = SB_RFB_MAX;
1608
1609 if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
1610 return -EBUSY;
1611
1612 slot = &mvi->slot_info[tag];
1613 slot->tx = mvi->tx_prod;
1614 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
1615 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
1616 (sas_port->phy_mask << TXQ_PHY_SHIFT) |
1617 (port->taskfileset << TXQ_SRS_SHIFT));
1618
1619 if (task->ata_task.use_ncq)
1620 flags |= MCH_FPDMA;
1621 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
1622 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
1623 flags |= MCH_ATAPI;
1624 }
1625
1626 /* FIXME: fill in port multiplier number */
1627
1628 hdr->flags = cpu_to_le32(flags);
1629
1630 /* FIXME: the low-order 5 bits carry the TAG when NCQ is enabled */
1631 if (task->ata_task.use_ncq) {
1632 hdr->tags = cpu_to_le32(mvs_get_ncq_tag(task));
1633 /* fill in task file */
1634 task->ata_task.fis.sector_count = hdr->tags << 3;
1635 } else
1636 hdr->tags = cpu_to_le32(tag);
1637 hdr->data_len = cpu_to_le32(task->total_xfer_len);
1638
1639 /*
1640 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1641 */
1642
1643 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
1644 buf_cmd = buf_tmp = slot->buf;
1645 buf_tmp_dma = slot->buf_dma;
1646
1647 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1648
1649 buf_tmp += MVS_ATA_CMD_SZ;
1650 buf_tmp_dma += MVS_ATA_CMD_SZ;
1651#if _MV_DUMP
1652 slot->cmd_size = MVS_ATA_CMD_SZ;
1653#endif
1654
1655 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1656 /* used for STP. unused for SATA? */
1657 buf_oaf = buf_tmp;
1658 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1659
1660 buf_tmp += MVS_OAF_SZ;
1661 buf_tmp_dma += MVS_OAF_SZ;
1662
1663 /* region 3: PRD table ********************************************* */
1664 buf_prd = buf_tmp;
1665 if (tei->n_elem)
1666 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1667 else
1668 hdr->prd_tbl = 0;
1669
1670 i = sizeof(struct mvs_prd) * tei->n_elem;
1671 buf_tmp += i;
1672 buf_tmp_dma += i;
1673
1674 /* region 4: status buffer (the larger the PRD, the smaller this buf) ** */
1675 /* FIXME: probably unused, for SATA. kept here just in case
1676 * we get a STP/SATA error information record
1677 */
1678 slot->response = buf_tmp;
1679 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1680
1681 req_len = sizeof(struct host_to_dev_fis);
1682 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
1683 sizeof(struct mvs_err_info) - i;
1684
1685 /* request, response lengths */
1686 resp_len = min(resp_len, max_resp_len);
1687 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
1688
1689 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
1690 /* fill in command FIS and ATAPI CDB */
1691 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
1692 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
1693 memcpy(buf_cmd + STP_ATAPI_CMD,
1694 task->ata_task.atapi_packet, 16);
1695
1696 /* generate open address frame hdr (first 12 bytes) */
1697 buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */
1698 buf_oaf[1] = task->dev->linkrate & 0xf;
1699 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
1700 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1701
1702 /* fill in PRD (scatter/gather) table, if any */
1703 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1704 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1705 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1706 buf_prd++;
1707 }
1708
1709 return 0;
1710}
1711
1712static int mvs_task_prep_ssp(struct mvs_info *mvi,
1713 struct mvs_task_exec_info *tei)
1714{
1715 struct sas_task *task = tei->task;
1716 struct mvs_cmd_hdr *hdr = tei->hdr;
1717 struct mvs_port *port = tei->port;
1718 struct mvs_slot_info *slot;
1719 struct scatterlist *sg;
1720 struct mvs_prd *buf_prd;
1721 struct ssp_frame_hdr *ssp_hdr;
1722 void *buf_tmp;
1723 u8 *buf_cmd, *buf_oaf, fburst = 0;
1724 dma_addr_t buf_tmp_dma;
1725 u32 flags;
1726 u32 resp_len, req_len, i, tag = tei->tag;
1727 const u32 max_resp_len = SB_RFB_MAX;
1728
1729 slot = &mvi->slot_info[tag];
1730
1731 slot->tx = mvi->tx_prod;
1732 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
1733 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
1734 (port->wide_port_phymap << TXQ_PHY_SHIFT));
1735
1736 flags = MCH_RETRY;
1737 if (task->ssp_task.enable_first_burst) {
1738 flags |= MCH_FBURST;
1739 fburst = (1 << 7);
1740 }
1741 hdr->flags = cpu_to_le32(flags |
1742 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
1743 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));
1744
1745 hdr->tags = cpu_to_le32(tag);
1746 hdr->data_len = cpu_to_le32(task->total_xfer_len);
1747
1748 /*
1749 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1750 */
1751
1752 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
1753 buf_cmd = buf_tmp = slot->buf;
1754 buf_tmp_dma = slot->buf_dma;
1755
1756 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1757
1758 buf_tmp += MVS_SSP_CMD_SZ;
1759 buf_tmp_dma += MVS_SSP_CMD_SZ;
1760#if _MV_DUMP
1761 slot->cmd_size = MVS_SSP_CMD_SZ;
1762#endif
1763
1764 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1765 buf_oaf = buf_tmp;
1766 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1767
1768 buf_tmp += MVS_OAF_SZ;
1769 buf_tmp_dma += MVS_OAF_SZ;
1770
1771 /* region 3: PRD table ********************************************* */
1772 buf_prd = buf_tmp;
1773 if (tei->n_elem)
1774 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1775 else
1776 hdr->prd_tbl = 0;
1777
1778 i = sizeof(struct mvs_prd) * tei->n_elem;
1779 buf_tmp += i;
1780 buf_tmp_dma += i;
1781
1782 /* region 4: status buffer (the larger the PRD, the smaller this buf) ** */
1783 slot->response = buf_tmp;
1784 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1785
1786 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
1787 sizeof(struct mvs_err_info) - i;
1788 resp_len = min(resp_len, max_resp_len);
1789
1790 req_len = sizeof(struct ssp_frame_hdr) + 28;
1791
1792 /* request, response lengths */
1793 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
1794
1795 /* generate open address frame hdr (first 12 bytes) */
1796 buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */
1797 buf_oaf[1] = task->dev->linkrate & 0xf;
1798 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
1799 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1800
1801 /* fill in SSP frame header (Command Table.SSP frame header) */
1802 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
1803 ssp_hdr->frame_type = SSP_COMMAND;
1804 memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
1805 HASHED_SAS_ADDR_SIZE);
1806 memcpy(ssp_hdr->hashed_src_addr,
1807 task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
1808 ssp_hdr->tag = cpu_to_be16(tag);
1809
1810 /* fill in command frame IU */
1811 buf_cmd += sizeof(*ssp_hdr);
1812 memcpy(buf_cmd, &task->ssp_task.LUN, 8);
1813 buf_cmd[9] = fburst | task->ssp_task.task_attr |
1814 (task->ssp_task.task_prio << 3);
1815 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
1816
1817 /* fill in PRD (scatter/gather) table, if any */
1818 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1819 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1820 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1821 buf_prd++;
1822 }
1823
1824 return 0;
1825}
1826
1827static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
1828{
1829 struct domain_device *dev = task->dev;
1830 struct mvs_info *mvi = dev->port->ha->lldd_ha;
1831 struct pci_dev *pdev = mvi->pdev;
1832 void __iomem *regs = mvi->regs;
1833 struct mvs_task_exec_info tei;
1834 struct sas_task *t = task;
1835 u32 tag = 0xdeadbeef, rc, n_elem = 0;
1836 unsigned long flags;
1837 u32 n = num, pass = 0;
1838
1839 spin_lock_irqsave(&mvi->lock, flags);
1840
1841 do {
1842 tei.port = &mvi->port[dev->port->id];
1843
1844 if (!tei.port->port_attached) {
1845 struct task_status_struct *ts = &t->task_status;
1846 ts->stat = SAS_PHY_DOWN;
1847 t->task_done(t);
1848 rc = 0;
1849 goto exec_exit;
1850 }
1851 if (!sas_protocol_ata(t->task_proto)) {
1852 if (t->num_scatter) {
1853 n_elem = pci_map_sg(mvi->pdev, t->scatter,
1854 t->num_scatter,
1855 t->data_dir);
1856 if (!n_elem) {
1857 rc = -ENOMEM;
1858 goto err_out;
1859 }
1860 }
1861 } else {
1862 n_elem = t->num_scatter;
1863 }
1864
1865 rc = mvs_tag_alloc(mvi, &tag);
1866 if (rc)
1867 goto err_out;
1868
1869 mvi->slot_info[tag].task = t;
1870 mvi->slot_info[tag].n_elem = n_elem;
1871 memset(mvi->slot_info[tag].buf, 0, MVS_SLOT_BUF_SZ);
1872 tei.task = t;
1873 tei.hdr = &mvi->slot[tag];
1874 tei.tag = tag;
1875 tei.n_elem = n_elem;
1876
1877 switch (t->task_proto) {
1878 case SAS_PROTOCOL_SMP:
1879 rc = mvs_task_prep_smp(mvi, &tei);
1880 break;
1881 case SAS_PROTOCOL_SSP:
1882 rc = mvs_task_prep_ssp(mvi, &tei);
1883 break;
1884 case SAS_PROTOCOL_SATA:
1885 case SAS_PROTOCOL_STP:
1886 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1887 rc = mvs_task_prep_ata(mvi, &tei);
1888 break;
1889 default:
1890 dev_printk(KERN_ERR, &pdev->dev,
1891 "unknown sas_task proto: 0x%x\n",
1892 t->task_proto);
1893 rc = -EINVAL;
1894 break;
1895 }
1896
1897 if (rc)
1898 goto err_out_tag;
1899
1900 /* TODO: select normal or high priority */
1901
1902 spin_lock(&t->task_state_lock);
1903 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
1904 spin_unlock(&t->task_state_lock);
1905
1906 if (n == 1) {
1907 spin_unlock_irqrestore(&mvi->lock, flags);
1908 mw32(TX_PROD_IDX, mvi->tx_prod);
1909 }
1910 mvs_hba_memory_dump(mvi, tag, t->task_proto);
1911
1912 ++pass;
1913 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
1914
1915 if (n == 1)
1916 break;
1917
1918 t = list_entry(t->list.next, struct sas_task, list);
1919 } while (--n);
1920
1921 return 0;
1922
1923err_out_tag:
1924 mvs_tag_free(mvi, tag);
1925err_out:
1926 dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
1927 if (!sas_protocol_ata(t->task_proto))
1928 if (n_elem)
1929 pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
1930 t->data_dir);
1931exec_exit:
1932 if (pass)
1933 mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1934 spin_unlock_irqrestore(&mvi->lock, flags);
1935 return rc;
1936}
1937
1938static int mvs_task_abort(struct sas_task *task)
1939{
1940 int rc = 1;
1941 unsigned long flags;
1942 struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
1943 struct pci_dev *pdev = mvi->pdev;
1944
1945 spin_lock_irqsave(&task->task_state_lock, flags);
1946 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1947 rc = TMF_RESP_FUNC_COMPLETE;
1948 goto out_done;
1949 }
1950 spin_unlock_irqrestore(&task->task_state_lock, flags);
1951
1952 /*FIXME*/
1953 rc = TMF_RESP_FUNC_COMPLETE;
1954
1955 switch (task->task_proto) {
1956 case SAS_PROTOCOL_SMP:
1957 dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! ");
1958 break;
1959 case SAS_PROTOCOL_SSP:
1960 dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! ");
1961 break;
1962 case SAS_PROTOCOL_SATA:
1963 case SAS_PROTOCOL_STP:
1964 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
1965 dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! "
1966 "Dump D2H FIS: \n");
1967 mvs_hexdump(sizeof(struct host_to_dev_fis),
1968 (void *)&task->ata_task.fis, 0);
1969 dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
1970 mvs_hexdump(16, task->ata_task.atapi_packet, 0);
1971 break;
1972 }
1973 default:
1974 break;
1975 }
1976out_done:
1977 return rc;
1978}
1979
1980static void mvs_free(struct mvs_info *mvi)
1981{
1982 int i;
1983
1984 if (!mvi)
1985 return;
1986
1987 for (i = 0; i < MVS_SLOTS; i++) {
1988 struct mvs_slot_info *slot = &mvi->slot_info[i];
1989
1990 if (slot->buf)
1991 dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
1992 slot->buf, slot->buf_dma);
1993 }
1994
1995 if (mvi->tx)
1996 dma_free_coherent(&mvi->pdev->dev,
1997 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
1998 mvi->tx, mvi->tx_dma);
1999 if (mvi->rx_fis)
2000 dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
2001 mvi->rx_fis, mvi->rx_fis_dma);
2002 if (mvi->rx)
2003 dma_free_coherent(&mvi->pdev->dev,
2004 sizeof(*mvi->rx) * MVS_RX_RING_SZ,
2005 mvi->rx, mvi->rx_dma);
2006 if (mvi->slot)
2007 dma_free_coherent(&mvi->pdev->dev,
2008 sizeof(*mvi->slot) * MVS_SLOTS,
2009 mvi->slot, mvi->slot_dma);
2010#ifdef MVS_ENABLE_PERI
2011 if (mvi->peri_regs)
2012 iounmap(mvi->peri_regs);
2013#endif
2014 if (mvi->regs)
2015 iounmap(mvi->regs);
2016 if (mvi->shost)
2017 scsi_host_put(mvi->shost);
2018 kfree(mvi->sas.sas_port);
2019 kfree(mvi->sas.sas_phy);
2020 kfree(mvi);
2021}
2022
2023/* FIXME: locking? */
2024static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
2025 void *funcdata)
2026{
2027 struct mvs_info *mvi = sas_phy->ha->lldd_ha;
2028 int rc = 0, phy_id = sas_phy->id;
2029 u32 tmp;
2030
2031 tmp = mvs_read_phy_ctl(mvi, phy_id);
2032
2033 switch (func) {
2034 case PHY_FUNC_SET_LINK_RATE:{
2035 struct sas_phy_linkrates *rates = funcdata;
2036 u32 lrmin = 0, lrmax = 0;
2037
2038 lrmin = (rates->minimum_linkrate << 8);
2039 lrmax = (rates->maximum_linkrate << 12);
2040
2041 if (lrmin) {
2042 tmp &= ~(0xf << 8);
2043 tmp |= lrmin;
2044 }
2045 if (lrmax) {
2046 tmp &= ~(0xf << 12);
2047 tmp |= lrmax;
2048 }
2049 mvs_write_phy_ctl(mvi, phy_id, tmp);
2050 break;
2051 }
2052
2053 case PHY_FUNC_HARD_RESET:
2054 if (tmp & PHY_RST_HARD)
2055 break;
2056 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
2057 break;
2058
2059 case PHY_FUNC_LINK_RESET:
2060 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
2061 break;
2062
2063 case PHY_FUNC_DISABLE:
2064 case PHY_FUNC_RELEASE_SPINUP_HOLD:
2065 default:
2066 rc = -EOPNOTSUPP;
2067 }
2068
2069 return rc;
2070}
2071
2072static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
2073{
2074 struct mvs_phy *phy = &mvi->phy[phy_id];
2075 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2076
2077 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
2078 sas_phy->class = SAS;
2079 sas_phy->iproto = SAS_PROTOCOL_ALL;
2080 sas_phy->tproto = 0;
2081 sas_phy->type = PHY_TYPE_PHYSICAL;
2082 sas_phy->role = PHY_ROLE_INITIATOR;
2083 sas_phy->oob_mode = OOB_NOT_CONNECTED;
2084 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
2085
2086 sas_phy->id = phy_id;
2087 sas_phy->sas_addr = &mvi->sas_addr[0];
2088 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
2089 sas_phy->ha = &mvi->sas;
2090 sas_phy->lldd_phy = phy;
2091}
2092
2093static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
2094 const struct pci_device_id *ent)
2095{
2096 struct mvs_info *mvi;
2097 unsigned long res_start, res_len, res_flag;
2098 struct asd_sas_phy **arr_phy;
2099 struct asd_sas_port **arr_port;
2100 const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
2101 int i;
2102
2103 /*
2104 * alloc and init our per-HBA mvs_info struct
2105 */
2106
2107 mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
2108 if (!mvi)
2109 return NULL;
2110
2111 spin_lock_init(&mvi->lock);
2112 mvi->pdev = pdev;
2113 mvi->chip = chip;
2114
2115 if (pdev->device == 0x6440 && pdev->revision == 0)
2116 mvi->flags |= MVF_PHY_PWR_FIX;
2117
2118 /*
2119 * alloc and init SCSI, SAS glue
2120 */
2121
2122 mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
2123 if (!mvi->shost)
2124 goto err_out;
2125
2126 arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2127 arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2128 if (!arr_phy || !arr_port)
2129 goto err_out;
2130
2131 for (i = 0; i < MVS_MAX_PHYS; i++) {
2132 mvs_phy_init(mvi, i);
2133 arr_phy[i] = &mvi->phy[i].sas_phy;
2134 arr_port[i] = &mvi->port[i].sas_port;
2135 }
2136
2137 SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
2138 mvi->shost->transportt = mvs_stt;
2139 mvi->shost->max_id = 21;
2140 mvi->shost->max_lun = ~0;
2141 mvi->shost->max_channel = 0;
2142 mvi->shost->max_cmd_len = 16;
2143
2144 mvi->sas.sas_ha_name = DRV_NAME;
2145 mvi->sas.dev = &pdev->dev;
2146 mvi->sas.lldd_module = THIS_MODULE;
2147 mvi->sas.sas_addr = &mvi->sas_addr[0];
2148 mvi->sas.sas_phy = arr_phy;
2149 mvi->sas.sas_port = arr_port;
2150 mvi->sas.num_phys = chip->n_phy;
2151 mvi->sas.lldd_max_execute_num = MVS_CHIP_SLOT_SZ - 1;
2152 mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
2153 mvi->can_queue = (MVS_CHIP_SLOT_SZ >> 1) - 1;
2154 mvi->sas.lldd_ha = mvi;
2155 mvi->sas.core.shost = mvi->shost;
2156
2157 mvs_tag_init(mvi);
2158
2159 /*
2160 * ioremap main and peripheral registers
2161 */
2162
2163#ifdef MVS_ENABLE_PERI
2164 res_start = pci_resource_start(pdev, 2);
2165 res_len = pci_resource_len(pdev, 2);
2166 if (!res_start || !res_len)
2167 goto err_out;
2168
2169 mvi->peri_regs = ioremap_nocache(res_start, res_len);
2170 if (!mvi->peri_regs)
2171 goto err_out;
2172#endif
2173
2174 res_start = pci_resource_start(pdev, 4);
2175 res_len = pci_resource_len(pdev, 4);
2176 if (!res_start || !res_len)
2177 goto err_out;
2178
2179 res_flag = pci_resource_flags(pdev, 4);
2180 if (res_flag & IORESOURCE_CACHEABLE)
2181 mvi->regs = ioremap(res_start, res_len);
2182 else
2183 mvi->regs = ioremap_nocache(res_start, res_len);
2184
2185 if (!mvi->regs)
2186 goto err_out;
2187
2188 /*
2189 * alloc and init our DMA areas
2190 */
2191
2192 mvi->tx = dma_alloc_coherent(&pdev->dev,
2193 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
2194 &mvi->tx_dma, GFP_KERNEL);
2195 if (!mvi->tx)
2196 goto err_out;
2197 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
2198
2199 mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
2200 &mvi->rx_fis_dma, GFP_KERNEL);
2201 if (!mvi->rx_fis)
2202 goto err_out;
2203 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
2204
2205 mvi->rx = dma_alloc_coherent(&pdev->dev,
2206 sizeof(*mvi->rx) * MVS_RX_RING_SZ,
2207 &mvi->rx_dma, GFP_KERNEL);
2208 if (!mvi->rx)
2209 goto err_out;
2210 memset(mvi->rx, 0, sizeof(*mvi->rx) * MVS_RX_RING_SZ);
2211
2212 mvi->rx[0] = cpu_to_le32(0xfff);
2213 mvi->rx_cons = 0xfff;
2214
2215 mvi->slot = dma_alloc_coherent(&pdev->dev,
2216 sizeof(*mvi->slot) * MVS_SLOTS,
2217 &mvi->slot_dma, GFP_KERNEL);
2218 if (!mvi->slot)
2219 goto err_out;
2220 memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
2221
2222 for (i = 0; i < MVS_SLOTS; i++) {
2223 struct mvs_slot_info *slot = &mvi->slot_info[i];
2224
2225 slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
2226 &slot->buf_dma, GFP_KERNEL);
2227 if (!slot->buf)
2228 goto err_out;
2229 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
2230 }
2231
2232 /* finally, read NVRAM to get our SAS address */
2233 if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
2234 goto err_out;
2235 return mvi;
2236
2237err_out:
2238 mvs_free(mvi);
2239 return NULL;
2240}
2241
2242static u32 mvs_cr32(void __iomem *regs, u32 addr)
2243{
2244 mw32(CMD_ADDR, addr);
2245 return mr32(CMD_DATA);
2246}
2247
2248static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
2249{
2250 mw32(CMD_ADDR, addr);
2251 mw32(CMD_DATA, val);
2252}
2253
2254static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
2255{
2256 void __iomem *regs = mvi->regs;
2257 return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4):
2258 mr32(P4_SER_CTLSTAT + (port - 4) * 4);
2259}
2260
2261static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
2262{
2263 void __iomem *regs = mvi->regs;
2264 if (port < 4)
2265 mw32(P0_SER_CTLSTAT + port * 4, val);
2266 else
2267 mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
2268}
2269
2270static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
2271{
2272 void __iomem *regs = mvi->regs + off;
2273 void __iomem *regs2 = mvi->regs + off2;
2274 return (port < 4)?readl(regs + port * 8):
2275 readl(regs2 + (port - 4) * 8);
2276}
2277
2278static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
2279 u32 port, u32 val)
2280{
2281 void __iomem *regs = mvi->regs + off;
2282 void __iomem *regs2 = mvi->regs + off2;
2283 if (port < 4)
2284 writel(val, regs + port * 8);
2285 else
2286 writel(val, regs2 + (port - 4) * 8);
2287}
2288
2289static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
2290{
2291 return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
2292}
2293
2294static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
2295{
2296 mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
2297}
2298
2299static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
2300{
2301 mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
2302}
2303
2304static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
2305{
2306 return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
2307}
2308
2309static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
2310{
2311 mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
2312}
2313
2314static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
2315{
2316 mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
2317}
2318
2319static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
2320{
2321 return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
2322}
2323
2324static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
2325{
2326 mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
2327}
2328
2329static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
2330{
2331 return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
2332}
2333
2334static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
2335{
2336 mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
2337}
2338
2339static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
2340{
2341 void __iomem *regs = mvi->regs;
2342 u32 tmp;
2343
2344 /* workaround for SATA R-ERR, to ignore phy glitch */
2345 tmp = mvs_cr32(regs, CMD_PHY_TIMER);
2346 tmp &= ~(1 << 9);
2347 tmp |= (1 << 10);
2348 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
2349
2350 /* enable retry 127 times */
2351 mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);
2352
2353 /* extend open frame timeout to max */
2354 tmp = mvs_cr32(regs, CMD_SAS_CTL0);
2355 tmp &= ~0xffff;
2356 tmp |= 0x3fff;
2357 mvs_cw32(regs, CMD_SAS_CTL0, tmp);
2358
2359 /* workaround for WDTIMEOUT , set to 550 ms */
2360 mvs_cw32(regs, CMD_WD_TIMER, 0xffffff);
2361
2362 /* not to halt for different port op during wideport link change */
2363 mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);
2364
2365 /* workaround for Seagate disk not-found OOB sequence, recv
2366 * COMINIT before sending out COMWAKE */
2367 tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
2368 tmp &= 0x0000ffff;
2369 tmp |= 0x00fa0000;
2370 mvs_cw32(regs, CMD_PHY_MODE_21, tmp);
2371
2372 tmp = mvs_cr32(regs, CMD_PHY_TIMER);
2373 tmp &= 0x1fffffff;
2374 tmp |= (2U << 29); /* 8 ms retry */
2375 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
2376
2377 /* TEST - for phy decoding error, adjust voltage levels */
2378 mw32(P0_VSR_ADDR + 0, 0x8);
2379 mw32(P0_VSR_DATA + 0, 0x2F0);
2380
2381 mw32(P0_VSR_ADDR + 8, 0x8);
2382 mw32(P0_VSR_DATA + 8, 0x2F0);
2383
2384 mw32(P0_VSR_ADDR + 16, 0x8);
2385 mw32(P0_VSR_DATA + 16, 0x2F0);
2386
2387 mw32(P0_VSR_ADDR + 24, 0x8);
2388 mw32(P0_VSR_DATA + 24, 0x2F0);
2389
2390}
2391
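/* Enable command transmission for the given phy by setting its bit in the
 * PCS register (the bit position depends on the chip's phy count).
 */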
2392static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
2393{
2394 void __iomem *regs = mvi->regs;
2395 u32 tmp;
2396
2397 tmp = mr32(PCS);
2398 if (mvi->chip->n_phy <= 4)
2399 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
2400 else
2401 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
2402 mw32(PCS, tmp);
2403}
2404
2405static void mvs_detect_porttype(struct mvs_info *mvi, int i)
2406{
2407 void __iomem *regs = mvi->regs;
2408 u32 reg;
2409 struct mvs_phy *phy = &mvi->phy[i];
2410
2411 /* TODO check & save device type */
2412 reg = mr32(GBL_PORT_TYPE);
2413
2414 if (reg & MODE_SAS_SATA & (1 << i))
2415 phy->phy_type |= PORT_TYPE_SAS;
2416 else
2417 phy->phy_type |= PORT_TYPE_SATA;
2418}
2419
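/* Copy the received SATA signature (D2H register FIS) for phy i out of the
 * port configuration registers PHYR_SATA_SIG0..3 into buf.
 */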
2420static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
2421{
2422 u32 *s = (u32 *) buf;
2423
2424 if (!s)
2425 return NULL;
2426
2427 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
2428 s[3] = mvs_read_port_cfg_data(mvi, i);
2429
2430 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
2431 s[2] = mvs_read_port_cfg_data(mvi, i);
2432
2433 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
2434 s[1] = mvs_read_port_cfg_data(mvi, i);
2435
2436 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
2437 s[0] = mvs_read_port_cfg_data(mvi, i);
2438
2439 return (void *)s;
2440}
2441
2442static u32 mvs_is_sig_fis_received(u32 irq_status)
2443{
2444 return irq_status & PHYEV_SIG_FIS;
2445}
2446
2447static void mvs_update_wideport(struct mvs_info *mvi, int i)
2448{
2449 struct mvs_phy *phy = &mvi->phy[i];
2450 struct mvs_port *port = phy->port;
2451 int j, no;
2452
2453 for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
2454 if (no & 1) {
2455 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
2456 mvs_write_port_cfg_data(mvi, no,
2457 port->wide_port_phymap);
2458 } else {
2459 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
2460 mvs_write_port_cfg_data(mvi, no, 0);
2461 }
2462}
2463
2464static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
2465{
2466 u32 tmp;
2467 struct mvs_phy *phy = &mvi->phy[i];
2468 struct mvs_port *port;
2469
2470 tmp = mvs_read_phy_ctl(mvi, i);
2471
2472 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
2473 if (!phy->port)
2474 phy->phy_attached = 1;
2475 return tmp;
2476 }
2477
2478 port = phy->port;
2479 if (port) {
2480 if (phy->phy_type & PORT_TYPE_SAS) {
2481 port->wide_port_phymap &= ~(1U << i);
2482 if (!port->wide_port_phymap)
2483 port->port_attached = 0;
2484 mvs_update_wideport(mvi, i);
2485 } else if (phy->phy_type & PORT_TYPE_SATA)
2486 port->port_attached = 0;
2487 mvs_free_reg_set(mvi, phy->port);
2488 phy->port = NULL;
2489 phy->phy_attached = 0;
2490 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
2491 }
2492 return 0;
2493}
2494
2495static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
2496 int get_st)
2497{
2498 struct mvs_phy *phy = &mvi->phy[i];
2499 struct pci_dev *pdev = mvi->pdev;
2500 u32 tmp, j;
2501 u64 tmp64;
2502
2503 mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
2504 phy->dev_info = mvs_read_port_cfg_data(mvi, i);
2505
2506 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
2507 phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
2508
2509 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
2510 phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
2511
2512 if (get_st) {
2513 phy->irq_status = mvs_read_port_irq_stat(mvi, i);
2514 phy->phy_status = mvs_is_phy_ready(mvi, i);
2515 }
2516
2517 if (phy->phy_status) {
2518 u32 phy_st;
2519 struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
2520
2521 mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
2522 phy_st = mvs_read_port_cfg_data(mvi, i);
2523
2524 sas_phy->linkrate =
2525 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
2526 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
2527
2528 /* Updated attached_sas_addr */
2529 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
2530 phy->att_dev_sas_addr =
2531 (u64) mvs_read_port_cfg_data(mvi, i) << 32;
2532
2533 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
2534 phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
2535
2536 dev_printk(KERN_DEBUG, &pdev->dev,
2537 "phy[%d] Get Attached Address 0x%llX ,"
2538 " SAS Address 0x%llX\n",
2539 i, phy->att_dev_sas_addr, phy->dev_sas_addr);
2540 dev_printk(KERN_DEBUG, &pdev->dev,
2541 "Rate = %x , type = %d\n",
2542 sas_phy->linkrate, phy->phy_type);
2543
2544#if 1
2545 /*
2546 * If the device is capable of supporting a wide port
2547 * on its phys, it may configure the phys as a wide port.
2548 */
2549 if (phy->phy_type & PORT_TYPE_SAS)
2550 for (j = 0; j < mvi->chip->n_phy && j != i; ++j) {
2551 if ((mvi->phy[j].phy_attached) &&
2552 (mvi->phy[j].phy_type & PORT_TYPE_SAS))
2553 if (phy->att_dev_sas_addr ==
2554 mvi->phy[j].att_dev_sas_addr - 1) {
2555 phy->att_dev_sas_addr =
2556 mvi->phy[j].att_dev_sas_addr;
2557 break;
2558 }
2559 }
2560
2561#endif
2562
2563 tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
2564 memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);
2565
2566 if (phy->phy_type & PORT_TYPE_SAS) {
2567 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
2568 phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
2569 phy->identify.device_type =
2570 phy->att_dev_info & PORT_DEV_TYPE_MASK;
2571
2572 if (phy->identify.device_type == SAS_END_DEV)
2573 phy->identify.target_port_protocols =
2574 SAS_PROTOCOL_SSP;
2575 else if (phy->identify.device_type != NO_DEVICE)
2576 phy->identify.target_port_protocols =
2577 SAS_PROTOCOL_SMP;
2578 if (phy_st & PHY_OOB_DTCTD)
2579 sas_phy->oob_mode = SAS_OOB_MODE;
2580 phy->frame_rcvd_size =
2581 sizeof(struct sas_identify_frame);
2582 } else if (phy->phy_type & PORT_TYPE_SATA) {
2583 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
2584 if (mvs_is_sig_fis_received(phy->irq_status)) {
2585 if (phy_st & PHY_OOB_DTCTD)
2586 sas_phy->oob_mode = SATA_OOB_MODE;
2587 phy->frame_rcvd_size =
2588 sizeof(struct dev_to_host_fis);
2589 mvs_get_d2h_reg(mvi, i,
2590 (void *)sas_phy->frame_rcvd);
2591 } else {
2592 dev_printk(KERN_DEBUG, &pdev->dev,
2593 "No sig fis\n");
2594 }
2595 }
2596 /* workaround for HW phy decoding error on 1.5g disk drive */
2597 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
2598 tmp = mvs_read_port_vsr_data(mvi, i);
2599 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
2600 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
2601 SAS_LINK_RATE_1_5_GBPS)
2602 tmp &= ~PHY_MODE6_DTL_SPEED;
2603 else
2604 tmp |= PHY_MODE6_DTL_SPEED;
2605 mvs_write_port_vsr_data(mvi, i, tmp);
2606
2607 }
2608 if (get_st)
2609 mvs_write_port_irq_stat(mvi, i, phy->irq_status);
2610}
2611
2612static void mvs_port_formed(struct asd_sas_phy *sas_phy)
2613{
2614 struct sas_ha_struct *sas_ha = sas_phy->ha;
2615 struct mvs_info *mvi = sas_ha->lldd_ha;
2616 struct asd_sas_port *sas_port = sas_phy->port;
2617 struct mvs_phy *phy = sas_phy->lldd_phy;
2618 struct mvs_port *port = &mvi->port[sas_port->id];
2619 unsigned long flags;
2620
2621 spin_lock_irqsave(&mvi->lock, flags);
2622 port->port_attached = 1;
2623 phy->port = port;
2624 port->taskfileset = MVS_ID_NOT_MAPPED;
2625 if (phy->phy_type & PORT_TYPE_SAS) {
2626 port->wide_port_phymap = sas_port->phy_mask;
2627 mvs_update_wideport(mvi, sas_phy->id);
2628 }
2629 spin_unlock_irqrestore(&mvi->lock, flags);
2630}
2631
2632static int __devinit mvs_hw_init(struct mvs_info *mvi)
2633{
2634 void __iomem *regs = mvi->regs;
2635 int i;
2636 u32 tmp, cctl;
2637
2638 /* make sure interrupts are masked immediately (paranoia) */
2639 mw32(GBL_CTL, 0);
2640 tmp = mr32(GBL_CTL);
2641
2642 /* Reset Controller */
2643 if (!(tmp & HBA_RST)) {
2644 if (mvi->flags & MVF_PHY_PWR_FIX) {
2645 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
2646 tmp &= ~PCTL_PWR_ON;
2647 tmp |= PCTL_OFF;
2648 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
2649
2650 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
2651 tmp &= ~PCTL_PWR_ON;
2652 tmp |= PCTL_OFF;
2653 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
2654 }
2655
2656 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
2657 mw32_f(GBL_CTL, HBA_RST);
2658 }
2659
2660 /* wait for reset to finish; timeout is just a guess */
2661 i = 1000;
2662 while (i-- > 0) {
2663 msleep(10);
2664
2665 if (!(mr32(GBL_CTL) & HBA_RST))
2666 break;
2667 }
2668 if (mr32(GBL_CTL) & HBA_RST) {
2669 dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
2670 return -EBUSY;
2671 }
2672
2673 /* Init Chip */
2674 /* make sure RST is set; HBA_RST /should/ have done that for us */
2675 cctl = mr32(CTL);
2676 if (cctl & CCTL_RST)
2677 cctl &= ~CCTL_RST;
2678 else
2679 mw32_f(CTL, cctl | CCTL_RST);
2680
2681 /* write to device control _AND_ device status register? - A.C. */
2682 pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
2683 tmp &= ~PRD_REQ_MASK;
2684 tmp |= PRD_REQ_SIZE;
2685 pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
2686
2687 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
2688 tmp |= PCTL_PWR_ON;
2689 tmp &= ~PCTL_OFF;
2690 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
2691
2692 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
2693 tmp |= PCTL_PWR_ON;
2694 tmp &= ~PCTL_OFF;
2695 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
2696
2697 mw32_f(CTL, cctl);
2698
2699 /* reset control */
2700 mw32(PCS, 0); /*MVS_PCS */
2701
2702 mvs_phy_hacks(mvi);
2703
2704 mw32(CMD_LIST_LO, mvi->slot_dma);
2705 mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
2706
2707 mw32(RX_FIS_LO, mvi->rx_fis_dma);
2708 mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
2709
2710 mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
2711 mw32(TX_LO, mvi->tx_dma);
2712 mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
2713
2714 mw32(RX_CFG, MVS_RX_RING_SZ);
2715 mw32(RX_LO, mvi->rx_dma);
2716 mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
2717
2718 /* enable auto port detection */
2719 mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
2720 msleep(100);
2721 /* init and reset phys */
2722 for (i = 0; i < mvi->chip->n_phy; i++) {
2723 u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
2724 u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);
2725
2726 mvs_detect_porttype(mvi, i);
2727
2728 /* set phy local SAS address */
2729 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
2730 mvs_write_port_cfg_data(mvi, i, lo);
2731 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
2732 mvs_write_port_cfg_data(mvi, i, hi);
2733
2734 /* reset phy */
2735 tmp = mvs_read_phy_ctl(mvi, i);
2736 tmp |= PHY_RST;
2737 mvs_write_phy_ctl(mvi, i, tmp);
2738 }
2739
2740 msleep(100);
2741
2742 for (i = 0; i < mvi->chip->n_phy; i++) {
2743 /* clear phy int status */
2744 tmp = mvs_read_port_irq_stat(mvi, i);
2745 tmp &= ~PHYEV_SIG_FIS;
2746 mvs_write_port_irq_stat(mvi, i, tmp);
2747
2748 /* set phy int mask */
2749 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
2750 PHYEV_ID_DONE | PHYEV_DEC_ERR;
2751 mvs_write_port_irq_mask(mvi, i, tmp);
2752
2753 msleep(100);
2754 mvs_update_phyinfo(mvi, i, 1);
2755 mvs_enable_xmt(mvi, i);
2756 }
2757
2758 /* FIXME: update wide port bitmaps */
2759
2760 /* little endian for open address and command table, etc. */
2761 /* A.C.
2762 * it seems (from the spec) that turning on big-endian won't
2763 * do us any good on big-endian machines; needs further confirmation
2764 */
2765 cctl = mr32(CTL);
2766 cctl |= CCTL_ENDIAN_CMD;
2767 cctl |= CCTL_ENDIAN_DATA;
2768 cctl &= ~CCTL_ENDIAN_OPEN;
2769 cctl |= CCTL_ENDIAN_RSP;
2770 mw32_f(CTL, cctl);
2771
2772 /* reset CMD queue */
2773 tmp = mr32(PCS);
2774 tmp |= PCS_CMD_RST;
2775 mw32(PCS, tmp);
2776 /* interrupt coalescing may cause HW interrupts to be missed in some
2777 * cases, and the max count is 0x1ff while our max slot count is 0x200,
2778 * which would make the count wrap to 0.
2779 */
2780 tmp = 0;
2781 mw32(INT_COAL, tmp);
2782
2783 tmp = 0x100;
2784 mw32(INT_COAL_TMOUT, tmp);
2785
2786 /* ladies and gentlemen, start your engines */
2787 mw32(TX_CFG, 0);
2788 mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
2789 mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
2790 /* enable CMD/CMPL_Q/RESP mode */
2791 mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
2792
2793 /* re-enable interrupts globally */
2794 mvs_hba_interrupt_enable(mvi);
2795
2796 /* enable completion queue interrupt */
2797 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM);
2798 mw32(INT_MASK, tmp);
2799
2800 return 0;
2801}
2802
2803static void __devinit mvs_print_info(struct mvs_info *mvi)
2804{
2805 struct pci_dev *pdev = mvi->pdev;
2806 static int printed_version;
2807
2808 if (!printed_version++)
2809 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2810
2811 dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
2812 mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
2813}
2814
2815static int __devinit mvs_pci_init(struct pci_dev *pdev,
2816 const struct pci_device_id *ent)
2817{
2818 int rc;
2819 struct mvs_info *mvi;
2820 irq_handler_t irq_handler = mvs_interrupt;
2821
2822 rc = pci_enable_device(pdev);
2823 if (rc)
2824 return rc;
2825
2826 pci_set_master(pdev);
2827
2828 rc = pci_request_regions(pdev, DRV_NAME);
2829 if (rc)
2830 goto err_out_disable;
2831
2832 rc = pci_go_64(pdev);
2833 if (rc)
2834 goto err_out_regions;
2835
2836 mvi = mvs_alloc(pdev, ent);
2837 if (!mvi) {
2838 rc = -ENOMEM;
2839 goto err_out_regions;
2840 }
2841
2842 rc = mvs_hw_init(mvi);
2843 if (rc)
2844 goto err_out_mvi;
2845
2846#ifndef MVS_DISABLE_MSI
2847 if (!pci_enable_msi(pdev)) {
2848 u32 tmp;
2849 void __iomem *regs = mvi->regs;
2850 mvi->flags |= MVF_MSI;
2851 irq_handler = mvs_msi_interrupt;
2852 tmp = mr32(PCS);
2853 mw32(PCS, tmp | PCS_SELF_CLEAR);
2854 }
2855#endif
2856
2857 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
2858 if (rc)
2859 goto err_out_msi;
2860
2861 rc = scsi_add_host(mvi->shost, &pdev->dev);
2862 if (rc)
2863 goto err_out_irq;
2864
2865 rc = sas_register_ha(&mvi->sas);
2866 if (rc)
2867 goto err_out_shost;
2868
2869 pci_set_drvdata(pdev, mvi);
2870
2871 mvs_print_info(mvi);
2872
2873 scsi_scan_host(mvi->shost);
2874
2875 return 0;
2876
2877err_out_shost:
2878 scsi_remove_host(mvi->shost);
2879err_out_irq:
2880 free_irq(pdev->irq, mvi);
2881err_out_msi:
2882 if (mvi->flags & MVF_MSI)
2883 pci_disable_msi(pdev);
2884err_out_mvi:
2885 mvs_free(mvi);
2886err_out_regions:
2887 pci_release_regions(pdev);
2888err_out_disable:
2889 pci_disable_device(pdev);
2890 return rc;
2891}
2892
2893static void __devexit mvs_pci_remove(struct pci_dev *pdev)
2894{
2895 struct mvs_info *mvi = pci_get_drvdata(pdev);
2896
2897 pci_set_drvdata(pdev, NULL);
2898
2899 if (mvi) {
2900 sas_unregister_ha(&mvi->sas);
2901 mvs_hba_interrupt_disable(mvi);
2902 sas_remove_host(mvi->shost);
2903 scsi_remove_host(mvi->shost);
2904
2905 free_irq(pdev->irq, mvi);
2906 if (mvi->flags & MVF_MSI)
2907 pci_disable_msi(pdev);
2908 mvs_free(mvi);
2909 pci_release_regions(pdev);
2910 }
2911 pci_disable_device(pdev);
2912}
2913
2914static struct sas_domain_function_template mvs_transport_ops = {
2915 .lldd_execute_task = mvs_task_exec,
2916 .lldd_control_phy = mvs_phy_control,
2917 .lldd_abort_task = mvs_task_abort,
2918 .lldd_port_formed = mvs_port_formed
2919};
2920
2921static struct pci_device_id __devinitdata mvs_pci_table[] = {
2922 { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
2923 { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
2924 { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
2925 { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },
2926
2927 { } /* terminate list */
2928};
2929
2930static struct pci_driver mvs_pci_driver = {
2931 .name = DRV_NAME,
2932 .id_table = mvs_pci_table,
2933 .probe = mvs_pci_init,
2934 .remove = __devexit_p(mvs_pci_remove),
2935};
2936
2937static int __init mvs_init(void)
2938{
2939 int rc;
2940
2941 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
2942 if (!mvs_stt)
2943 return -ENOMEM;
2944
2945 rc = pci_register_driver(&mvs_pci_driver);
2946 if (rc)
2947 goto err_out;
2948
2949 return 0;
2950
2951err_out:
2952 sas_release_transport(mvs_stt);
2953 return rc;
2954}
2955
2956static void __exit mvs_exit(void)
2957{
2958 pci_unregister_driver(&mvs_pci_driver);
2959 sas_release_transport(mvs_stt);
2960}
2961
2962module_init(mvs_init);
2963module_exit(mvs_exit);
2964
2965MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
2966MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
2967MODULE_VERSION(DRV_VERSION);
2968MODULE_LICENSE("GPL");
2969MODULE_DEVICE_TABLE(pci, mvs_pci_table);
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 0cd614a0fa73..fad6cb5cba28 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -124,7 +124,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *cmd, const void *buf)
 		}
 		req_len += sgpnt->length;
 	}
-	scsi_set_resid(cmd, req_len - act_len);
+	scsi_set_resid(cmd, buflen - act_len);
 	return 0;
 }
 
@@ -427,7 +427,7 @@ static struct scsi_host_template ps3rom_host_template = {
 	.cmd_per_lun = 1,
 	.emulated = 1, /* only sg driver uses this */
 	.max_sectors = PS3ROM_MAX_SECTORS,
-	.use_clustering = ENABLE_CLUSTERING,
+	.use_clustering = DISABLE_CLUSTERING,
 	.module = THIS_MODULE,
 };
 
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 1479c60441c8..2cd899bfe84b 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -23,7 +23,7 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
 	mutex_lock(&ha->fce_mutex);
 
 	seq_printf(s, "FCE Trace Buffer\n");
-	seq_printf(s, "In Pointer = %llx\n\n", ha->fce_wr);
+	seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
 	seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
 	seq_printf(s, "FCE Enable Registers\n");
 	seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 6226d88479f5..c1808763d40e 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -39,7 +39,7 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
 	ms_pkt->entry_count = 1;
 	SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
 	ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
-	ms_pkt->timeout = __constant_cpu_to_le16(25);
+	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
 	ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
 	ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
 	ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
@@ -75,7 +75,7 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
 	ct_pkt->entry_type = CT_IOCB_TYPE;
 	ct_pkt->entry_count = 1;
 	ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS);
-	ct_pkt->timeout = __constant_cpu_to_le16(25);
+	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
 	ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
 	ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
 	ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
@@ -1144,7 +1144,7 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
 	ms_pkt->entry_count = 1;
 	SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id);
 	ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
-	ms_pkt->timeout = __constant_cpu_to_le16(59);
+	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
 	ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
 	ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
 	ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
@@ -1181,7 +1181,7 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
 	ct_pkt->entry_type = CT_IOCB_TYPE;
 	ct_pkt->entry_count = 1;
 	ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
-	ct_pkt->timeout = __constant_cpu_to_le16(59);
+	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
 	ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
 	ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
 	ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
@@ -1761,7 +1761,7 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size,
 	ct_pkt->entry_type = CT_IOCB_TYPE;
 	ct_pkt->entry_count = 1;
 	ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id);
-	ct_pkt->timeout = __constant_cpu_to_le16(59);
+	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
 	ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
 	ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
 	ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d5c7853e7eba..364be7d06875 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1733,8 +1733,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
 	ha->login_timeout = nv->login_timeout;
 	icb->login_timeout = nv->login_timeout;
 
-	/* Set minimum RATOV to 200 tenths of a second. */
-	ha->r_a_tov = 200;
+	/* Set minimum RATOV to 100 tenths of a second. */
+	ha->r_a_tov = 100;
 
 	ha->loop_reset_delay = nv->reset_delay;
 
@@ -3645,8 +3645,8 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
 	ha->login_timeout = le16_to_cpu(nv->login_timeout);
 	icb->login_timeout = cpu_to_le16(nv->login_timeout);
 
-	/* Set minimum RATOV to 200 tenths of a second. */
-	ha->r_a_tov = 200;
+	/* Set minimum RATOV to 100 tenths of a second. */
+	ha->r_a_tov = 100;
 
 	ha->loop_reset_delay = nv->reset_delay;
 
@@ -4022,7 +4022,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
4022 return; 4022 return;
4023 4023
4024 ret = qla2x00_stop_firmware(ha); 4024 ret = qla2x00_stop_firmware(ha);
4025 for (retries = 5; ret != QLA_SUCCESS && retries ; retries--) { 4025 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4026 retries ; retries--) {
4026 qla2x00_reset_chip(ha); 4027 qla2x00_reset_chip(ha);
4027 if (qla2x00_chip_diag(ha) != QLA_SUCCESS) 4028 if (qla2x00_chip_diag(ha) != QLA_SUCCESS)
4028 continue; 4029 continue;
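Note: qla2x00_try_to_stop_firmware() now also gives up when the stop-firmware mailbox command times out, instead of resetting the chip and retrying up to five times. A rough userspace model of the revised loop, with stand-in stubs for the mailbox and diagnostic calls:

    #include <stdio.h>

    #define QLA_SUCCESS          0
    #define QLA_FUNCTION_TIMEOUT 2

    /* stand-in for qla2x00_stop_firmware(); returns a hypothetical status */
    static int stop_firmware(void) { return QLA_FUNCTION_TIMEOUT; }
    static void reset_chip(void) { }
    static int chip_diag(void) { return QLA_SUCCESS; }

    int main(void)
    {
            int retries, ret = stop_firmware();

            /* a timed-out mailbox command is no longer retried: the loop
             * now exits on QLA_FUNCTION_TIMEOUT as well as QLA_SUCCESS */
            for (retries = 5;
                 ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && retries;
                 retries--) {
                    reset_chip();
                    if (chip_diag() != QLA_SUCCESS)
                            continue;
                    ret = stop_firmware();
            }
            printf("final status %d, retries left %d\n", ret, retries);
            return 0;
    }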
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 14e6f22944b7..f0337036c7bb 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -958,6 +958,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
958 } 958 }
959 } 959 }
960 960
961 /* Check for overrun. */
962 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
963 scsi_status & SS_RESIDUAL_OVER)
964 comp_status = CS_DATA_OVERRUN;
965
961 /* 966 /*
962 * Based on Host and scsi status generate status code for Linux 967 * Based on Host and scsi status generate status code for Linux
963 */ 968 */
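Note: the added overrun check downgrades a nominally successful completion to CS_DATA_OVERRUN whenever the SCSI status carries the residual-overrun flag, so the later translation into a Linux result reports an error rather than silently accepting the transfer. A small sketch of the decision (the constant values here are illustrative, not taken from the qla2xxx headers):

    #include <stdio.h>
    #include <stdint.h>

    #define CS_COMPLETE      0x0
    #define CS_DATA_OVERRUN  0x5      /* illustrative value */
    #define SS_RESIDUAL_OVER 0x0400   /* illustrative value */

    static uint16_t final_comp_status(int fwi2_capable, uint16_t comp_status,
                                      uint16_t scsi_status)
    {
            /* ISP24xx parts can complete a command "successfully" while
             * still flagging a residual overrun; treat it as an overrun */
            if (fwi2_capable && comp_status == CS_COMPLETE &&
                (scsi_status & SS_RESIDUAL_OVER))
                    return CS_DATA_OVERRUN;
            return comp_status;
    }

    int main(void)
    {
            printf("%u\n", (unsigned)final_comp_status(1, CS_COMPLETE,
                                                       SS_RESIDUAL_OVER));
            return 0;
    }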
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 99d29fff836d..bb103580e1ba 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2206,7 +2206,7 @@ qla24xx_abort_target(fc_port_t *fcport)
2206 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 2206 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
2207 tsk->p.tsk.entry_count = 1; 2207 tsk->p.tsk.entry_count = 1;
2208 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); 2208 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
2209 tsk->p.tsk.timeout = __constant_cpu_to_le16(25); 2209 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2210 tsk->p.tsk.control_flags = __constant_cpu_to_le32(TCF_TARGET_RESET); 2210 tsk->p.tsk.control_flags = __constant_cpu_to_le32(TCF_TARGET_RESET);
2211 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 2211 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
2212 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 2212 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index c5742cc15abb..ea08a129fee9 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.00-k8" 10#define QLA2XXX_VERSION "8.02.00-k9"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 10b3b9a620f3..109c5f5985ec 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1299,9 +1299,9 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
1299 ddb_entry->fw_ddb_device_state = state; 1299 ddb_entry->fw_ddb_device_state = state;
1300 /* Device is back online. */ 1300 /* Device is back online. */
1301 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { 1301 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
1302 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
1302 atomic_set(&ddb_entry->port_down_timer, 1303 atomic_set(&ddb_entry->port_down_timer,
1303 ha->port_down_retry_count); 1304 ha->port_down_retry_count);
1304 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
1305 atomic_set(&ddb_entry->relogin_retry_count, 0); 1305 atomic_set(&ddb_entry->relogin_retry_count, 0);
1306 atomic_set(&ddb_entry->relogin_timer, 0); 1306 atomic_set(&ddb_entry->relogin_timer, 0);
1307 clear_bit(DF_RELOGIN, &ddb_entry->flags); 1307 clear_bit(DF_RELOGIN, &ddb_entry->flags);
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 0f029d0d7315..fc84db4069f4 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -100,8 +100,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
100 100
101 if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) { 101 if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) {
102 scsi_set_resid(cmd, residual); 102 scsi_set_resid(cmd, residual);
103 if (!scsi_status && ((scsi_bufflen(cmd) - residual) < 103 if ((scsi_bufflen(cmd) - residual) < cmd->underflow) {
104 cmd->underflow)) {
105 104
106 cmd->result = DID_ERROR << 16; 105 cmd->result = DID_ERROR << 16;
107 106
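Note: the qla4xxx underflow test no longer requires a zero SCSI status; any command whose transferred length falls below cmd->underflow is now failed with DID_ERROR, even if the target also returned a check condition. A minimal sketch of the check (field names mirror the driver, everything else is a stand-in):

    #include <stdio.h>
    #include <stdint.h>

    #define DID_ERROR 0x07

    /* host byte for a command that completed with 'residual' bytes not
     * transferred; bufflen and underflow mimic the scsi_cmnd fields */
    static unsigned int check_underflow(uint32_t bufflen, uint32_t residual,
                                        uint32_t underflow)
    {
            if ((bufflen - residual) < underflow)
                    return DID_ERROR << 16;
            return 0;
    }

    int main(void)
    {
            /* 4 KiB request, 3 KiB missing, 2 KiB minimum -> error */
            printf("result = 0x%x\n", check_underflow(4096, 3072, 2048));
            return 0;
    }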
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index c3c59d763037..8b92f348f02c 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -75,6 +75,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
75static int qla4xxx_slave_alloc(struct scsi_device *device); 75static int qla4xxx_slave_alloc(struct scsi_device *device);
76static int qla4xxx_slave_configure(struct scsi_device *device); 76static int qla4xxx_slave_configure(struct scsi_device *device);
77static void qla4xxx_slave_destroy(struct scsi_device *sdev); 77static void qla4xxx_slave_destroy(struct scsi_device *sdev);
78static void qla4xxx_scan_start(struct Scsi_Host *shost);
78 79
79static struct scsi_host_template qla4xxx_driver_template = { 80static struct scsi_host_template qla4xxx_driver_template = {
80 .module = THIS_MODULE, 81 .module = THIS_MODULE,
@@ -90,6 +91,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
90 .slave_destroy = qla4xxx_slave_destroy, 91 .slave_destroy = qla4xxx_slave_destroy,
91 92
92 .scan_finished = iscsi_scan_finished, 93 .scan_finished = iscsi_scan_finished,
94 .scan_start = qla4xxx_scan_start,
93 95
94 .this_id = -1, 96 .this_id = -1,
95 .cmd_per_lun = 3, 97 .cmd_per_lun = 3,
@@ -299,6 +301,18 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha)
299 return ddb_entry; 301 return ddb_entry;
300} 302}
301 303
304static void qla4xxx_scan_start(struct Scsi_Host *shost)
305{
306 struct scsi_qla_host *ha = shost_priv(shost);
307 struct ddb_entry *ddb_entry, *ddbtemp;
308
309 /* finish setup of sessions that were already setup in firmware */
310 list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
311 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE)
312 qla4xxx_add_sess(ddb_entry);
313 }
314}
315
302/* 316/*
303 * Timer routines 317 * Timer routines
304 */ 318 */
@@ -864,8 +878,9 @@ static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha)
864 * qla4xxx_recover_adapter - recovers adapter after a fatal error 878 * qla4xxx_recover_adapter - recovers adapter after a fatal error
865 * @ha: Pointer to host adapter structure. 879 * @ha: Pointer to host adapter structure.
866 * @renew_ddb_list: Indicates what to do with the adapter's ddb list 880 * @renew_ddb_list: Indicates what to do with the adapter's ddb list
867 * after adapter recovery has completed. 881 *
868 * 0=preserve ddb list, 1=destroy and rebuild ddb list 882 * renew_ddb_list value can be 0=preserve ddb list, 1=destroy and rebuild
883 * ddb list.
869 **/ 884 **/
870static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, 885static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
871 uint8_t renew_ddb_list) 886 uint8_t renew_ddb_list)
@@ -874,6 +889,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha,
874 889
875 /* Stall incoming I/O until we are done */ 890 /* Stall incoming I/O until we are done */
876 clear_bit(AF_ONLINE, &ha->flags); 891 clear_bit(AF_ONLINE, &ha->flags);
892
877 DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no, 893 DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no,
878 __func__)); 894 __func__));
879 895
@@ -1176,7 +1192,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1176 int ret = -ENODEV, status; 1192 int ret = -ENODEV, status;
1177 struct Scsi_Host *host; 1193 struct Scsi_Host *host;
1178 struct scsi_qla_host *ha; 1194 struct scsi_qla_host *ha;
1179 struct ddb_entry *ddb_entry, *ddbtemp;
1180 uint8_t init_retry_count = 0; 1195 uint8_t init_retry_count = 0;
1181 char buf[34]; 1196 char buf[34];
1182 1197
@@ -1295,13 +1310,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1295 if (ret) 1310 if (ret)
1296 goto probe_failed; 1311 goto probe_failed;
1297 1312
1298 /* Update transport device information for all devices. */
1299 list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
1300 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE)
1301 if (qla4xxx_add_sess(ddb_entry))
1302 goto remove_host;
1303 }
1304
1305 printk(KERN_INFO 1313 printk(KERN_INFO
1306 " QLogic iSCSI HBA Driver version: %s\n" 1314 " QLogic iSCSI HBA Driver version: %s\n"
1307 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", 1315 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
@@ -1311,10 +1319,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1311 scsi_scan_host(host); 1319 scsi_scan_host(host);
1312 return 0; 1320 return 0;
1313 1321
1314remove_host:
1315 qla4xxx_free_ddb_list(ha);
1316 scsi_remove_host(host);
1317
1318probe_failed: 1322probe_failed:
1319 qla4xxx_free_adapter(ha); 1323 qla4xxx_free_adapter(ha);
1320 scsi_host_put(ha->host); 1324 scsi_host_put(ha->host);
@@ -1600,9 +1604,12 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
1600 return FAILED; 1604 return FAILED;
1601 } 1605 }
1602 1606
1603 if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS) { 1607 /* make sure the dpc thread is stopped while we reset the hba */
1608 clear_bit(AF_ONLINE, &ha->flags);
1609 flush_workqueue(ha->dpc_thread);
1610
1611 if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS)
1604 return_status = SUCCESS; 1612 return_status = SUCCESS;
1605 }
1606 1613
1607 dev_info(&ha->pdev->dev, "HOST RESET %s.\n", 1614 dev_info(&ha->pdev->dev, "HOST RESET %s.\n",
1608 return_status == FAILED ? "FAILED" : "SUCCEDED"); 1615 return_status == FAILED ? "FAILED" : "SUCCEDED");
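Note: qla4xxx now registers a ->scan_start() callback so sessions the firmware already logged in are added when the midlayer begins scanning, rather than in the probe path, and the host-reset path flushes the dpc workqueue before recovering so the dpc thread cannot race the reset. The new callback simply walks the adapter's ddb list and promotes every active entry; a loose userspace model of that walk over a plain singly linked list (qla4xxx_add_sess() is stubbed out):

    #include <stdio.h>

    #define DDB_DS_SESSION_ACTIVE 0x04   /* illustrative value */

    struct ddb_entry {
            int fw_ddb_device_state;
            struct ddb_entry *next;
    };

    /* stand-in for qla4xxx_add_sess() */
    static void add_sess(struct ddb_entry *ddb)
    {
            printf("registering session for ddb %p\n", (void *)ddb);
    }

    static void scan_start(struct ddb_entry *head)
    {
            struct ddb_entry *ddb;

            /* finish setup of sessions already set up in firmware */
            for (ddb = head; ddb; ddb = ddb->next)
                    if (ddb->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE)
                            add_sess(ddb);
    }

    int main(void)
    {
            struct ddb_entry b = { 0, NULL };
            struct ddb_entry a = { DDB_DS_SESSION_ACTIVE, &b };

            scan_start(&a);
            return 0;
    }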
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 65455ab1f3b9..4a1cf6377f6c 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -651,7 +651,7 @@ static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
651 651
652static irqreturn_t qpti_intr(int irq, void *dev_id); 652static irqreturn_t qpti_intr(int irq, void *dev_id);
653 653
654static void __init qpti_chain_add(struct qlogicpti *qpti) 654static void __devinit qpti_chain_add(struct qlogicpti *qpti)
655{ 655{
656 spin_lock_irq(&qptichain_lock); 656 spin_lock_irq(&qptichain_lock);
657 if (qptichain != NULL) { 657 if (qptichain != NULL) {
@@ -667,7 +667,7 @@ static void __init qpti_chain_add(struct qlogicpti *qpti)
667 spin_unlock_irq(&qptichain_lock); 667 spin_unlock_irq(&qptichain_lock);
668} 668}
669 669
670static void __init qpti_chain_del(struct qlogicpti *qpti) 670static void __devexit qpti_chain_del(struct qlogicpti *qpti)
671{ 671{
672 spin_lock_irq(&qptichain_lock); 672 spin_lock_irq(&qptichain_lock);
673 if (qptichain == qpti) { 673 if (qptichain == qpti) {
@@ -682,7 +682,7 @@ static void __init qpti_chain_del(struct qlogicpti *qpti)
682 spin_unlock_irq(&qptichain_lock); 682 spin_unlock_irq(&qptichain_lock);
683} 683}
684 684
685static int __init qpti_map_regs(struct qlogicpti *qpti) 685static int __devinit qpti_map_regs(struct qlogicpti *qpti)
686{ 686{
687 struct sbus_dev *sdev = qpti->sdev; 687 struct sbus_dev *sdev = qpti->sdev;
688 688
@@ -705,7 +705,7 @@ static int __init qpti_map_regs(struct qlogicpti *qpti)
705 return 0; 705 return 0;
706} 706}
707 707
708static int __init qpti_register_irq(struct qlogicpti *qpti) 708static int __devinit qpti_register_irq(struct qlogicpti *qpti)
709{ 709{
710 struct sbus_dev *sdev = qpti->sdev; 710 struct sbus_dev *sdev = qpti->sdev;
711 711
@@ -730,7 +730,7 @@ fail:
730 return -1; 730 return -1;
731} 731}
732 732
733static void __init qpti_get_scsi_id(struct qlogicpti *qpti) 733static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti)
734{ 734{
735 qpti->scsi_id = prom_getintdefault(qpti->prom_node, 735 qpti->scsi_id = prom_getintdefault(qpti->prom_node,
736 "initiator-id", 736 "initiator-id",
@@ -783,7 +783,7 @@ static void qpti_get_clock(struct qlogicpti *qpti)
783/* The request and response queues must each be aligned 783/* The request and response queues must each be aligned
784 * on a page boundary. 784 * on a page boundary.
785 */ 785 */
786static int __init qpti_map_queues(struct qlogicpti *qpti) 786static int __devinit qpti_map_queues(struct qlogicpti *qpti)
787{ 787{
788 struct sbus_dev *sdev = qpti->sdev; 788 struct sbus_dev *sdev = qpti->sdev;
789 789
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index fecba05b4e77..e5c6f6af8765 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -757,7 +757,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
757 "Notifying upper driver of completion " 757 "Notifying upper driver of completion "
758 "(result %x)\n", cmd->result)); 758 "(result %x)\n", cmd->result));
759 759
760 good_bytes = scsi_bufflen(cmd); 760 good_bytes = scsi_bufflen(cmd) + cmd->request->extra_len;
761 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { 761 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
762 drv = scsi_cmd_to_driver(cmd); 762 drv = scsi_cmd_to_driver(cmd);
763 if (drv->done) 763 if (drv->done)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 1541c174937a..d1777a9a9625 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -222,7 +222,7 @@ static struct scsi_host_template sdebug_driver_template = {
222 .cmd_per_lun = 16, 222 .cmd_per_lun = 16,
223 .max_sectors = 0xffff, 223 .max_sectors = 0xffff,
224 .unchecked_isa_dma = 0, 224 .unchecked_isa_dma = 0,
225 .use_clustering = ENABLE_CLUSTERING, 225 .use_clustering = DISABLE_CLUSTERING,
226 .module = THIS_MODULE, 226 .module = THIS_MODULE,
227}; 227};
228 228
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 1dc165ad17fb..e67c14e31bab 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1577,8 +1577,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1577} 1577}
1578 1578
1579/** 1579/**
1580 * scsi_scan_target - scan a target id, possibly including all LUNs on the 1580 * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
1581 * target.
1582 * @parent: host to scan 1581 * @parent: host to scan
1583 * @channel: channel to scan 1582 * @channel: channel to scan
1584 * @id: target id to scan 1583 * @id: target id to scan
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 3677fbb30b72..a0f308bd145b 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -103,7 +103,6 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
103 if (!cmd) 103 if (!cmd)
104 goto release_rq; 104 goto release_rq;
105 105
106 memset(cmd, 0, sizeof(*cmd));
107 cmd->sc_data_direction = data_dir; 106 cmd->sc_data_direction = data_dir;
108 cmd->jiffies_at_alloc = jiffies; 107 cmd->jiffies_at_alloc = jiffies;
109 cmd->request = rq; 108 cmd->request = rq;
@@ -382,6 +381,11 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
382 scsi_release_buffers(cmd); 381 scsi_release_buffers(cmd);
383 goto unmap_rq; 382 goto unmap_rq;
384 } 383 }
384 /*
385 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the
386 * length for us.
387 */
388 cmd->sdb.length = rq->data_len;
385 389
386 return 0; 390 return 0;
387 391
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index fac7534f3ec4..ca7bb6f63bde 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -33,7 +33,7 @@
33#define ISCSI_SESSION_ATTRS 19 33#define ISCSI_SESSION_ATTRS 19
34#define ISCSI_CONN_ATTRS 13 34#define ISCSI_CONN_ATTRS 13
35#define ISCSI_HOST_ATTRS 4 35#define ISCSI_HOST_ATTRS 4
36#define ISCSI_TRANSPORT_VERSION "2.0-868" 36#define ISCSI_TRANSPORT_VERSION "2.0-869"
37 37
38struct iscsi_internal { 38struct iscsi_internal {
39 int daemon_pid; 39 int daemon_pid;
@@ -231,7 +231,7 @@ static struct {
231 { ISCSI_SESSION_FREE, "FREE" }, 231 { ISCSI_SESSION_FREE, "FREE" },
232}; 232};
233 233
234const char *iscsi_session_state_name(int state) 234static const char *iscsi_session_state_name(int state)
235{ 235{
236 int i; 236 int i;
237 char *name = NULL; 237 char *name = NULL;
@@ -373,24 +373,25 @@ static void session_recovery_timedout(struct work_struct *work)
373 scsi_target_unblock(&session->dev); 373 scsi_target_unblock(&session->dev);
374} 374}
375 375
376void __iscsi_unblock_session(struct iscsi_cls_session *session) 376static void __iscsi_unblock_session(struct work_struct *work)
377{
378 if (!cancel_delayed_work(&session->recovery_work))
379 flush_workqueue(iscsi_eh_timer_workq);
380 scsi_target_unblock(&session->dev);
381}
382
383void iscsi_unblock_session(struct iscsi_cls_session *session)
384{ 377{
378 struct iscsi_cls_session *session =
379 container_of(work, struct iscsi_cls_session,
380 unblock_work);
385 struct Scsi_Host *shost = iscsi_session_to_shost(session); 381 struct Scsi_Host *shost = iscsi_session_to_shost(session);
386 struct iscsi_host *ihost = shost->shost_data; 382 struct iscsi_host *ihost = shost->shost_data;
387 unsigned long flags; 383 unsigned long flags;
388 384
385 /*
386 * The recovery and unblock work get run from the same workqueue,
387 * so try to cancel it if it was going to run after this unblock.
388 */
389 cancel_delayed_work(&session->recovery_work);
389 spin_lock_irqsave(&session->lock, flags); 390 spin_lock_irqsave(&session->lock, flags);
390 session->state = ISCSI_SESSION_LOGGED_IN; 391 session->state = ISCSI_SESSION_LOGGED_IN;
391 spin_unlock_irqrestore(&session->lock, flags); 392 spin_unlock_irqrestore(&session->lock, flags);
392 393 /* start IO */
393 __iscsi_unblock_session(session); 394 scsi_target_unblock(&session->dev);
394 /* 395 /*
395 * Only do kernel scanning if the driver is properly hooked into 396 * Only do kernel scanning if the driver is properly hooked into
396 * the async scanning code (drivers like iscsi_tcp do login and 397 * the async scanning code (drivers like iscsi_tcp do login and
@@ -401,20 +402,43 @@ void iscsi_unblock_session(struct iscsi_cls_session *session)
401 atomic_inc(&ihost->nr_scans); 402 atomic_inc(&ihost->nr_scans);
402 } 403 }
403} 404}
405
406/**
407 * iscsi_unblock_session - set a session as logged in and start IO.
408 * @session: iscsi session
409 *
410 * Mark a session as ready to accept IO.
411 */
412void iscsi_unblock_session(struct iscsi_cls_session *session)
413{
414 queue_work(iscsi_eh_timer_workq, &session->unblock_work);
415 /*
416 * make sure all the events have completed before tell the driver
417 * it is safe
418 */
419 flush_workqueue(iscsi_eh_timer_workq);
420}
404EXPORT_SYMBOL_GPL(iscsi_unblock_session); 421EXPORT_SYMBOL_GPL(iscsi_unblock_session);
405 422
406void iscsi_block_session(struct iscsi_cls_session *session) 423static void __iscsi_block_session(struct work_struct *work)
407{ 424{
425 struct iscsi_cls_session *session =
426 container_of(work, struct iscsi_cls_session,
427 block_work);
408 unsigned long flags; 428 unsigned long flags;
409 429
410 spin_lock_irqsave(&session->lock, flags); 430 spin_lock_irqsave(&session->lock, flags);
411 session->state = ISCSI_SESSION_FAILED; 431 session->state = ISCSI_SESSION_FAILED;
412 spin_unlock_irqrestore(&session->lock, flags); 432 spin_unlock_irqrestore(&session->lock, flags);
413
414 scsi_target_block(&session->dev); 433 scsi_target_block(&session->dev);
415 queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, 434 queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
416 session->recovery_tmo * HZ); 435 session->recovery_tmo * HZ);
417} 436}
437
438void iscsi_block_session(struct iscsi_cls_session *session)
439{
440 queue_work(iscsi_eh_timer_workq, &session->block_work);
441}
418EXPORT_SYMBOL_GPL(iscsi_block_session); 442EXPORT_SYMBOL_GPL(iscsi_block_session);
419 443
420static void __iscsi_unbind_session(struct work_struct *work) 444static void __iscsi_unbind_session(struct work_struct *work)
@@ -463,6 +487,8 @@ iscsi_alloc_session(struct Scsi_Host *shost,
463 INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); 487 INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
464 INIT_LIST_HEAD(&session->host_list); 488 INIT_LIST_HEAD(&session->host_list);
465 INIT_LIST_HEAD(&session->sess_list); 489 INIT_LIST_HEAD(&session->sess_list);
490 INIT_WORK(&session->unblock_work, __iscsi_unblock_session);
491 INIT_WORK(&session->block_work, __iscsi_block_session);
466 INIT_WORK(&session->unbind_work, __iscsi_unbind_session); 492 INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
467 INIT_WORK(&session->scan_work, iscsi_scan_session); 493 INIT_WORK(&session->scan_work, iscsi_scan_session);
468 spin_lock_init(&session->lock); 494 spin_lock_init(&session->lock);
@@ -575,24 +601,25 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
575 list_del(&session->sess_list); 601 list_del(&session->sess_list);
576 spin_unlock_irqrestore(&sesslock, flags); 602 spin_unlock_irqrestore(&sesslock, flags);
577 603
604 /* make sure there are no blocks/unblocks queued */
605 flush_workqueue(iscsi_eh_timer_workq);
606 /* make sure the timedout callout is not running */
607 if (!cancel_delayed_work(&session->recovery_work))
608 flush_workqueue(iscsi_eh_timer_workq);
578 /* 609 /*
579 * If we are blocked let commands flow again. The lld or iscsi 610 * If we are blocked let commands flow again. The lld or iscsi
580 * layer should set up the queuecommand to fail commands. 611 * layer should set up the queuecommand to fail commands.
612 * We assume that LLD will not be calling block/unblock while
613 * removing the session.
581 */ 614 */
582 spin_lock_irqsave(&session->lock, flags); 615 spin_lock_irqsave(&session->lock, flags);
583 session->state = ISCSI_SESSION_FREE; 616 session->state = ISCSI_SESSION_FREE;
584 spin_unlock_irqrestore(&session->lock, flags); 617 spin_unlock_irqrestore(&session->lock, flags);
585 __iscsi_unblock_session(session);
586 __iscsi_unbind_session(&session->unbind_work);
587 618
588 /* flush running scans */ 619 scsi_target_unblock(&session->dev);
620 /* flush running scans then delete devices */
589 flush_workqueue(ihost->scan_workq); 621 flush_workqueue(ihost->scan_workq);
590 /* 622 __iscsi_unbind_session(&session->unbind_work);
591 * If the session dropped while removing devices then we need to make
592 * sure it is not blocked
593 */
594 if (!cancel_delayed_work(&session->recovery_work))
595 flush_workqueue(iscsi_eh_timer_workq);
596 623
597 /* hw iscsi may not have removed all connections from session */ 624 /* hw iscsi may not have removed all connections from session */
598 err = device_for_each_child(&session->dev, NULL, 625 err = device_for_each_child(&session->dev, NULL,
@@ -802,23 +829,16 @@ EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
802 829
803void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) 830void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
804{ 831{
805 struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
806 struct nlmsghdr *nlh; 832 struct nlmsghdr *nlh;
807 struct sk_buff *skb; 833 struct sk_buff *skb;
808 struct iscsi_uevent *ev; 834 struct iscsi_uevent *ev;
809 struct iscsi_internal *priv; 835 struct iscsi_internal *priv;
810 int len = NLMSG_SPACE(sizeof(*ev)); 836 int len = NLMSG_SPACE(sizeof(*ev));
811 unsigned long flags;
812 837
813 priv = iscsi_if_transport_lookup(conn->transport); 838 priv = iscsi_if_transport_lookup(conn->transport);
814 if (!priv) 839 if (!priv)
815 return; 840 return;
816 841
817 spin_lock_irqsave(&session->lock, flags);
818 if (session->state == ISCSI_SESSION_LOGGED_IN)
819 session->state = ISCSI_SESSION_FAILED;
820 spin_unlock_irqrestore(&session->lock, flags);
821
822 skb = alloc_skb(len, GFP_ATOMIC); 842 skb = alloc_skb(len, GFP_ATOMIC);
823 if (!skb) { 843 if (!skb) {
824 iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " 844 iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
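Note: the block/unblock rework above moves the actual session state changes into work items on iscsi_eh_timer_workq. iscsi_unblock_session() queues the unblock work and then flushes the queue, so by the time it returns the target really has been unblocked and any pending recovery timer has been dealt with. A loose userspace analogy using a pthread in place of the workqueue, where pthread_join plays the role of flush_workqueue (all names are stand-ins):

    #include <pthread.h>
    #include <stdio.h>

    /* crude stand-in for the state touched by __iscsi_unblock_session() */
    static int session_state;                /* 0 = blocked, 1 = logged in */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void *unblock_work(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            session_state = 1;               /* mark LOGGED_IN, start IO */
            pthread_mutex_unlock(&lock);
            puts("unblock work ran: target unblocked");
            return NULL;
    }

    int main(void)
    {
            pthread_t worker;

            /* iscsi_unblock_session(): queue the work... */
            pthread_create(&worker, NULL, unblock_work, NULL);
            /* ...then wait for it, so the caller only returns once the
             * session is really unblocked (flush_workqueue analogue) */
            pthread_join(worker, NULL);

            pthread_mutex_lock(&lock);
            printf("state after return: %d\n", session_state);
            pthread_mutex_unlock(&lock);
            return 0;
    }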
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 37df8bbe7f46..7aee64dbfbeb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1835,8 +1835,7 @@ static int sd_suspend(struct device *dev, pm_message_t mesg)
1835 goto done; 1835 goto done;
1836 } 1836 }
1837 1837
1838 if (mesg.event == PM_EVENT_SUSPEND && 1838 if ((mesg.event & PM_EVENT_SLEEP) && sdkp->device->manage_start_stop) {
1839 sdkp->device->manage_start_stop) {
1840 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 1839 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
1841 ret = sd_start_stop_device(sdkp, 0); 1840 ret = sd_start_stop_device(sdkp, 0);
1842 } 1841 }
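Note: sd_suspend() used to spin the disk down only for PM_EVENT_SUSPEND; testing the PM_EVENT_SLEEP mask also covers hibernation, since PM_EVENT_SLEEP is the union of the suspend and hibernate event bits. A trivial illustration of the equality test versus the mask test (the event values are illustrative, roughly following include/linux/pm.h of that era):

    #include <stdio.h>

    #define PM_EVENT_FREEZE    0x0001
    #define PM_EVENT_SUSPEND   0x0002
    #define PM_EVENT_HIBERNATE 0x0004
    #define PM_EVENT_SLEEP     (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)

    int main(void)
    {
            int events[] = { PM_EVENT_SUSPEND, PM_EVENT_HIBERNATE };

            for (int i = 0; i < 2; i++) {
                    int ev = events[i];

                    printf("event 0x%x: old test %d, new test %d\n", ev,
                           ev == PM_EVENT_SUSPEND,       /* misses hibernate */
                           (ev & PM_EVENT_SLEEP) != 0);  /* stops disk for both */
            }
            return 0;
    }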
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index a57fed47b39d..a6d96694d0a5 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -33,9 +33,9 @@
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34 34
35struct ses_device { 35struct ses_device {
36 char *page1; 36 unsigned char *page1;
37 char *page2; 37 unsigned char *page2;
38 char *page10; 38 unsigned char *page10;
39 short page1_len; 39 short page1_len;
40 short page2_len; 40 short page2_len;
41 short page10_len; 41 short page10_len;
@@ -67,7 +67,7 @@ static int ses_probe(struct device *dev)
67static int ses_recv_diag(struct scsi_device *sdev, int page_code, 67static int ses_recv_diag(struct scsi_device *sdev, int page_code,
68 void *buf, int bufflen) 68 void *buf, int bufflen)
69{ 69{
70 char cmd[] = { 70 unsigned char cmd[] = {
71 RECEIVE_DIAGNOSTIC, 71 RECEIVE_DIAGNOSTIC,
72 1, /* Set PCV bit */ 72 1, /* Set PCV bit */
73 page_code, 73 page_code,
@@ -85,7 +85,7 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
85{ 85{
86 u32 result; 86 u32 result;
87 87
88 char cmd[] = { 88 unsigned char cmd[] = {
89 SEND_DIAGNOSTIC, 89 SEND_DIAGNOSTIC,
90 0x10, /* Set PF bit */ 90 0x10, /* Set PF bit */
91 0, 91 0,
@@ -104,13 +104,13 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
104 104
105static int ses_set_page2_descriptor(struct enclosure_device *edev, 105static int ses_set_page2_descriptor(struct enclosure_device *edev,
106 struct enclosure_component *ecomp, 106 struct enclosure_component *ecomp,
107 char *desc) 107 unsigned char *desc)
108{ 108{
109 int i, j, count = 0, descriptor = ecomp->number; 109 int i, j, count = 0, descriptor = ecomp->number;
110 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); 110 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev);
111 struct ses_device *ses_dev = edev->scratch; 111 struct ses_device *ses_dev = edev->scratch;
112 char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; 112 unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
113 char *desc_ptr = ses_dev->page2 + 8; 113 unsigned char *desc_ptr = ses_dev->page2 + 8;
114 114
115 /* Clear everything */ 115 /* Clear everything */
116 memset(desc_ptr, 0, ses_dev->page2_len - 8); 116 memset(desc_ptr, 0, ses_dev->page2_len - 8);
@@ -133,14 +133,14 @@ static int ses_set_page2_descriptor(struct enclosure_device *edev,
133 return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); 133 return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
134} 134}
135 135
136static char *ses_get_page2_descriptor(struct enclosure_device *edev, 136static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev,
137 struct enclosure_component *ecomp) 137 struct enclosure_component *ecomp)
138{ 138{
139 int i, j, count = 0, descriptor = ecomp->number; 139 int i, j, count = 0, descriptor = ecomp->number;
140 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); 140 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev);
141 struct ses_device *ses_dev = edev->scratch; 141 struct ses_device *ses_dev = edev->scratch;
142 char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; 142 unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
143 char *desc_ptr = ses_dev->page2 + 8; 143 unsigned char *desc_ptr = ses_dev->page2 + 8;
144 144
145 ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); 145 ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
146 146
@@ -160,17 +160,18 @@ static char *ses_get_page2_descriptor(struct enclosure_device *edev,
160static void ses_get_fault(struct enclosure_device *edev, 160static void ses_get_fault(struct enclosure_device *edev,
161 struct enclosure_component *ecomp) 161 struct enclosure_component *ecomp)
162{ 162{
163 char *desc; 163 unsigned char *desc;
164 164
165 desc = ses_get_page2_descriptor(edev, ecomp); 165 desc = ses_get_page2_descriptor(edev, ecomp);
166 ecomp->fault = (desc[3] & 0x60) >> 4; 166 if (desc)
167 ecomp->fault = (desc[3] & 0x60) >> 4;
167} 168}
168 169
169static int ses_set_fault(struct enclosure_device *edev, 170static int ses_set_fault(struct enclosure_device *edev,
170 struct enclosure_component *ecomp, 171 struct enclosure_component *ecomp,
171 enum enclosure_component_setting val) 172 enum enclosure_component_setting val)
172{ 173{
173 char desc[4] = {0 }; 174 unsigned char desc[4] = {0 };
174 175
175 switch (val) { 176 switch (val) {
176 case ENCLOSURE_SETTING_DISABLED: 177 case ENCLOSURE_SETTING_DISABLED:
@@ -190,26 +191,28 @@ static int ses_set_fault(struct enclosure_device *edev,
190static void ses_get_status(struct enclosure_device *edev, 191static void ses_get_status(struct enclosure_device *edev,
191 struct enclosure_component *ecomp) 192 struct enclosure_component *ecomp)
192{ 193{
193 char *desc; 194 unsigned char *desc;
194 195
195 desc = ses_get_page2_descriptor(edev, ecomp); 196 desc = ses_get_page2_descriptor(edev, ecomp);
196 ecomp->status = (desc[0] & 0x0f); 197 if (desc)
198 ecomp->status = (desc[0] & 0x0f);
197} 199}
198 200
199static void ses_get_locate(struct enclosure_device *edev, 201static void ses_get_locate(struct enclosure_device *edev,
200 struct enclosure_component *ecomp) 202 struct enclosure_component *ecomp)
201{ 203{
202 char *desc; 204 unsigned char *desc;
203 205
204 desc = ses_get_page2_descriptor(edev, ecomp); 206 desc = ses_get_page2_descriptor(edev, ecomp);
205 ecomp->locate = (desc[2] & 0x02) ? 1 : 0; 207 if (desc)
208 ecomp->locate = (desc[2] & 0x02) ? 1 : 0;
206} 209}
207 210
208static int ses_set_locate(struct enclosure_device *edev, 211static int ses_set_locate(struct enclosure_device *edev,
209 struct enclosure_component *ecomp, 212 struct enclosure_component *ecomp,
210 enum enclosure_component_setting val) 213 enum enclosure_component_setting val)
211{ 214{
212 char desc[4] = {0 }; 215 unsigned char desc[4] = {0 };
213 216
214 switch (val) { 217 switch (val) {
215 case ENCLOSURE_SETTING_DISABLED: 218 case ENCLOSURE_SETTING_DISABLED:
@@ -229,7 +232,7 @@ static int ses_set_active(struct enclosure_device *edev,
229 struct enclosure_component *ecomp, 232 struct enclosure_component *ecomp,
230 enum enclosure_component_setting val) 233 enum enclosure_component_setting val)
231{ 234{
232 char desc[4] = {0 }; 235 unsigned char desc[4] = {0 };
233 236
234 switch (val) { 237 switch (val) {
235 case ENCLOSURE_SETTING_DISABLED: 238 case ENCLOSURE_SETTING_DISABLED:
@@ -409,11 +412,11 @@ static int ses_intf_add(struct class_device *cdev,
409{ 412{
410 struct scsi_device *sdev = to_scsi_device(cdev->dev); 413 struct scsi_device *sdev = to_scsi_device(cdev->dev);
411 struct scsi_device *tmp_sdev; 414 struct scsi_device *tmp_sdev;
412 unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr, 415 unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr = NULL,
413 *addl_desc_ptr; 416 *addl_desc_ptr = NULL;
414 struct ses_device *ses_dev; 417 struct ses_device *ses_dev;
415 u32 result; 418 u32 result;
416 int i, j, types, len, components = 0; 419 int i, j, types, len, page7_len = 0, components = 0;
417 int err = -ENOMEM; 420 int err = -ENOMEM;
418 struct enclosure_device *edev; 421 struct enclosure_device *edev;
419 struct ses_component *scomp = NULL; 422 struct ses_component *scomp = NULL;
@@ -447,7 +450,7 @@ static int ses_intf_add(struct class_device *cdev,
447 * traversal routines more complex */ 450 * traversal routines more complex */
448 sdev_printk(KERN_ERR, sdev, 451 sdev_printk(KERN_ERR, sdev,
449 "FIXME driver has no support for subenclosures (%d)\n", 452 "FIXME driver has no support for subenclosures (%d)\n",
450 buf[1]); 453 hdr_buf[1]);
451 goto err_free; 454 goto err_free;
452 } 455 }
453 456
@@ -461,9 +464,8 @@ static int ses_intf_add(struct class_device *cdev,
461 goto recv_failed; 464 goto recv_failed;
462 465
463 types = buf[10]; 466 types = buf[10];
464 len = buf[11];
465 467
466 type_ptr = buf + 12 + len; 468 type_ptr = buf + 12 + buf[11];
467 469
468 for (i = 0; i < types; i++, type_ptr += 4) { 470 for (i = 0; i < types; i++, type_ptr += 4) {
469 if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || 471 if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
@@ -494,22 +496,21 @@ static int ses_intf_add(struct class_device *cdev,
494 /* The additional information page --- allows us 496 /* The additional information page --- allows us
495 * to match up the devices */ 497 * to match up the devices */
496 result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE); 498 result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE);
497 if (result) 499 if (!result) {
498 goto no_page10; 500
499 501 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
500 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; 502 buf = kzalloc(len, GFP_KERNEL);
501 buf = kzalloc(len, GFP_KERNEL); 503 if (!buf)
502 if (!buf) 504 goto err_free;
503 goto err_free; 505
504 506 result = ses_recv_diag(sdev, 10, buf, len);
505 result = ses_recv_diag(sdev, 10, buf, len); 507 if (result)
506 if (result) 508 goto recv_failed;
507 goto recv_failed; 509 ses_dev->page10 = buf;
508 ses_dev->page10 = buf; 510 ses_dev->page10_len = len;
509 ses_dev->page10_len = len; 511 buf = NULL;
510 buf = NULL; 512 }
511 513
512 no_page10:
513 scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL); 514 scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
514 if (!scomp) 515 if (!scomp)
515 goto err_free; 516 goto err_free;
@@ -530,7 +531,7 @@ static int ses_intf_add(struct class_device *cdev,
530 if (result) 531 if (result)
531 goto simple_populate; 532 goto simple_populate;
532 533
533 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; 534 page7_len = len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
534 /* add 1 for trailing '\0' we'll use */ 535 /* add 1 for trailing '\0' we'll use */
535 buf = kzalloc(len + 1, GFP_KERNEL); 536 buf = kzalloc(len + 1, GFP_KERNEL);
536 if (!buf) 537 if (!buf)
@@ -547,7 +548,8 @@ static int ses_intf_add(struct class_device *cdev,
547 len = (desc_ptr[2] << 8) + desc_ptr[3]; 548 len = (desc_ptr[2] << 8) + desc_ptr[3];
548 /* skip past overall descriptor */ 549 /* skip past overall descriptor */
549 desc_ptr += len + 4; 550 desc_ptr += len + 4;
550 addl_desc_ptr = ses_dev->page10 + 8; 551 if (ses_dev->page10)
552 addl_desc_ptr = ses_dev->page10 + 8;
551 } 553 }
552 type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; 554 type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
553 components = 0; 555 components = 0;
@@ -557,29 +559,35 @@ static int ses_intf_add(struct class_device *cdev,
557 struct enclosure_component *ecomp; 559 struct enclosure_component *ecomp;
558 560
559 if (desc_ptr) { 561 if (desc_ptr) {
560 len = (desc_ptr[2] << 8) + desc_ptr[3]; 562 if (desc_ptr >= buf + page7_len) {
561 desc_ptr += 4; 563 desc_ptr = NULL;
562 /* Add trailing zero - pushes into 564 } else {
563 * reserved space */ 565 len = (desc_ptr[2] << 8) + desc_ptr[3];
564 desc_ptr[len] = '\0'; 566 desc_ptr += 4;
565 name = desc_ptr; 567 /* Add trailing zero - pushes into
568 * reserved space */
569 desc_ptr[len] = '\0';
570 name = desc_ptr;
571 }
566 } 572 }
567 if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && 573 if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
568 type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE) 574 type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) {
569 continue; 575
570 ecomp = enclosure_component_register(edev, 576 ecomp = enclosure_component_register(edev,
571 components++, 577 components++,
572 type_ptr[0], 578 type_ptr[0],
573 name); 579 name);
574 if (desc_ptr) { 580
575 desc_ptr += len; 581 if (!IS_ERR(ecomp) && addl_desc_ptr)
576 if (!IS_ERR(ecomp))
577 ses_process_descriptor(ecomp, 582 ses_process_descriptor(ecomp,
578 addl_desc_ptr); 583 addl_desc_ptr);
579
580 if (addl_desc_ptr)
581 addl_desc_ptr += addl_desc_ptr[1] + 2;
582 } 584 }
585 if (desc_ptr)
586 desc_ptr += len;
587
588 if (addl_desc_ptr)
589 addl_desc_ptr += addl_desc_ptr[1] + 2;
590
583 } 591 }
584 } 592 }
585 kfree(buf); 593 kfree(buf);
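Note: the ses changes make diagnostic page 10 optional and bound the page 7 descriptor walk, so enclosures that lack those pages no longer cause NULL or out-of-range dereferences; the get_* helpers likewise only touch the page 2 descriptor when one was actually found. A small sketch of the defensive pattern (the buffer layout is invented for illustration):

    #include <stdio.h>
    #include <stddef.h>

    /* stand-in for ses_get_page2_descriptor(): may legitimately return
     * NULL when the descriptor for this component cannot be located */
    static unsigned char *get_page2_descriptor(unsigned char *page2, int found)
    {
            return found ? page2 + 8 : NULL;
    }

    static int get_fault(unsigned char *page2, int found)
    {
            unsigned char *desc = get_page2_descriptor(page2, found);

            /* only dereference the descriptor when it exists */
            if (desc)
                    return (desc[3] & 0x60) >> 4;
            return 0;
    }

    int main(void)
    {
            unsigned char page2[16] = { 0 };

            page2[11] = 0x60;       /* fault bits of the first descriptor */
            printf("fault = %d\n", get_fault(page2, 1));
            printf("fault = %d\n", get_fault(page2, 0));
            return 0;
    }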
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 71952703125a..0a52d9d2da2c 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -17,7 +17,7 @@
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20static const char *verstr = "20080117"; 20static const char *verstr = "20080221";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23 23
@@ -1172,7 +1172,7 @@ static int st_open(struct inode *inode, struct file *filp)
1172 STp->try_dio_now = STp->try_dio; 1172 STp->try_dio_now = STp->try_dio;
1173 STp->recover_count = 0; 1173 STp->recover_count = 0;
1174 DEB( STp->nbr_waits = STp->nbr_finished = 0; 1174 DEB( STp->nbr_waits = STp->nbr_finished = 0;
1175 STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = STp->nbr_combinable = 0; ) 1175 STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = 0; )
1176 1176
1177 retval = check_tape(STp, filp); 1177 retval = check_tape(STp, filp);
1178 if (retval < 0) 1178 if (retval < 0)
@@ -1226,8 +1226,8 @@ static int st_flush(struct file *filp, fl_owner_t id)
1226 } 1226 }
1227 1227
1228 DEBC( if (STp->nbr_requests) 1228 DEBC( if (STp->nbr_requests)
1229 printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n", 1229 printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d.\n",
1230 name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable)); 1230 name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages));
1231 1231
1232 if (STps->rw == ST_WRITING && !STp->pos_unknown) { 1232 if (STps->rw == ST_WRITING && !STp->pos_unknown) {
1233 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; 1233 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
@@ -1422,9 +1422,6 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
1422 if (STbp->do_dio) { 1422 if (STbp->do_dio) {
1423 STp->nbr_dio++; 1423 STp->nbr_dio++;
1424 STp->nbr_pages += STbp->do_dio; 1424 STp->nbr_pages += STbp->do_dio;
1425 for (i=1; i < STbp->do_dio; i++)
1426 if (page_to_pfn(STbp->sg[i].page) == page_to_pfn(STbp->sg[i-1].page) + 1)
1427 STp->nbr_combinable++;
1428 } 1425 }
1429 ) 1426 )
1430 } else 1427 } else
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 6c8075712974..5931726fcf93 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -164,7 +164,6 @@ struct scsi_tape {
164 int nbr_requests; 164 int nbr_requests;
165 int nbr_dio; 165 int nbr_dio;
166 int nbr_pages; 166 int nbr_pages;
167 int nbr_combinable;
168 unsigned char last_cmnd[6]; 167 unsigned char last_cmnd[6];
169 unsigned char last_sense[16]; 168 unsigned char last_sense[16];
170#endif 169#endif
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 72f6d8015358..654430edf74d 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -461,30 +461,14 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
461 } 461 }
462} 462}
463 463
464static int stex_direct_copy(struct scsi_cmnd *cmd,
465 const void *src, size_t count)
466{
467 size_t cp_len = count;
468 int n_elem = 0;
469
470 n_elem = scsi_dma_map(cmd);
471 if (n_elem < 0)
472 return 0;
473
474 stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD);
475
476 scsi_dma_unmap(cmd);
477
478 return cp_len == count;
479}
480
481static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) 464static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
482{ 465{
483 struct st_frame *p; 466 struct st_frame *p;
484 size_t count = sizeof(struct st_frame); 467 size_t count = sizeof(struct st_frame);
485 468
486 p = hba->copy_buffer; 469 p = hba->copy_buffer;
487 stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_FROM_CMD); 470 stex_internal_copy(ccb->cmd, p, &count, scsi_sg_count(ccb->cmd),
471 ST_FROM_CMD);
488 memset(p->base, 0, sizeof(u32)*6); 472 memset(p->base, 0, sizeof(u32)*6);
489 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); 473 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
490 p->rom_addr = 0; 474 p->rom_addr = 0;
@@ -502,7 +486,8 @@ static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
502 p->subid = 486 p->subid =
503 hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; 487 hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;
504 488
505 stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_TO_CMD); 489 stex_internal_copy(ccb->cmd, p, &count, scsi_sg_count(ccb->cmd),
490 ST_TO_CMD);
506} 491}
507 492
508static void 493static void
@@ -569,8 +554,10 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
569 unsigned char page; 554 unsigned char page;
570 page = cmd->cmnd[2] & 0x3f; 555 page = cmd->cmnd[2] & 0x3f;
571 if (page == 0x8 || page == 0x3f) { 556 if (page == 0x8 || page == 0x3f) {
572 stex_direct_copy(cmd, ms10_caching_page, 557 size_t cp_len = sizeof(ms10_caching_page);
573 sizeof(ms10_caching_page)); 558 stex_internal_copy(cmd, ms10_caching_page,
559 &cp_len, scsi_sg_count(cmd),
560 ST_TO_CMD);
574 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 561 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
575 done(cmd); 562 done(cmd);
576 } else 563 } else
@@ -599,8 +586,10 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
599 if (id != host->max_id - 1) 586 if (id != host->max_id - 1)
600 break; 587 break;
601 if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) { 588 if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
602 stex_direct_copy(cmd, console_inq_page, 589 size_t cp_len = sizeof(console_inq_page);
603 sizeof(console_inq_page)); 590 stex_internal_copy(cmd, console_inq_page,
591 &cp_len, scsi_sg_count(cmd),
592 ST_TO_CMD);
604 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 593 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
605 done(cmd); 594 done(cmd);
606 } else 595 } else
@@ -609,6 +598,7 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
609 case PASSTHRU_CMD: 598 case PASSTHRU_CMD:
610 if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { 599 if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
611 struct st_drvver ver; 600 struct st_drvver ver;
601 size_t cp_len = sizeof(ver);
612 ver.major = ST_VER_MAJOR; 602 ver.major = ST_VER_MAJOR;
613 ver.minor = ST_VER_MINOR; 603 ver.minor = ST_VER_MINOR;
614 ver.oem = ST_OEM; 604 ver.oem = ST_OEM;
@@ -616,7 +606,9 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
616 ver.signature[0] = PASSTHRU_SIGNATURE; 606 ver.signature[0] = PASSTHRU_SIGNATURE;
617 ver.console_id = host->max_id - 1; 607 ver.console_id = host->max_id - 1;
618 ver.host_no = hba->host->host_no; 608 ver.host_no = hba->host->host_no;
619 cmd->result = stex_direct_copy(cmd, &ver, sizeof(ver)) ? 609 stex_internal_copy(cmd, &ver, &cp_len,
610 scsi_sg_count(cmd), ST_TO_CMD);
611 cmd->result = sizeof(ver) == cp_len ?
620 DID_OK << 16 | COMMAND_COMPLETE << 8 : 612 DID_OK << 16 | COMMAND_COMPLETE << 8 :
621 DID_ERROR << 16 | COMMAND_COMPLETE << 8; 613 DID_ERROR << 16 | COMMAND_COMPLETE << 8;
622 done(cmd); 614 done(cmd);
@@ -709,7 +701,7 @@ static void stex_copy_data(struct st_ccb *ccb,
709 if (ccb->cmd == NULL) 701 if (ccb->cmd == NULL)
710 return; 702 return;
711 stex_internal_copy(ccb->cmd, 703 stex_internal_copy(ccb->cmd,
712 resp->variable, &count, ccb->sg_count, ST_TO_CMD); 704 resp->variable, &count, scsi_sg_count(ccb->cmd), ST_TO_CMD);
713} 705}
714 706
715static void stex_ys_commands(struct st_hba *hba, 707static void stex_ys_commands(struct st_hba *hba,
@@ -734,7 +726,7 @@ static void stex_ys_commands(struct st_hba *hba,
734 726
735 count = STEX_EXTRA_SIZE; 727 count = STEX_EXTRA_SIZE;
736 stex_internal_copy(ccb->cmd, hba->copy_buffer, 728 stex_internal_copy(ccb->cmd, hba->copy_buffer,
737 &count, ccb->sg_count, ST_FROM_CMD); 729 &count, scsi_sg_count(ccb->cmd), ST_FROM_CMD);
738 inq_data = (ST_INQ *)hba->copy_buffer; 730 inq_data = (ST_INQ *)hba->copy_buffer;
739 if (inq_data->DeviceTypeQualifier != 0) 731 if (inq_data->DeviceTypeQualifier != 0)
740 ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT; 732 ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT;
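Note: stex drops its stex_direct_copy() wrapper and passes scsi_sg_count(cmd) to the copy helper directly; for the PASSTHRU_GET_DRVVER path, success is now judged by whether the whole structure fit into the command's buffer. A toy model of that bounded copy and the completion decision, with memcpy standing in for the scatter/gather copy:

    #include <stdio.h>
    #include <string.h>

    #define DID_OK    0x00
    #define DID_ERROR 0x07

    /* copy at most *count bytes into dst and report how much was copied,
     * loosely mirroring stex_internal_copy()'s in/out length argument */
    static void bounded_copy(void *dst, size_t dst_len,
                             const void *src, size_t *count)
    {
            if (*count > dst_len)
                    *count = dst_len;
            memcpy(dst, src, *count);
    }

    int main(void)
    {
            char ver[32] = "8.02 driver version";  /* pretend st_drvver */
            char sg_buf[16];                       /* pretend data buffer */
            size_t cp_len = sizeof(ver);
            int result;

            bounded_copy(sg_buf, sizeof(sg_buf), ver, &cp_len);
            /* only report DID_OK if the full structure was returned */
            result = (cp_len == sizeof(ver)) ? DID_OK << 16 : DID_ERROR << 16;
            printf("copied %zu bytes, result 0x%x\n", cp_len, result);
            return 0;
    }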
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index 6f09cbd7fc48..97c68d021d28 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -91,6 +91,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
91 /* Archtek America Corp. */ 91 /* Archtek America Corp. */
92 /* Archtek SmartLink Modem 3334BT Plug & Play */ 92 /* Archtek SmartLink Modem 3334BT Plug & Play */
93 { "GVC000F", 0 }, 93 { "GVC000F", 0 },
94 /* Archtek SmartLink Modem 3334BRV 33.6K Data Fax Voice */
95 { "GVC0303", 0 },
94 /* Hayes */ 96 /* Hayes */
95 /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */ 97 /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */
96 { "HAY0001", 0 }, 98 { "HAY0001", 0 },
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index b82595cf13e8..cf627cd1b4c8 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -686,7 +686,7 @@ config UART0_RTS_PIN
686 686
687config SERIAL_BFIN_UART1 687config SERIAL_BFIN_UART1
688 bool "Enable UART1" 688 bool "Enable UART1"
689 depends on SERIAL_BFIN && (BF534 || BF536 || BF537 || BF54x) 689 depends on SERIAL_BFIN && (!BF531 && !BF532 && !BF533 && !BF561)
690 help 690 help
691 Enable UART1 691 Enable UART1
692 692
@@ -699,14 +699,14 @@ config BFIN_UART1_CTSRTS
699 699
700config UART1_CTS_PIN 700config UART1_CTS_PIN
701 int "UART1 CTS pin" 701 int "UART1 CTS pin"
702 depends on BFIN_UART1_CTSRTS && (BF53x || BF561) 702 depends on BFIN_UART1_CTSRTS && !BF54x
703 default -1 703 default -1
704 help 704 help
705 Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. 705 Refer to ./include/asm-blackfin/gpio.h to see the GPIO map.
706 706
707config UART1_RTS_PIN 707config UART1_RTS_PIN
708 int "UART1 RTS pin" 708 int "UART1 RTS pin"
709 depends on BFIN_UART1_CTSRTS && (BF53x || BF561) 709 depends on BFIN_UART1_CTSRTS && !BF54x
710 default -1 710 default -1
711 help 711 help
712 Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. 712 Refer to ./include/asm-blackfin/gpio.h to see the GPIO map.
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index fad245b064d6..d57bf3e708d8 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -549,7 +549,7 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
549 atmel_handle_transmit(port, pending); 549 atmel_handle_transmit(port, pending);
550 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); 550 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
551 551
552 return IRQ_HANDLED; 552 return pass_counter ? IRQ_HANDLED : IRQ_NONE;
553} 553}
554 554
555/* 555/*
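Note: returning IRQ_HANDLED unconditionally defeats the kernel's spurious-interrupt detection on shared lines; the atmel handler now reports IRQ_NONE when it made no passes over the status register, i.e. when the interrupt was not ours. A stripped-down model of that convention:

    #include <stdio.h>

    enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 };

    /* pretend ISR: 'pending' says how much work our device raised */
    static enum irqreturn fake_isr(int pending)
    {
            int pass_counter = 0;

            while (pending--)
                    pass_counter++;          /* service the device */

            /* claim the interrupt only if we actually did some work */
            return pass_counter ? IRQ_HANDLED : IRQ_NONE;
    }

    int main(void)
    {
            printf("%d %d\n", fake_isr(3), fake_isr(0));
            return 0;
    }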
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index ac2a3ef28d55..0aa345b9a38b 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -1,30 +1,11 @@
1/* 1/*
2 * File: drivers/serial/bfin_5xx.c 2 * Blackfin On-Chip Serial Driver
3 * Based on: Based on drivers/serial/sa1100.c
4 * Author: Aubrey Li <aubrey.li@analog.com>
5 * 3 *
6 * Created: 4 * Copyright 2006-2007 Analog Devices Inc.
7 * Description: Driver for blackfin 5xx serial ports
8 * 5 *
9 * Modified: 6 * Enter bugs at http://blackfin.uclinux.org/
10 * Copyright 2006 Analog Devices Inc.
11 * 7 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 8 * Licensed under the GPL-2 or later.
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */ 9 */
29 10
30#if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 11#if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -67,14 +48,12 @@
67#define DMA_RX_XCOUNT 512 48#define DMA_RX_XCOUNT 512
68#define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT) 49#define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT)
69 50
70#define DMA_RX_FLUSH_JIFFIES 5 51#define DMA_RX_FLUSH_JIFFIES (HZ / 50)
71 52
72#ifdef CONFIG_SERIAL_BFIN_DMA 53#ifdef CONFIG_SERIAL_BFIN_DMA
73static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart); 54static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart);
74#else 55#else
75static void bfin_serial_do_work(struct work_struct *work);
76static void bfin_serial_tx_chars(struct bfin_serial_port *uart); 56static void bfin_serial_tx_chars(struct bfin_serial_port *uart);
77static void local_put_char(struct bfin_serial_port *uart, char ch);
78#endif 57#endif
79 58
80static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); 59static void bfin_serial_mctrl_check(struct bfin_serial_port *uart);
@@ -85,23 +64,26 @@ static void bfin_serial_mctrl_check(struct bfin_serial_port *uart);
85static void bfin_serial_stop_tx(struct uart_port *port) 64static void bfin_serial_stop_tx(struct uart_port *port)
86{ 65{
87 struct bfin_serial_port *uart = (struct bfin_serial_port *)port; 66 struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
67 struct circ_buf *xmit = &uart->port.info->xmit;
68#if !defined(CONFIG_BF54x) && !defined(CONFIG_SERIAL_BFIN_DMA)
69 unsigned short ier;
70#endif
88 71
89 while (!(UART_GET_LSR(uart) & TEMT)) 72 while (!(UART_GET_LSR(uart) & TEMT))
90 continue; 73 cpu_relax();
91 74
92#ifdef CONFIG_SERIAL_BFIN_DMA 75#ifdef CONFIG_SERIAL_BFIN_DMA
93 disable_dma(uart->tx_dma_channel); 76 disable_dma(uart->tx_dma_channel);
77 xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
78 uart->port.icount.tx += uart->tx_count;
79 uart->tx_count = 0;
80 uart->tx_done = 1;
94#else 81#else
95#ifdef CONFIG_BF54x 82#ifdef CONFIG_BF54x
96 /* Waiting for Transmission Finished */
97 while (!(UART_GET_LSR(uart) & TFI))
98 continue;
99 /* Clear TFI bit */ 83 /* Clear TFI bit */
100 UART_PUT_LSR(uart, TFI); 84 UART_PUT_LSR(uart, TFI);
101 UART_CLEAR_IER(uart, ETBEI); 85 UART_CLEAR_IER(uart, ETBEI);
102#else 86#else
103 unsigned short ier;
104
105 ier = UART_GET_IER(uart); 87 ier = UART_GET_IER(uart);
106 ier &= ~ETBEI; 88 ier &= ~ETBEI;
107 UART_PUT_IER(uart, ier); 89 UART_PUT_IER(uart, ier);
@@ -117,7 +99,8 @@ static void bfin_serial_start_tx(struct uart_port *port)
117 struct bfin_serial_port *uart = (struct bfin_serial_port *)port; 99 struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
118 100
119#ifdef CONFIG_SERIAL_BFIN_DMA 101#ifdef CONFIG_SERIAL_BFIN_DMA
120 bfin_serial_dma_tx_chars(uart); 102 if (uart->tx_done)
103 bfin_serial_dma_tx_chars(uart);
121#else 104#else
122#ifdef CONFIG_BF54x 105#ifdef CONFIG_BF54x
123 UART_SET_IER(uart, ETBEI); 106 UART_SET_IER(uart, ETBEI);
@@ -209,34 +192,27 @@ int kgdb_get_debug_char(void)
209} 192}
210#endif 193#endif
211 194
212#ifdef CONFIG_SERIAL_BFIN_PIO 195#if ANOMALY_05000230 && defined(CONFIG_SERIAL_BFIN_PIO)
213static void local_put_char(struct bfin_serial_port *uart, char ch) 196# define UART_GET_ANOMALY_THRESHOLD(uart) ((uart)->anomaly_threshold)
214{ 197# define UART_SET_ANOMALY_THRESHOLD(uart, v) ((uart)->anomaly_threshold = (v))
215 unsigned short status; 198#else
216 int flags = 0; 199# define UART_GET_ANOMALY_THRESHOLD(uart) 0
217 200# define UART_SET_ANOMALY_THRESHOLD(uart, v)
218 spin_lock_irqsave(&uart->port.lock, flags); 201#endif
219
220 do {
221 status = UART_GET_LSR(uart);
222 } while (!(status & THRE));
223
224 UART_PUT_CHAR(uart, ch);
225 SSYNC();
226
227 spin_unlock_irqrestore(&uart->port.lock, flags);
228}
229 202
203#ifdef CONFIG_SERIAL_BFIN_PIO
230static void bfin_serial_rx_chars(struct bfin_serial_port *uart) 204static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
231{ 205{
232 struct tty_struct *tty = uart->port.info->tty; 206 struct tty_struct *tty = uart->port.info->tty;
233 unsigned int status, ch, flg; 207 unsigned int status, ch, flg;
234 static int in_break = 0; 208 static struct timeval anomaly_start = { .tv_sec = 0 };
235#ifdef CONFIG_KGDB_UART 209#ifdef CONFIG_KGDB_UART
236 struct pt_regs *regs = get_irq_regs(); 210 struct pt_regs *regs = get_irq_regs();
237#endif 211#endif
238 212
239 status = UART_GET_LSR(uart); 213 status = UART_GET_LSR(uart);
214 UART_CLEAR_LSR(uart);
215
240 ch = UART_GET_CHAR(uart); 216 ch = UART_GET_CHAR(uart);
241 uart->port.icount.rx++; 217 uart->port.icount.rx++;
242 218
@@ -262,28 +238,56 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
262#endif 238#endif
263 239
264 if (ANOMALY_05000230) { 240 if (ANOMALY_05000230) {
265 /* The BF533 family of processors have a nice misbehavior where 241 /* The BF533 (and BF561) family of processors have a nice anomaly
266 * they continuously generate characters for a "single" break. 242 * where they continuously generate characters for a "single" break.
267 * We have to basically ignore this flood until the "next" valid 243 * We have to basically ignore this flood until the "next" valid
268 * character comes across. All other Blackfin families operate 244 * character comes across. Due to the nature of the flood, it is
269 * properly though. 245 * not possible to reliably catch bytes that are sent too quickly
246 * after this break. So application code talking to the Blackfin
247 * which sends a break signal must allow at least 1.5 character
248 * times after the end of the break for things to stabilize. This
249 * timeout was picked as it must absolutely be larger than 1
250 * character time +/- some percent. So 1.5 sounds good. All other
251 * Blackfin families operate properly. Woo.
270 * Note: While Anomaly 05000230 does not directly address this, 252 * Note: While Anomaly 05000230 does not directly address this,
271 * the changes that went in for it also fixed this issue. 253 * the changes that went in for it also fixed this issue.
254 * That anomaly was fixed in 0.5+ silicon. I like bunnies.
272 */ 255 */
273 if (in_break) { 256 if (anomaly_start.tv_sec) {
274 if (ch != 0) { 257 struct timeval curr;
275 in_break = 0; 258 suseconds_t usecs;
276 ch = UART_GET_CHAR(uart); 259
277 if (bfin_revid() < 5) 260 if ((~ch & (~ch + 1)) & 0xff)
278 return; 261 goto known_good_char;
279 } else 262
280 return; 263 do_gettimeofday(&curr);
264 if (curr.tv_sec - anomaly_start.tv_sec > 1)
265 goto known_good_char;
266
267 usecs = 0;
268 if (curr.tv_sec != anomaly_start.tv_sec)
269 usecs += USEC_PER_SEC;
270 usecs += curr.tv_usec - anomaly_start.tv_usec;
271
272 if (usecs > UART_GET_ANOMALY_THRESHOLD(uart))
273 goto known_good_char;
274
275 if (ch)
276 anomaly_start.tv_sec = 0;
277 else
278 anomaly_start = curr;
279
280 return;
281
282 known_good_char:
283 anomaly_start.tv_sec = 0;
281 } 284 }
282 } 285 }
283 286
284 if (status & BI) { 287 if (status & BI) {
285 if (ANOMALY_05000230) 288 if (ANOMALY_05000230)
286 in_break = 1; 289 if (bfin_revid() < 5)
290 do_gettimeofday(&anomaly_start);
287 uart->port.icount.brk++; 291 uart->port.icount.brk++;
288 if (uart_handle_break(&uart->port)) 292 if (uart_handle_break(&uart->port))
289 goto ignore_char; 293 goto ignore_char;
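Note: the in_break flag is replaced with a timestamp. When a break is seen on affected silicon the driver records the time with do_gettimeofday() and then discards the character flood until a clearly valid byte arrives or a per-port threshold of microseconds has elapsed. A compact userspace sketch of that time-based debounce using gettimeofday(); this is simplified, and the driver's actual test for a valid byte is more involved:

    #include <stdio.h>
    #include <sys/time.h>

    static struct timeval anomaly_start;   /* .tv_sec == 0 means "no break" */

    /* returns 1 if 'ch' should be dropped as break-flood residue */
    static int drop_break_flood(unsigned char ch, long threshold_us)
    {
            struct timeval curr;
            long usecs;

            if (!anomaly_start.tv_sec)
                    return 0;                       /* no break in progress */

            gettimeofday(&curr, NULL);
            usecs = (curr.tv_sec - anomaly_start.tv_sec) * 1000000L +
                    (curr.tv_usec - anomaly_start.tv_usec);

            if (ch != 0 || usecs > threshold_us) {
                    anomaly_start.tv_sec = 0;       /* flood is over */
                    return 0;
            }
            return 1;                               /* still inside the flood */
    }

    int main(void)
    {
            gettimeofday(&anomaly_start, NULL);     /* pretend a break arrived */
            printf("drop=%d\n", drop_break_flood(0, 2000));
            printf("drop=%d\n", drop_break_flood('A', 2000));
            return 0;
    }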
@@ -324,7 +328,6 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
324 UART_PUT_CHAR(uart, uart->port.x_char); 328 UART_PUT_CHAR(uart, uart->port.x_char);
325 uart->port.icount.tx++; 329 uart->port.icount.tx++;
326 uart->port.x_char = 0; 330 uart->port.x_char = 0;
327 return;
328 } 331 }
329 /* 332 /*
330 * Check the modem control lines before 333 * Check the modem control lines before
@@ -337,9 +340,12 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
337 return; 340 return;
338 } 341 }
339 342
340 local_put_char(uart, xmit->buf[xmit->tail]); 343 while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) {
341 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 344 UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
342 uart->port.icount.tx++; 345 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
346 uart->port.icount.tx++;
347 SSYNC();
348 }
343 349
344 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 350 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
345 uart_write_wakeup(&uart->port); 351 uart_write_wakeup(&uart->port);
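
The rewritten transmit path above drains the circular buffer for as long as the transmit-holding register stays empty, instead of pushing a single byte per call. A hedged sketch of that drain loop over a plain ring buffer, with the hardware register access stubbed out (thr_empty/put_char are stand-ins, not driver APIs):

#include <stdint.h>
#include <stdio.h>

#define XMIT_SIZE 64                  /* power of two, like UART_XMIT_SIZE */

struct ring {
    uint8_t buf[XMIT_SIZE];
    unsigned head, tail;
};

/* Stand-ins for UART_GET_LSR()/UART_PUT_CHAR(); here the FIFO always accepts data. */
static int thr_empty(void) { return 1; }
static void put_char(uint8_t c) { putchar(c); }

/* Push bytes while the holding register is empty and data is pending,
 * wrapping the tail with a power-of-two mask as the driver does. */
static unsigned drain(struct ring *r)
{
    unsigned sent = 0;

    while (thr_empty() && r->tail != r->head) {
        put_char(r->buf[r->tail]);
        r->tail = (r->tail + 1) & (XMIT_SIZE - 1);
        sent++;
    }
    return sent;
}

int main(void)
{
    struct ring r = { .buf = "hello", .head = 5, .tail = 0 };
    printf("\nsent %u bytes\n", drain(&r));
    return 0;
}
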
@@ -352,21 +358,11 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id)
352{ 358{
353 struct bfin_serial_port *uart = dev_id; 359 struct bfin_serial_port *uart = dev_id;
354 360
355#ifdef CONFIG_BF54x
356 unsigned short status;
357 spin_lock(&uart->port.lock);
358 status = UART_GET_LSR(uart);
359 while ((UART_GET_IER(uart) & ERBFI) && (status & DR)) {
360 bfin_serial_rx_chars(uart);
361 status = UART_GET_LSR(uart);
362 }
363 spin_unlock(&uart->port.lock);
364#else
365 spin_lock(&uart->port.lock); 361 spin_lock(&uart->port.lock);
366 while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_RX_READY) 362 while (UART_GET_LSR(uart) & DR)
367 bfin_serial_rx_chars(uart); 363 bfin_serial_rx_chars(uart);
368 spin_unlock(&uart->port.lock); 364 spin_unlock(&uart->port.lock);
369#endif 365
370 return IRQ_HANDLED; 366 return IRQ_HANDLED;
371} 367}
372 368
@@ -374,25 +370,16 @@ static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
374{ 370{
375 struct bfin_serial_port *uart = dev_id; 371 struct bfin_serial_port *uart = dev_id;
376 372
377#ifdef CONFIG_BF54x
378 unsigned short status;
379 spin_lock(&uart->port.lock); 373 spin_lock(&uart->port.lock);
380 status = UART_GET_LSR(uart); 374 if (UART_GET_LSR(uart) & THRE)
381 while ((UART_GET_IER(uart) & ETBEI) && (status & THRE)) {
382 bfin_serial_tx_chars(uart); 375 bfin_serial_tx_chars(uart);
383 status = UART_GET_LSR(uart);
384 }
385 spin_unlock(&uart->port.lock); 376 spin_unlock(&uart->port.lock);
386#else 377
387 spin_lock(&uart->port.lock);
388 while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_TX_READY)
389 bfin_serial_tx_chars(uart);
390 spin_unlock(&uart->port.lock);
391#endif
392 return IRQ_HANDLED; 378 return IRQ_HANDLED;
393} 379}
380#endif
394 381
395 382#ifdef CONFIG_SERIAL_BFIN_CTSRTS
396static void bfin_serial_do_work(struct work_struct *work) 383static void bfin_serial_do_work(struct work_struct *work)
397{ 384{
398 struct bfin_serial_port *uart = container_of(work, struct bfin_serial_port, cts_workqueue); 385 struct bfin_serial_port *uart = container_of(work, struct bfin_serial_port, cts_workqueue);
@@ -406,33 +393,27 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
406{ 393{
407 struct circ_buf *xmit = &uart->port.info->xmit; 394 struct circ_buf *xmit = &uart->port.info->xmit;
408 unsigned short ier; 395 unsigned short ier;
409 int flags = 0;
410
411 if (!uart->tx_done)
412 return;
413 396
414 uart->tx_done = 0; 397 uart->tx_done = 0;
415 398
399 if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
400 uart->tx_count = 0;
401 uart->tx_done = 1;
402 return;
403 }
404
416 if (uart->port.x_char) { 405 if (uart->port.x_char) {
417 UART_PUT_CHAR(uart, uart->port.x_char); 406 UART_PUT_CHAR(uart, uart->port.x_char);
418 uart->port.icount.tx++; 407 uart->port.icount.tx++;
419 uart->port.x_char = 0; 408 uart->port.x_char = 0;
420 uart->tx_done = 1;
421 return;
422 } 409 }
410
423 /* 411 /*
424 * Check the modem control lines before 412 * Check the modem control lines before
425 * transmitting anything. 413 * transmitting anything.
426 */ 414 */
427 bfin_serial_mctrl_check(uart); 415 bfin_serial_mctrl_check(uart);
428 416
429 if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) {
430 bfin_serial_stop_tx(&uart->port);
431 uart->tx_done = 1;
432 return;
433 }
434
435 spin_lock_irqsave(&uart->port.lock, flags);
436 uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); 417 uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
437 if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail)) 418 if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail))
438 uart->tx_count = UART_XMIT_SIZE - xmit->tail; 419 uart->tx_count = UART_XMIT_SIZE - xmit->tail;
@@ -448,6 +429,7 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
448 set_dma_x_count(uart->tx_dma_channel, uart->tx_count); 429 set_dma_x_count(uart->tx_dma_channel, uart->tx_count);
449 set_dma_x_modify(uart->tx_dma_channel, 1); 430 set_dma_x_modify(uart->tx_dma_channel, 1);
450 enable_dma(uart->tx_dma_channel); 431 enable_dma(uart->tx_dma_channel);
432
451#ifdef CONFIG_BF54x 433#ifdef CONFIG_BF54x
452 UART_SET_IER(uart, ETBEI); 434 UART_SET_IER(uart, ETBEI);
453#else 435#else
@@ -455,7 +437,6 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart)
455 ier |= ETBEI; 437 ier |= ETBEI;
456 UART_PUT_IER(uart, ier); 438 UART_PUT_IER(uart, ier);
457#endif 439#endif
458 spin_unlock_irqrestore(&uart->port.lock, flags);
459} 440}
460 441
461static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) 442static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
@@ -464,7 +445,11 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
464 int i, flg, status; 445 int i, flg, status;
465 446
466 status = UART_GET_LSR(uart); 447 status = UART_GET_LSR(uart);
467 uart->port.icount.rx += CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, UART_XMIT_SIZE);; 448 UART_CLEAR_LSR(uart);
449
450 uart->port.icount.rx +=
451 CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail,
452 UART_XMIT_SIZE);
468 453
469 if (status & BI) { 454 if (status & BI) {
470 uart->port.icount.brk++; 455 uart->port.icount.brk++;
@@ -490,10 +475,12 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
490 else 475 else
491 flg = TTY_NORMAL; 476 flg = TTY_NORMAL;
492 477
493 for (i = uart->rx_dma_buf.head; i < uart->rx_dma_buf.tail; i++) { 478 for (i = uart->rx_dma_buf.tail; i != uart->rx_dma_buf.head; i++) {
494 if (uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i])) 479 if (i >= UART_XMIT_SIZE)
495 goto dma_ignore_char; 480 i = 0;
496 uart_insert_char(&uart->port, status, OE, uart->rx_dma_buf.buf[i], flg); 481 if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i]))
482 uart_insert_char(&uart->port, status, OE,
483 uart->rx_dma_buf.buf[i], flg);
497 } 484 }
498 485
499 dma_ignore_char: 486 dma_ignore_char:
@@ -503,23 +490,23 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart)
503void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) 490void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
504{ 491{
505 int x_pos, pos; 492 int x_pos, pos;
506 int flags = 0;
507
508 bfin_serial_dma_tx_chars(uart);
509 493
510 spin_lock_irqsave(&uart->port.lock, flags); 494 uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel);
511 x_pos = DMA_RX_XCOUNT - get_dma_curr_xcount(uart->rx_dma_channel); 495 x_pos = get_dma_curr_xcount(uart->rx_dma_channel);
496 uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows;
497 if (uart->rx_dma_nrows == DMA_RX_YCOUNT)
498 uart->rx_dma_nrows = 0;
499 x_pos = DMA_RX_XCOUNT - x_pos;
512 if (x_pos == DMA_RX_XCOUNT) 500 if (x_pos == DMA_RX_XCOUNT)
513 x_pos = 0; 501 x_pos = 0;
514 502
515 pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos; 503 pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos;
516 504 if (pos != uart->rx_dma_buf.tail) {
517 if (pos>uart->rx_dma_buf.tail) { 505 uart->rx_dma_buf.head = pos;
518 uart->rx_dma_buf.tail = pos;
519 bfin_serial_dma_rx_chars(uart); 506 bfin_serial_dma_rx_chars(uart);
520 uart->rx_dma_buf.head = uart->rx_dma_buf.tail; 507 uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
521 } 508 }
522 spin_unlock_irqrestore(&uart->port.lock, flags); 509
523 uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; 510 uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES;
524 add_timer(&(uart->rx_dma_timer)); 511 add_timer(&(uart->rx_dma_timer));
525} 512}
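
The reworked timeout handler above no longer counts rows in the interrupt path; it reads the DMA controller's remaining x/y counts and converts them into a linear write position inside the 2-D receive buffer, wrapping either axis back to zero when a full row or a full buffer has just completed. A small sketch of that position calculation, using illustrative geometry in place of DMA_RX_XCOUNT/DMA_RX_YCOUNT:

#include <stdio.h>

/* Illustrative geometry only. */
#define RX_XCOUNT 512
#define RX_YCOUNT 8

/* Convert remaining x/y counts into a linear write position, mirroring
 * the arithmetic in bfin_serial_rx_dma_timeout(). */
static int dma_write_pos(int curr_xcount, int curr_ycount)
{
    int nrows = RX_YCOUNT - curr_ycount;
    int x_pos = RX_XCOUNT - curr_xcount;

    if (nrows == RX_YCOUNT)
        nrows = 0;
    if (x_pos == RX_XCOUNT)
        x_pos = 0;

    return nrows * RX_XCOUNT + x_pos;
}

int main(void)
{
    /* e.g. three full rows consumed and 100 bytes into the fourth row */
    printf("pos=%d\n", dma_write_pos(RX_XCOUNT - 100, RX_YCOUNT - 3));
    return 0;
}
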
@@ -532,8 +519,8 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
532 519
533 spin_lock(&uart->port.lock); 520 spin_lock(&uart->port.lock);
534 if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) { 521 if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
535 clear_dma_irqstat(uart->tx_dma_channel);
536 disable_dma(uart->tx_dma_channel); 522 disable_dma(uart->tx_dma_channel);
523 clear_dma_irqstat(uart->tx_dma_channel);
537#ifdef CONFIG_BF54x 524#ifdef CONFIG_BF54x
538 UART_CLEAR_IER(uart, ETBEI); 525 UART_CLEAR_IER(uart, ETBEI);
539#else 526#else
@@ -541,15 +528,13 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
541 ier &= ~ETBEI; 528 ier &= ~ETBEI;
542 UART_PUT_IER(uart, ier); 529 UART_PUT_IER(uart, ier);
543#endif 530#endif
544 xmit->tail = (xmit->tail+uart->tx_count) &(UART_XMIT_SIZE -1); 531 xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1);
545 uart->port.icount.tx+=uart->tx_count; 532 uart->port.icount.tx += uart->tx_count;
546 533
547 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 534 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
548 uart_write_wakeup(&uart->port); 535 uart_write_wakeup(&uart->port);
549 536
550 if (uart_circ_empty(xmit)) 537 bfin_serial_dma_tx_chars(uart);
551 bfin_serial_stop_tx(&uart->port);
552 uart->tx_done = 1;
553 } 538 }
554 539
555 spin_unlock(&uart->port.lock); 540 spin_unlock(&uart->port.lock);
@@ -561,18 +546,15 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id)
561 struct bfin_serial_port *uart = dev_id; 546 struct bfin_serial_port *uart = dev_id;
562 unsigned short irqstat; 547 unsigned short irqstat;
563 548
564 uart->rx_dma_nrows++;
565 if (uart->rx_dma_nrows == DMA_RX_YCOUNT) {
566 uart->rx_dma_nrows = 0;
567 uart->rx_dma_buf.tail = DMA_RX_XCOUNT*DMA_RX_YCOUNT;
568 bfin_serial_dma_rx_chars(uart);
569 uart->rx_dma_buf.head = uart->rx_dma_buf.tail = 0;
570 }
571 spin_lock(&uart->port.lock); 549 spin_lock(&uart->port.lock);
572 irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); 550 irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
573 clear_dma_irqstat(uart->rx_dma_channel); 551 clear_dma_irqstat(uart->rx_dma_channel);
574
575 spin_unlock(&uart->port.lock); 552 spin_unlock(&uart->port.lock);
553
554 del_timer(&(uart->rx_dma_timer));
555 uart->rx_dma_timer.expires = jiffies;
556 add_timer(&(uart->rx_dma_timer));
557
576 return IRQ_HANDLED; 558 return IRQ_HANDLED;
577} 559}
578#endif 560#endif
@@ -599,7 +581,11 @@ static unsigned int bfin_serial_get_mctrl(struct uart_port *port)
599 if (uart->cts_pin < 0) 581 if (uart->cts_pin < 0)
600 return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; 582 return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
601 583
584# ifdef BF54x
585 if (UART_GET_MSR(uart) & CTS)
586# else
602 if (gpio_get_value(uart->cts_pin)) 587 if (gpio_get_value(uart->cts_pin))
588# endif
603 return TIOCM_DSR | TIOCM_CAR; 589 return TIOCM_DSR | TIOCM_CAR;
604 else 590 else
605#endif 591#endif
@@ -614,9 +600,17 @@ static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
614 return; 600 return;
615 601
616 if (mctrl & TIOCM_RTS) 602 if (mctrl & TIOCM_RTS)
603# ifdef BF54x
604 UART_PUT_MCR(uart, UART_GET_MCR(uart) & ~MRTS);
605# else
617 gpio_set_value(uart->rts_pin, 0); 606 gpio_set_value(uart->rts_pin, 0);
607# endif
618 else 608 else
609# ifdef BF54x
610 UART_PUT_MCR(uart, UART_GET_MCR(uart) | MRTS);
611# else
619 gpio_set_value(uart->rts_pin, 1); 612 gpio_set_value(uart->rts_pin, 1);
613# endif
620#endif 614#endif
621} 615}
622 616
@@ -627,22 +621,17 @@ static void bfin_serial_mctrl_check(struct bfin_serial_port *uart)
627{ 621{
628#ifdef CONFIG_SERIAL_BFIN_CTSRTS 622#ifdef CONFIG_SERIAL_BFIN_CTSRTS
629 unsigned int status; 623 unsigned int status;
630# ifdef CONFIG_SERIAL_BFIN_DMA
631 struct uart_info *info = uart->port.info; 624 struct uart_info *info = uart->port.info;
632 struct tty_struct *tty = info->tty; 625 struct tty_struct *tty = info->tty;
633 626
634 status = bfin_serial_get_mctrl(&uart->port); 627 status = bfin_serial_get_mctrl(&uart->port);
628 uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
635 if (!(status & TIOCM_CTS)) { 629 if (!(status & TIOCM_CTS)) {
636 tty->hw_stopped = 1; 630 tty->hw_stopped = 1;
631 schedule_work(&uart->cts_workqueue);
637 } else { 632 } else {
638 tty->hw_stopped = 0; 633 tty->hw_stopped = 0;
639 } 634 }
640# else
641 status = bfin_serial_get_mctrl(&uart->port);
642 uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
643 if (!(status & TIOCM_CTS))
644 schedule_work(&uart->cts_workqueue);
645# endif
646#endif 635#endif
647} 636}
648 637
@@ -743,6 +732,7 @@ static void bfin_serial_shutdown(struct uart_port *port)
743 disable_dma(uart->rx_dma_channel); 732 disable_dma(uart->rx_dma_channel);
744 free_dma(uart->rx_dma_channel); 733 free_dma(uart->rx_dma_channel);
745 del_timer(&(uart->rx_dma_timer)); 734 del_timer(&(uart->rx_dma_timer));
735 dma_free_coherent(NULL, PAGE_SIZE, uart->rx_dma_buf.buf, 0);
746#else 736#else
747#ifdef CONFIG_KGDB_UART 737#ifdef CONFIG_KGDB_UART
748 if (uart->port.line != CONFIG_KGDB_UART_PORT) 738 if (uart->port.line != CONFIG_KGDB_UART_PORT)
@@ -814,6 +804,8 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
814 quot = uart_get_divisor(port, baud); 804 quot = uart_get_divisor(port, baud);
815 spin_lock_irqsave(&uart->port.lock, flags); 805 spin_lock_irqsave(&uart->port.lock, flags);
816 806
807 UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15);
808
817 do { 809 do {
818 lsr = UART_GET_LSR(uart); 810 lsr = UART_GET_LSR(uart);
819 } while (!(lsr & TEMT)); 811 } while (!(lsr & TEMT));
@@ -956,10 +948,9 @@ static void __init bfin_serial_init_ports(void)
956 bfin_serial_ports[i].rx_dma_channel = 948 bfin_serial_ports[i].rx_dma_channel =
957 bfin_serial_resource[i].uart_rx_dma_channel; 949 bfin_serial_resource[i].uart_rx_dma_channel;
958 init_timer(&(bfin_serial_ports[i].rx_dma_timer)); 950 init_timer(&(bfin_serial_ports[i].rx_dma_timer));
959#else
960 INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work);
961#endif 951#endif
962#ifdef CONFIG_SERIAL_BFIN_CTSRTS 952#ifdef CONFIG_SERIAL_BFIN_CTSRTS
953 INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work);
963 bfin_serial_ports[i].cts_pin = 954 bfin_serial_ports[i].cts_pin =
964 bfin_serial_resource[i].uart_cts_pin; 955 bfin_serial_resource[i].uart_cts_pin;
965 bfin_serial_ports[i].rts_pin = 956 bfin_serial_ports[i].rts_pin =
diff --git a/drivers/serial/m32r_sio.c b/drivers/serial/m32r_sio.c
index 348ee2c19b58..c2bb11c02bde 100644
--- a/drivers/serial/m32r_sio.c
+++ b/drivers/serial/m32r_sio.c
@@ -421,7 +421,7 @@ static void transmit_chars(struct uart_sio_port *up)
421 up->port.icount.tx++; 421 up->port.icount.tx++;
422 if (uart_circ_empty(xmit)) 422 if (uart_circ_empty(xmit))
423 break; 423 break;
424 while (!serial_in(up, UART_LSR) & UART_LSR_THRE); 424 while (!(serial_in(up, UART_LSR) & UART_LSR_THRE));
425 425
426 } while (--count > 0); 426 } while (--count > 0);
427 427
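
The m32r_sio change is purely an operator-precedence fix: `!` binds tighter than `&`, so the old busy-wait evaluated `(!lsr) & UART_LSR_THRE`, which is almost always zero and therefore never actually waited for the holding register to empty. A standalone example of the difference, with an illustrative register value:

#include <stdio.h>

#define LSR_THRE 0x20

int main(void)
{
    unsigned lsr = 0x01;                 /* THRE not yet set */

    /* Buggy form: '!' applies to lsr first, so the test is (0 & 0x20) == 0
     * and a while-loop using it would never spin. */
    int buggy = !lsr & LSR_THRE;

    /* Fixed form: mask first, then negate, so this stays 1 while THRE is clear. */
    int fixed = !(lsr & LSR_THRE);

    printf("buggy=%d fixed=%d\n", buggy, fixed);
    return 0;
}
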
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index a64d85821996..c0e50a461055 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -138,7 +138,7 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = {
138 { /* end of list */ }, 138 { /* end of list */ },
139}; 139};
140 140
141static struct of_platform_driver __devinitdata of_platform_serial_driver = { 141static struct of_platform_driver of_platform_serial_driver = {
142 .owner = THIS_MODULE, 142 .owner = THIS_MODULE,
143 .name = "of_serial", 143 .name = "of_serial",
144 .probe = of_platform_serial_probe, 144 .probe = of_platform_serial_probe,
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 9ce12cb2cebc..a8c116b80bff 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -41,6 +41,7 @@
41#include <linux/delay.h> 41#include <linux/delay.h>
42#include <linux/console.h> 42#include <linux/console.h>
43#include <linux/platform_device.h> 43#include <linux/platform_device.h>
44#include <linux/serial_sci.h>
44 45
45#ifdef CONFIG_CPU_FREQ 46#ifdef CONFIG_CPU_FREQ
46#include <linux/notifier.h> 47#include <linux/notifier.h>
@@ -54,7 +55,6 @@
54#include <asm/kgdb.h> 55#include <asm/kgdb.h>
55#endif 56#endif
56 57
57#include <asm/sci.h>
58#include "sh-sci.h" 58#include "sh-sci.h"
59 59
60struct sci_port { 60struct sci_port {
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index 9cfcfd8dad5e..617efb1640b1 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Core maple bus functionality 2 * Core maple bus functionality
3 * 3 *
4 * Copyright (C) 2007 Adrian McMenamin 4 * Copyright (C) 2007, 2008 Adrian McMenamin
5 * 5 *
6 * Based on 2.4 code by: 6 * Based on 2.4 code by:
7 * 7 *
@@ -18,7 +18,6 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/module.h>
22#include <linux/interrupt.h> 21#include <linux/interrupt.h>
23#include <linux/list.h> 22#include <linux/list.h>
24#include <linux/io.h> 23#include <linux/io.h>
@@ -54,7 +53,7 @@ static struct device maple_bus;
54static int subdevice_map[MAPLE_PORTS]; 53static int subdevice_map[MAPLE_PORTS];
55static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; 54static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
56static unsigned long maple_pnp_time; 55static unsigned long maple_pnp_time;
57static int started, scanning, liststatus, realscan; 56static int started, scanning, liststatus, fullscan;
58static struct kmem_cache *maple_queue_cache; 57static struct kmem_cache *maple_queue_cache;
59 58
60struct maple_device_specify { 59struct maple_device_specify {
@@ -62,6 +61,9 @@ struct maple_device_specify {
62 int unit; 61 int unit;
63}; 62};
64 63
64static bool checked[4];
65static struct maple_device *baseunits[4];
66
65/** 67/**
66 * maple_driver_register - register a device driver 68 * maple_driver_register - register a device driver
67 * automatically makes the driver bus a maple bus 69 * automatically makes the driver bus a maple bus
@@ -309,11 +311,9 @@ static void maple_attach_driver(struct maple_device *mdev)
309 else 311 else
310 break; 312 break;
311 313
312 if (realscan) { 314 printk(KERN_INFO "Maple device detected: %s\n",
313 printk(KERN_INFO "Maple device detected: %s\n", 315 mdev->product_name);
314 mdev->product_name); 316 printk(KERN_INFO "Maple device: %s\n", mdev->product_licence);
315 printk(KERN_INFO "Maple device: %s\n", mdev->product_licence);
316 }
317 317
318 function = be32_to_cpu(mdev->devinfo.function); 318 function = be32_to_cpu(mdev->devinfo.function);
319 319
@@ -323,10 +323,9 @@ static void maple_attach_driver(struct maple_device *mdev)
323 mdev->driver = &maple_dummy_driver; 323 mdev->driver = &maple_dummy_driver;
324 sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port); 324 sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port);
325 } else { 325 } else {
326 if (realscan) 326 printk(KERN_INFO
327 printk(KERN_INFO 327 "Maple bus at (%d, %d): Function 0x%lX\n",
328 "Maple bus at (%d, %d): Function 0x%lX\n", 328 mdev->port, mdev->unit, function);
329 mdev->port, mdev->unit, function);
330 329
331 matched = 330 matched =
332 bus_for_each_drv(&maple_bus_type, NULL, mdev, 331 bus_for_each_drv(&maple_bus_type, NULL, mdev,
@@ -334,9 +333,8 @@ static void maple_attach_driver(struct maple_device *mdev)
334 333
335 if (matched == 0) { 334 if (matched == 0) {
336 /* Driver does not exist yet */ 335 /* Driver does not exist yet */
337 if (realscan) 336 printk(KERN_INFO
338 printk(KERN_INFO 337 "No maple driver found.\n");
339 "No maple driver found.\n");
340 mdev->driver = &maple_dummy_driver; 338 mdev->driver = &maple_dummy_driver;
341 } 339 }
342 sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port, 340 sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port,
@@ -472,9 +470,12 @@ static void maple_response_none(struct maple_device *mdev,
472 maple_detach_driver(mdev); 470 maple_detach_driver(mdev);
473 return; 471 return;
474 } 472 }
475 if (!started) { 473 if (!started || !fullscan) {
476 printk(KERN_INFO "No maple devices attached to port %d\n", 474 if (checked[mdev->port] == false) {
477 mdev->port); 475 checked[mdev->port] = true;
476 printk(KERN_INFO "No maple devices attached"
477 " to port %d\n", mdev->port);
478 }
478 return; 479 return;
479 } 480 }
480 maple_clean_submap(mdev); 481 maple_clean_submap(mdev);
@@ -485,8 +486,14 @@ static void maple_response_devinfo(struct maple_device *mdev,
485 char *recvbuf) 486 char *recvbuf)
486{ 487{
487 char submask; 488 char submask;
488 if ((!started) || (scanning == 2)) { 489 if (!started || (scanning == 2) || !fullscan) {
489 maple_attach_driver(mdev); 490 if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
491 checked[mdev->port] = true;
492 maple_attach_driver(mdev);
493 } else {
494 if (mdev->unit != 0)
495 maple_attach_driver(mdev);
496 }
490 return; 497 return;
491 } 498 }
492 if (mdev->unit == 0) { 499 if (mdev->unit == 0) {
@@ -505,6 +512,7 @@ static void maple_dma_handler(struct work_struct *work)
505 struct maple_device *dev; 512 struct maple_device *dev;
506 char *recvbuf; 513 char *recvbuf;
507 enum maple_code code; 514 enum maple_code code;
515 int i;
508 516
509 if (!maple_dma_done()) 517 if (!maple_dma_done())
510 return; 518 return;
@@ -557,6 +565,19 @@ static void maple_dma_handler(struct work_struct *work)
557 } else 565 } else
558 scanning = 0; 566 scanning = 0;
559 567
568 if (!fullscan) {
569 fullscan = 1;
570 for (i = 0; i < MAPLE_PORTS; i++) {
571 if (checked[i] == false) {
572 fullscan = 0;
573 dev = baseunits[i];
574 dev->mq->command =
575 MAPLE_COMMAND_DEVINFO;
576 dev->mq->length = 0;
577 maple_add_packet(dev->mq);
578 }
579 }
580 }
560 if (started == 0) 581 if (started == 0)
561 started = 1; 582 started = 1;
562 } 583 }
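
The fullscan logic added above walks the ports after each DMA pass: any port that has not yet answered gets another DEVINFO query, and only once every port has been heard from is the scan considered complete. A hedged sketch of that bookkeeping, with queue_devinfo() standing in for building and queueing the real maple packet:

#include <stdbool.h>
#include <stdio.h>

#define PORTS 4

static bool checked[PORTS];

/* Stand-in for queueing a MAPLE_COMMAND_DEVINFO packet on a port. */
static void queue_devinfo(int port) { printf("re-query port %d\n", port); }

/* Returns true once every port has answered; otherwise re-queries the
 * silent ports, mirroring the !fullscan branch in maple_dma_handler(). */
static bool try_finish_scan(void)
{
    bool full = true;
    int i;

    for (i = 0; i < PORTS; i++) {
        if (!checked[i]) {
            full = false;
            queue_devinfo(i);
        }
    }
    return full;
}

int main(void) { return try_finish_scan() ? 0 : 1; }
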
@@ -694,7 +715,9 @@ static int __init maple_bus_init(void)
694 715
695 /* setup maple ports */ 716 /* setup maple ports */
696 for (i = 0; i < MAPLE_PORTS; i++) { 717 for (i = 0; i < MAPLE_PORTS; i++) {
718 checked[i] = false;
697 mdev[i] = maple_alloc_dev(i, 0); 719 mdev[i] = maple_alloc_dev(i, 0);
720 baseunits[i] = mdev[i];
698 if (!mdev[i]) { 721 if (!mdev[i]) {
699 while (i-- > 0) 722 while (i-- > 0)
700 maple_free_dev(mdev[i]); 723 maple_free_dev(mdev[i]);
@@ -703,12 +726,9 @@ static int __init maple_bus_init(void)
703 mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO; 726 mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO;
704 mdev[i]->mq->length = 0; 727 mdev[i]->mq->length = 0;
705 maple_add_packet(mdev[i]->mq); 728 maple_add_packet(mdev[i]->mq);
706 /* delay aids hardware detection */
707 mdelay(5);
708 subdevice_map[i] = 0; 729 subdevice_map[i] = 0;
709 } 730 }
710 731
711 realscan = 1;
712 /* setup maplebus hardware */ 732 /* setup maplebus hardware */
713 maplebus_dma_reset(); 733 maplebus_dma_reset();
714 /* initial detection */ 734 /* initial detection */
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 293b7cab3e57..85687aaf9cab 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -87,6 +87,16 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
87 unsigned gpio = (unsigned) spi->controller_data; 87 unsigned gpio = (unsigned) spi->controller_data;
88 unsigned active = spi->mode & SPI_CS_HIGH; 88 unsigned active = spi->mode & SPI_CS_HIGH;
89 u32 mr; 89 u32 mr;
90 int i;
91 u32 csr;
92 u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
93
94 /* Make sure clock polarity is correct */
95 for (i = 0; i < spi->master->num_chipselect; i++) {
96 csr = spi_readl(as, CSR0 + 4 * i);
97 if ((csr ^ cpol) & SPI_BIT(CPOL))
98 spi_writel(as, CSR0 + 4 * i, csr ^ SPI_BIT(CPOL));
99 }
90 100
91 mr = spi_readl(as, MR); 101 mr = spi_readl(as, MR);
92 mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr); 102 mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
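
The loop added to cs_activate() keeps every chip-select register's CPOL bit in step with the mode of the device being activated, so the idle clock never shows the wrong polarity when switching between devices with different modes. A sketch of the same fix over an in-memory register array; the register count and bit position are placeholders, not the SPI controller's real layout:

#include <stdint.h>

#define NUM_CS   4
#define BIT_CPOL (1u << 0)            /* bit position is illustrative only */

static uint32_t csr[NUM_CS];          /* stand-ins for CSR0..CSR3 */

/* Flip CPOL in any CSR whose polarity disagrees with the requested one,
 * as the cs_activate() hunk above does via spi_readl()/spi_writel(). */
static void sync_cpol(uint32_t want_cpol)
{
    int i;

    for (i = 0; i < NUM_CS; i++) {
        if ((csr[i] ^ want_cpol) & BIT_CPOL)
            csr[i] ^= BIT_CPOL;
    }
}

int main(void) { sync_cpol(BIT_CPOL); return 0; }
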
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 253ed5682a6d..a86315a0c5b8 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -42,6 +42,7 @@ struct mpc52xx_psc_spi {
42 42
43 /* driver internal data */ 43 /* driver internal data */
44 struct mpc52xx_psc __iomem *psc; 44 struct mpc52xx_psc __iomem *psc;
45 struct mpc52xx_psc_fifo __iomem *fifo;
45 unsigned int irq; 46 unsigned int irq;
46 u8 bits_per_word; 47 u8 bits_per_word;
47 u8 busy; 48 u8 busy;
@@ -139,6 +140,7 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi,
139{ 140{
140 struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); 141 struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
141 struct mpc52xx_psc __iomem *psc = mps->psc; 142 struct mpc52xx_psc __iomem *psc = mps->psc;
143 struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
 142 unsigned rb = 0; /* number of bytes received */ 144 unsigned rb = 0; /* number of bytes received */
143 unsigned sb = 0; /* number of bytes sent */ 145 unsigned sb = 0; /* number of bytes sent */
144 unsigned char *rx_buf = (unsigned char *)t->rx_buf; 146 unsigned char *rx_buf = (unsigned char *)t->rx_buf;
@@ -190,11 +192,11 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi,
190 out_8(&psc->mode, 0); 192 out_8(&psc->mode, 0);
191 } else { 193 } else {
192 out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); 194 out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL);
193 out_be16(&psc->rfalarm, rfalarm); 195 out_be16(&fifo->rfalarm, rfalarm);
194 } 196 }
195 out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY); 197 out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY);
196 wait_for_completion(&mps->done); 198 wait_for_completion(&mps->done);
197 recv_at_once = in_be16(&psc->rfnum); 199 recv_at_once = in_be16(&fifo->rfnum);
198 dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once); 200 dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once);
199 201
200 send_at_once = recv_at_once; 202 send_at_once = recv_at_once;
@@ -331,6 +333,7 @@ static void mpc52xx_psc_spi_cleanup(struct spi_device *spi)
331static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) 333static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
332{ 334{
333 struct mpc52xx_psc __iomem *psc = mps->psc; 335 struct mpc52xx_psc __iomem *psc = mps->psc;
336 struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
334 u32 mclken_div; 337 u32 mclken_div;
335 int ret = 0; 338 int ret = 0;
336 339
@@ -346,7 +349,7 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
346 /* Disable interrupts, interrupts are based on alarm level */ 349 /* Disable interrupts, interrupts are based on alarm level */
347 out_be16(&psc->mpc52xx_psc_imr, 0); 350 out_be16(&psc->mpc52xx_psc_imr, 0);
348 out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); 351 out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
349 out_8(&psc->rfcntl, 0); 352 out_8(&fifo->rfcntl, 0);
350 out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); 353 out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL);
351 354
352 /* Configure 8bit codec mode as a SPI master and use EOF flags */ 355 /* Configure 8bit codec mode as a SPI master and use EOF flags */
@@ -419,6 +422,8 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
419 ret = -EFAULT; 422 ret = -EFAULT;
420 goto free_master; 423 goto free_master;
421 } 424 }
 425 /* On the 5200, fifo regs are immediately adjacent to the psc regs */
426 mps->fifo = ((void __iomem *)mps->psc) + sizeof(struct mpc52xx_psc);
422 427
423 ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi", 428 ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi",
424 mps); 429 mps);
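
On the MPC5200 the FIFO register block sits directly after the PSC block in the register map, so the driver derives the second pointer from the first with plain pointer arithmetic instead of a second mapping. A sketch of that layout trick with dummy structs (the fields here are placeholders, not the real mpc52xx register layout):

#include <stdint.h>
#include <stdio.h>

/* Dummy layouts standing in for mpc52xx_psc / mpc52xx_psc_fifo. */
struct psc_regs  { uint32_t mode, command, imr; };
struct fifo_regs { uint16_t rfalarm, rfnum; uint8_t rfcntl; };

/* One contiguous block standing in for the ioremapped register window. */
static union {
    uint8_t raw[sizeof(struct psc_regs) + sizeof(struct fifo_regs)];
    struct psc_regs align;            /* forces suitable alignment */
} mmio;

int main(void)
{
    struct psc_regs  *psc  = (struct psc_regs *)mmio.raw;
    /* The FIFO block is assumed to start right after the PSC block,
     * which is how mps->fifo is derived from mps->psc in the probe above. */
    struct fifo_regs *fifo = (struct fifo_regs *)(mmio.raw + sizeof(struct psc_regs));

    printf("psc at %p, fifo at %p\n", (void *)psc, (void *)fifo);
    return 0;
}
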
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 365e0e355aea..59deed79e0ab 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -51,13 +51,19 @@ MODULE_LICENSE("GPL");
51#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) 51#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
52#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0) 52#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
53 53
54/* for testing SSCR1 changes that require SSP restart, basically 54/*
55 * everything except the service and interrupt enables */ 55 * for testing SSCR1 changes that require SSP restart, basically
56#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_EBCEI | SSCR1_SCFR \ 56 * everything except the service and interrupt enables, the pxa270 developer
57 * manual says only SSCR1_SCFR, SSCR1_SPH, SSCR1_SPO need to be in this
58 * list, but the PXA255 dev man says all bits without really meaning the
59 * service and interrupt enables
60 */
61#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
57 | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \ 62 | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
58 | SSCR1_RWOT | SSCR1_TRAIL | SSCR1_PINTE \ 63 | SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
59 | SSCR1_STRF | SSCR1_EFWR |SSCR1_RFT \ 64 | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
60 | SSCR1_TFT | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) 65 | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
66 | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
61 67
62#define DEFINE_SSP_REG(reg, off) \ 68#define DEFINE_SSP_REG(reg, off) \
63static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \ 69static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
@@ -973,9 +979,6 @@ static void pump_transfers(unsigned long data)
973 if (drv_data->ssp_type == PXA25x_SSP) 979 if (drv_data->ssp_type == PXA25x_SSP)
974 DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; 980 DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
975 981
976 /* Fix me, need to handle cs polarity */
977 drv_data->cs_control(PXA2XX_CS_ASSERT);
978
979 /* Clear status and start DMA engine */ 982 /* Clear status and start DMA engine */
980 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; 983 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
981 write_SSSR(drv_data->clear_sr, reg); 984 write_SSSR(drv_data->clear_sr, reg);
@@ -985,9 +988,6 @@ static void pump_transfers(unsigned long data)
985 /* Ensure we have the correct interrupt handler */ 988 /* Ensure we have the correct interrupt handler */
986 drv_data->transfer_handler = interrupt_transfer; 989 drv_data->transfer_handler = interrupt_transfer;
987 990
988 /* Fix me, need to handle cs polarity */
989 drv_data->cs_control(PXA2XX_CS_ASSERT);
990
991 /* Clear status */ 991 /* Clear status */
992 cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1; 992 cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
993 write_SSSR(drv_data->clear_sr, reg); 993 write_SSSR(drv_data->clear_sr, reg);
@@ -998,16 +998,29 @@ static void pump_transfers(unsigned long data)
998 || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) != 998 || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
999 (cr1 & SSCR1_CHANGE_MASK)) { 999 (cr1 & SSCR1_CHANGE_MASK)) {
1000 1000
1001 /* stop the SSP, and update the other bits */
1001 write_SSCR0(cr0 & ~SSCR0_SSE, reg); 1002 write_SSCR0(cr0 & ~SSCR0_SSE, reg);
1002 if (drv_data->ssp_type != PXA25x_SSP) 1003 if (drv_data->ssp_type != PXA25x_SSP)
1003 write_SSTO(chip->timeout, reg); 1004 write_SSTO(chip->timeout, reg);
1004 write_SSCR1(cr1, reg); 1005 /* first set CR1 without interrupt and service enables */
1006 write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg);
1007 /* restart the SSP */
1005 write_SSCR0(cr0, reg); 1008 write_SSCR0(cr0, reg);
1009
1006 } else { 1010 } else {
1007 if (drv_data->ssp_type != PXA25x_SSP) 1011 if (drv_data->ssp_type != PXA25x_SSP)
1008 write_SSTO(chip->timeout, reg); 1012 write_SSTO(chip->timeout, reg);
1009 write_SSCR1(cr1, reg);
1010 } 1013 }
1014
1015 /* FIXME, need to handle cs polarity,
1016 * this driver uses struct pxa2xx_spi_chip.cs_control to
1017 * specify a CS handling function, and it ignores most
1018 * struct spi_device.mode[s], including SPI_CS_HIGH */
1019 drv_data->cs_control(PXA2XX_CS_ASSERT);
1020
1021 /* after chip select, release the data by enabling service
1022 * requests and interrupts, without changing any mode bits */
1023 write_SSCR1(cr1, reg);
1011} 1024}
1012 1025
1013static void pump_messages(struct work_struct *work) 1026static void pump_messages(struct work_struct *work)
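
The reordered sequence in pump_transfers() stops the SSP, programs SSCR1 with the service and interrupt enables masked off, restarts the port, asserts chip select, and only then writes the full SSCR1 so data starts moving after CS is stable. A sketch of that ordering with stubbed register writes; the bit positions and mask are illustrative, not the PXA register definitions:

#include <stdint.h>
#include <stdio.h>

#define SSE         (1u << 7)         /* illustrative bit positions */
#define CHANGE_MASK 0x00ffu           /* mode bits only, no enables */

static void wr(const char *reg, uint32_t v) { printf("%s <= %#x\n", reg, (unsigned)v); }
static void cs_assert(void)                 { puts("assert CS"); }

/* Restart-and-enable order from pump_transfers(): mode bits first,
 * chip select next, interrupt/service enables last. */
static void restart_ssp(uint32_t cr0, uint32_t cr1)
{
    wr("SSCR0", cr0 & ~SSE);              /* stop the port */
    wr("SSCR1", cr1 & CHANGE_MASK);       /* mode bits, enables still off */
    wr("SSCR0", cr0);                     /* restart */
    cs_assert();                          /* select the device */
    wr("SSCR1", cr1);                     /* finally enable service/interrupts */
}

int main(void) { restart_ssp(0x87u | SSE, 0x1f3u); return 0; }
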
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index d976660cb7f0..adea792fb675 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -35,6 +35,11 @@ config SSB_PCIHOST
35 35
36 If unsure, say Y 36 If unsure, say Y
37 37
38config SSB_B43_PCI_BRIDGE
39 bool
40 depends on SSB_PCIHOST
41 default n
42
38config SSB_PCMCIAHOST_POSSIBLE 43config SSB_PCMCIAHOST_POSSIBLE
39 bool 44 bool
40 depends on SSB && (PCMCIA = y || PCMCIA = SSB) && EXPERIMENTAL 45 depends on SSB && (PCMCIA = y || PCMCIA = SSB) && EXPERIMENTAL
@@ -105,6 +110,12 @@ config SSB_DRIVER_MIPS
105 110
106 If unsure, say N 111 If unsure, say N
107 112
113# Assumption: We are on embedded, if we compile the MIPS core.
114config SSB_EMBEDDED
115 bool
116 depends on SSB_DRIVER_MIPS
117 default y
118
108config SSB_DRIVER_EXTIF 119config SSB_DRIVER_EXTIF
109 bool "SSB Broadcom EXTIF core driver (EXPERIMENTAL)" 120 bool "SSB Broadcom EXTIF core driver (EXPERIMENTAL)"
110 depends on SSB_DRIVER_MIPS && EXPERIMENTAL 121 depends on SSB_DRIVER_MIPS && EXPERIMENTAL
diff --git a/drivers/ssb/Makefile b/drivers/ssb/Makefile
index 7be397595805..de94c2eb7a37 100644
--- a/drivers/ssb/Makefile
+++ b/drivers/ssb/Makefile
@@ -1,5 +1,6 @@
1# core 1# core
2ssb-y += main.o scan.o 2ssb-y += main.o scan.o
3ssb-$(CONFIG_SSB_EMBEDDED) += embedded.o
3 4
4# host support 5# host support
5ssb-$(CONFIG_SSB_PCIHOST) += pci.o pcihost_wrapper.o 6ssb-$(CONFIG_SSB_PCIHOST) += pci.o pcihost_wrapper.o
@@ -13,6 +14,6 @@ ssb-$(CONFIG_SSB_DRIVER_PCICORE) += driver_pcicore.o
13 14
14# b43 pci-ssb-bridge driver 15# b43 pci-ssb-bridge driver
15# Not strictly a part of SSB, but kept here for convenience 16# Not strictly a part of SSB, but kept here for convenience
16ssb-$(CONFIG_SSB_PCIHOST) += b43_pci_bridge.o 17ssb-$(CONFIG_SSB_B43_PCI_BRIDGE) += b43_pci_bridge.o
17 18
18obj-$(CONFIG_SSB) += ssb.o 19obj-$(CONFIG_SSB) += ssb.o
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 6fbf1c53b6f2..e586321a473a 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -39,12 +39,14 @@ static inline void chipco_write32(struct ssb_chipcommon *cc,
39 ssb_write32(cc->dev, offset, value); 39 ssb_write32(cc->dev, offset, value);
40} 40}
41 41
42static inline void chipco_write32_masked(struct ssb_chipcommon *cc, u16 offset, 42static inline u32 chipco_write32_masked(struct ssb_chipcommon *cc, u16 offset,
43 u32 mask, u32 value) 43 u32 mask, u32 value)
44{ 44{
45 value &= mask; 45 value &= mask;
46 value |= chipco_read32(cc, offset) & ~mask; 46 value |= chipco_read32(cc, offset) & ~mask;
47 chipco_write32(cc, offset, value); 47 chipco_write32(cc, offset, value);
48
49 return value;
48} 50}
49 51
50void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc, 52void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc,
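
The helper now returns the value it wrote, so the GPIO wrappers further down can hand the resulting register state back to their callers. A generic read-modify-write sketch over a plain memory word, assuming nothing about the ChipCommon register map:

#include <stdint.h>
#include <stdio.h>

/* Keep the bits outside 'mask', substitute the masked part of 'value',
 * write back, and return what was written -- the same contract
 * chipco_write32_masked() now has. */
static uint32_t write32_masked(volatile uint32_t *reg, uint32_t mask, uint32_t value)
{
    value &= mask;
    value |= *reg & ~mask;
    *reg = value;
    return value;
}

int main(void)
{
    uint32_t reg = 0xff00ff00u;
    printf("%#x\n", (unsigned)write32_masked(&reg, 0x0000ffffu, 0x1234u));
    return 0;
}
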
@@ -356,14 +358,29 @@ u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask)
356 return chipco_read32(cc, SSB_CHIPCO_GPIOIN) & mask; 358 return chipco_read32(cc, SSB_CHIPCO_GPIOIN) & mask;
357} 359}
358 360
359void ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value) 361u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value)
362{
363 return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value);
364}
365
366u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value)
367{
368 return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value);
369}
370
371u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value)
372{
373 return chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value);
374}
375
376u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value)
360{ 377{
361 chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value); 378 return chipco_write32_masked(cc, SSB_CHIPCO_GPIOIRQ, mask, value);
362} 379}
363 380
364void ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value) 381u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value)
365{ 382{
366 chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value); 383 return chipco_write32_masked(cc, SSB_CHIPCO_GPIOPOL, mask, value);
367} 384}
368 385
369#ifdef CONFIG_SSB_SERIAL 386#ifdef CONFIG_SSB_SERIAL
@@ -376,6 +393,7 @@ int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
376 unsigned int irq; 393 unsigned int irq;
377 u32 baud_base, div; 394 u32 baud_base, div;
378 u32 i, n; 395 u32 i, n;
396 unsigned int ccrev = cc->dev->id.revision;
379 397
380 plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT); 398 plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT);
381 irq = ssb_mips_irq(cc->dev); 399 irq = ssb_mips_irq(cc->dev);
@@ -387,14 +405,39 @@ int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
387 chipco_read32(cc, SSB_CHIPCO_CLOCK_M2)); 405 chipco_read32(cc, SSB_CHIPCO_CLOCK_M2));
388 div = 1; 406 div = 1;
389 } else { 407 } else {
390 if (cc->dev->id.revision >= 11) { 408 if (ccrev == 20) {
409 /* BCM5354 uses constant 25MHz clock */
410 baud_base = 25000000;
411 div = 48;
412 /* Set the override bit so we don't divide it */
413 chipco_write32(cc, SSB_CHIPCO_CORECTL,
414 chipco_read32(cc, SSB_CHIPCO_CORECTL)
415 | SSB_CHIPCO_CORECTL_UARTCLK0);
416 } else if ((ccrev >= 11) && (ccrev != 15)) {
391 /* Fixed ALP clock */ 417 /* Fixed ALP clock */
392 baud_base = 20000000; 418 baud_base = 20000000;
419 if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
420 /* FIXME: baud_base is different for devices with a PMU */
421 SSB_WARN_ON(1);
422 }
393 div = 1; 423 div = 1;
424 if (ccrev >= 21) {
425 /* Turn off UART clock before switching clocksource. */
426 chipco_write32(cc, SSB_CHIPCO_CORECTL,
427 chipco_read32(cc, SSB_CHIPCO_CORECTL)
428 & ~SSB_CHIPCO_CORECTL_UARTCLKEN);
429 }
394 /* Set the override bit so we don't divide it */ 430 /* Set the override bit so we don't divide it */
395 chipco_write32(cc, SSB_CHIPCO_CORECTL, 431 chipco_write32(cc, SSB_CHIPCO_CORECTL,
396 SSB_CHIPCO_CORECTL_UARTCLK0); 432 chipco_read32(cc, SSB_CHIPCO_CORECTL)
397 } else if (cc->dev->id.revision >= 3) { 433 | SSB_CHIPCO_CORECTL_UARTCLK0);
434 if (ccrev >= 21) {
435 /* Re-enable the UART clock. */
436 chipco_write32(cc, SSB_CHIPCO_CORECTL,
437 chipco_read32(cc, SSB_CHIPCO_CORECTL)
438 | SSB_CHIPCO_CORECTL_UARTCLKEN);
439 }
440 } else if (ccrev >= 3) {
398 /* Internal backplane clock */ 441 /* Internal backplane clock */
399 baud_base = ssb_clockspeed(bus); 442 baud_base = ssb_clockspeed(bus);
400 div = chipco_read32(cc, SSB_CHIPCO_CLKDIV) 443 div = chipco_read32(cc, SSB_CHIPCO_CLKDIV)
@@ -406,7 +449,7 @@ int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
406 } 449 }
407 450
408 /* Clock source depends on strapping if UartClkOverride is unset */ 451 /* Clock source depends on strapping if UartClkOverride is unset */
409 if ((cc->dev->id.revision > 0) && 452 if ((ccrev > 0) &&
410 !(chipco_read32(cc, SSB_CHIPCO_CORECTL) & SSB_CHIPCO_CORECTL_UARTCLK0)) { 453 !(chipco_read32(cc, SSB_CHIPCO_CORECTL) & SSB_CHIPCO_CORECTL_UARTCLK0)) {
411 if ((cc->capabilities & SSB_CHIPCO_CAP_UARTCLK) == 454 if ((cc->capabilities & SSB_CHIPCO_CAP_UARTCLK) ==
412 SSB_CHIPCO_CAP_UARTCLK_INT) { 455 SSB_CHIPCO_CAP_UARTCLK_INT) {
@@ -428,7 +471,7 @@ int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
428 cc_mmio = cc->dev->bus->mmio + (cc->dev->core_index * SSB_CORE_SIZE); 471 cc_mmio = cc->dev->bus->mmio + (cc->dev->core_index * SSB_CORE_SIZE);
429 uart_regs = cc_mmio + SSB_CHIPCO_UART0_DATA; 472 uart_regs = cc_mmio + SSB_CHIPCO_UART0_DATA;
430 /* Offset changed at after rev 0 */ 473 /* Offset changed at after rev 0 */
431 if (cc->dev->id.revision == 0) 474 if (ccrev == 0)
432 uart_regs += (i * 8); 475 uart_regs += (i * 8);
433 else 476 else
434 uart_regs += (i * 256); 477 uart_regs += (i * 256);
diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c
index fe55eb8b038a..c3e1d3e6d610 100644
--- a/drivers/ssb/driver_extif.c
+++ b/drivers/ssb/driver_extif.c
@@ -27,12 +27,14 @@ static inline void extif_write32(struct ssb_extif *extif, u16 offset, u32 value)
27 ssb_write32(extif->dev, offset, value); 27 ssb_write32(extif->dev, offset, value);
28} 28}
29 29
30static inline void extif_write32_masked(struct ssb_extif *extif, u16 offset, 30static inline u32 extif_write32_masked(struct ssb_extif *extif, u16 offset,
31 u32 mask, u32 value) 31 u32 mask, u32 value)
32{ 32{
33 value &= mask; 33 value &= mask;
34 value |= extif_read32(extif, offset) & ~mask; 34 value |= extif_read32(extif, offset) & ~mask;
35 extif_write32(extif, offset, value); 35 extif_write32(extif, offset, value);
36
37 return value;
36} 38}
37 39
38#ifdef CONFIG_SSB_SERIAL 40#ifdef CONFIG_SSB_SERIAL
@@ -110,20 +112,35 @@ void ssb_extif_get_clockcontrol(struct ssb_extif *extif,
110 *m = extif_read32(extif, SSB_EXTIF_CLOCK_SB); 112 *m = extif_read32(extif, SSB_EXTIF_CLOCK_SB);
111} 113}
112 114
115void ssb_extif_watchdog_timer_set(struct ssb_extif *extif,
116 u32 ticks)
117{
118 extif_write32(extif, SSB_EXTIF_WATCHDOG, ticks);
119}
120
113u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask) 121u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask)
114{ 122{
115 return extif_read32(extif, SSB_EXTIF_GPIO_IN) & mask; 123 return extif_read32(extif, SSB_EXTIF_GPIO_IN) & mask;
116} 124}
117 125
118void ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value) 126u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value)
119{ 127{
120 return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUT(0), 128 return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUT(0),
121 mask, value); 129 mask, value);
122} 130}
123 131
124void ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value) 132u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value)
125{ 133{
126 return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUTEN(0), 134 return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUTEN(0),
127 mask, value); 135 mask, value);
128} 136}
129 137
138u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask, u32 value)
139{
140 return extif_write32_masked(extif, SSB_EXTIF_GPIO_INTPOL, mask, value);
141}
142
143u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask, u32 value)
144{
145 return extif_write32_masked(extif, SSB_EXTIF_GPIO_INTMASK, mask, value);
146}
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 2faaa906d5d6..74b9a8aea52b 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -11,6 +11,7 @@
11#include <linux/ssb/ssb.h> 11#include <linux/ssb/ssb.h>
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/ssb/ssb_embedded.h>
14 15
15#include "ssb_private.h" 16#include "ssb_private.h"
16 17
@@ -27,6 +28,18 @@ void pcicore_write32(struct ssb_pcicore *pc, u16 offset, u32 value)
27 ssb_write32(pc->dev, offset, value); 28 ssb_write32(pc->dev, offset, value);
28} 29}
29 30
31static inline
32u16 pcicore_read16(struct ssb_pcicore *pc, u16 offset)
33{
34 return ssb_read16(pc->dev, offset);
35}
36
37static inline
38void pcicore_write16(struct ssb_pcicore *pc, u16 offset, u16 value)
39{
40 ssb_write16(pc->dev, offset, value);
41}
42
30/************************************************** 43/**************************************************
31 * Code for hostmode operation. 44 * Code for hostmode operation.
32 **************************************************/ 45 **************************************************/
@@ -66,6 +79,7 @@ int pcibios_plat_dev_init(struct pci_dev *d)
66 base = &ssb_pcicore_pcibus_iobase; 79 base = &ssb_pcicore_pcibus_iobase;
67 else 80 else
68 base = &ssb_pcicore_pcibus_membase; 81 base = &ssb_pcicore_pcibus_membase;
82 res->flags |= IORESOURCE_PCI_FIXED;
69 if (res->end) { 83 if (res->end) {
70 size = res->end - res->start + 1; 84 size = res->end - res->start + 1;
71 if (*base & (size - 1)) 85 if (*base & (size - 1))
@@ -88,20 +102,28 @@ int pcibios_plat_dev_init(struct pci_dev *d)
88 102
89static void __init ssb_fixup_pcibridge(struct pci_dev *dev) 103static void __init ssb_fixup_pcibridge(struct pci_dev *dev)
90{ 104{
105 u8 lat;
106
91 if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) != 0) 107 if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) != 0)
92 return; 108 return;
93 109
94 ssb_printk(KERN_INFO "PCI: fixing up bridge\n"); 110 ssb_printk(KERN_INFO "PCI: Fixing up bridge %s\n", pci_name(dev));
95 111
96 /* Enable PCI bridge bus mastering and memory space */ 112 /* Enable PCI bridge bus mastering and memory space */
97 pci_set_master(dev); 113 pci_set_master(dev);
98 pcibios_enable_device(dev, ~0); 114 if (pcibios_enable_device(dev, ~0) < 0) {
115 ssb_printk(KERN_ERR "PCI: SSB bridge enable failed\n");
116 return;
117 }
99 118
100 /* Enable PCI bridge BAR1 prefetch and burst */ 119 /* Enable PCI bridge BAR1 prefetch and burst */
101 pci_write_config_dword(dev, SSB_BAR1_CONTROL, 3); 120 pci_write_config_dword(dev, SSB_BAR1_CONTROL, 3);
102 121
103 /* Make sure our latency is high enough to handle the devices behind us */ 122 /* Make sure our latency is high enough to handle the devices behind us */
104 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xa8); 123 lat = 168;
124 ssb_printk(KERN_INFO "PCI: Fixing latency timer of device %s to %u\n",
125 pci_name(dev), lat);
126 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
105} 127}
106DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, ssb_fixup_pcibridge); 128DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, ssb_fixup_pcibridge);
107 129
@@ -117,8 +139,10 @@ static u32 get_cfgspace_addr(struct ssb_pcicore *pc,
117 u32 addr = 0; 139 u32 addr = 0;
118 u32 tmp; 140 u32 tmp;
119 141
120 if (unlikely(pc->cardbusmode && dev > 1)) 142 /* We do only have one cardbus device behind the bridge. */
143 if (pc->cardbusmode && (dev >= 1))
121 goto out; 144 goto out;
145
122 if (bus == 0) { 146 if (bus == 0) {
123 /* Type 0 transaction */ 147 /* Type 0 transaction */
124 if (unlikely(dev >= SSB_PCI_SLOT_MAX)) 148 if (unlikely(dev >= SSB_PCI_SLOT_MAX))
@@ -279,14 +303,14 @@ static struct resource ssb_pcicore_mem_resource = {
279 .name = "SSB PCIcore external memory", 303 .name = "SSB PCIcore external memory",
280 .start = SSB_PCI_DMA, 304 .start = SSB_PCI_DMA,
281 .end = SSB_PCI_DMA + SSB_PCI_DMA_SZ - 1, 305 .end = SSB_PCI_DMA + SSB_PCI_DMA_SZ - 1,
282 .flags = IORESOURCE_MEM, 306 .flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED,
283}; 307};
284 308
285static struct resource ssb_pcicore_io_resource = { 309static struct resource ssb_pcicore_io_resource = {
286 .name = "SSB PCIcore external I/O", 310 .name = "SSB PCIcore external I/O",
287 .start = 0x100, 311 .start = 0x100,
288 .end = 0x7FF, 312 .end = 0x7FF,
289 .flags = IORESOURCE_IO, 313 .flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED,
290}; 314};
291 315
292static struct pci_controller ssb_pcicore_controller = { 316static struct pci_controller ssb_pcicore_controller = {
@@ -318,7 +342,16 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
318 pcicore_write32(pc, SSB_PCICORE_ARBCTL, val); 342 pcicore_write32(pc, SSB_PCICORE_ARBCTL, val);
319 udelay(1); /* Assertion time demanded by the PCI standard */ 343 udelay(1); /* Assertion time demanded by the PCI standard */
320 344
321 /*TODO cardbus mode */ 345 if (pc->dev->bus->has_cardbus_slot) {
346 ssb_dprintk(KERN_INFO PFX "CardBus slot detected\n");
347 pc->cardbusmode = 1;
348 /* GPIO 1 resets the bridge */
349 ssb_gpio_out(pc->dev->bus, 1, 1);
350 ssb_gpio_outen(pc->dev->bus, 1, 1);
351 pcicore_write16(pc, SSB_PCICORE_SPROM(0),
352 pcicore_read16(pc, SSB_PCICORE_SPROM(0))
353 | 0x0400);
354 }
322 355
323 /* 64MB I/O window */ 356 /* 64MB I/O window */
324 pcicore_write32(pc, SSB_PCICORE_SBTOPCI0, 357 pcicore_write32(pc, SSB_PCICORE_SBTOPCI0,
@@ -344,7 +377,8 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
344 /* Ok, ready to run, register it to the system. 377 /* Ok, ready to run, register it to the system.
345 * The following needs change, if we want to port hostmode 378 * The following needs change, if we want to port hostmode
346 * to non-MIPS platform. */ 379 * to non-MIPS platform. */
347 set_io_port_base((unsigned long)ioremap_nocache(SSB_PCI_MEM, 0x04000000)); 380 ssb_pcicore_controller.io_map_base = (unsigned long)ioremap_nocache(SSB_PCI_MEM, 0x04000000);
381 set_io_port_base(ssb_pcicore_controller.io_map_base);
348 /* Give some time to the PCI controller to configure itself with the new 382 /* Give some time to the PCI controller to configure itself with the new
349 * values. Not waiting at this point causes crashes of the machine. */ 383 * values. Not waiting at this point causes crashes of the machine. */
350 mdelay(10); 384 mdelay(10);
@@ -362,7 +396,7 @@ static int pcicore_is_in_hostmode(struct ssb_pcicore *pc)
362 chipid_top != 0x5300) 396 chipid_top != 0x5300)
363 return 0; 397 return 0;
364 398
365 if (bus->sprom.r1.boardflags_lo & SSB_PCICORE_BFL_NOPCI) 399 if (bus->sprom.boardflags_lo & SSB_PCICORE_BFL_NOPCI)
366 return 0; 400 return 0;
367 401
368 /* The 200-pin BCM4712 package does not bond out PCI. Even when 402 /* The 200-pin BCM4712 package does not bond out PCI. Even when
diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c
new file mode 100644
index 000000000000..d3ade821555c
--- /dev/null
+++ b/drivers/ssb/embedded.c
@@ -0,0 +1,132 @@
1/*
2 * Sonics Silicon Backplane
3 * Embedded systems support code
4 *
5 * Copyright 2005-2008, Broadcom Corporation
6 * Copyright 2006-2008, Michael Buesch <mb@bu3sch.de>
7 *
8 * Licensed under the GNU/GPL. See COPYING for details.
9 */
10
11#include <linux/ssb/ssb.h>
12#include <linux/ssb/ssb_embedded.h>
13
14#include "ssb_private.h"
15
16
17int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks)
18{
19 if (ssb_chipco_available(&bus->chipco)) {
20 ssb_chipco_watchdog_timer_set(&bus->chipco, ticks);
21 return 0;
22 }
23 if (ssb_extif_available(&bus->extif)) {
24 ssb_extif_watchdog_timer_set(&bus->extif, ticks);
25 return 0;
26 }
27 return -ENODEV;
28}
29
30u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask)
31{
32 unsigned long flags;
33 u32 res = 0;
34
35 spin_lock_irqsave(&bus->gpio_lock, flags);
36 if (ssb_chipco_available(&bus->chipco))
37 res = ssb_chipco_gpio_in(&bus->chipco, mask);
38 else if (ssb_extif_available(&bus->extif))
39 res = ssb_extif_gpio_in(&bus->extif, mask);
40 else
41 SSB_WARN_ON(1);
42 spin_unlock_irqrestore(&bus->gpio_lock, flags);
43
44 return res;
45}
46EXPORT_SYMBOL(ssb_gpio_in);
47
48u32 ssb_gpio_out(struct ssb_bus *bus, u32 mask, u32 value)
49{
50 unsigned long flags;
51 u32 res = 0;
52
53 spin_lock_irqsave(&bus->gpio_lock, flags);
54 if (ssb_chipco_available(&bus->chipco))
55 res = ssb_chipco_gpio_out(&bus->chipco, mask, value);
56 else if (ssb_extif_available(&bus->extif))
57 res = ssb_extif_gpio_out(&bus->extif, mask, value);
58 else
59 SSB_WARN_ON(1);
60 spin_unlock_irqrestore(&bus->gpio_lock, flags);
61
62 return res;
63}
64EXPORT_SYMBOL(ssb_gpio_out);
65
66u32 ssb_gpio_outen(struct ssb_bus *bus, u32 mask, u32 value)
67{
68 unsigned long flags;
69 u32 res = 0;
70
71 spin_lock_irqsave(&bus->gpio_lock, flags);
72 if (ssb_chipco_available(&bus->chipco))
73 res = ssb_chipco_gpio_outen(&bus->chipco, mask, value);
74 else if (ssb_extif_available(&bus->extif))
75 res = ssb_extif_gpio_outen(&bus->extif, mask, value);
76 else
77 SSB_WARN_ON(1);
78 spin_unlock_irqrestore(&bus->gpio_lock, flags);
79
80 return res;
81}
82EXPORT_SYMBOL(ssb_gpio_outen);
83
84u32 ssb_gpio_control(struct ssb_bus *bus, u32 mask, u32 value)
85{
86 unsigned long flags;
87 u32 res = 0;
88
89 spin_lock_irqsave(&bus->gpio_lock, flags);
90 if (ssb_chipco_available(&bus->chipco))
91 res = ssb_chipco_gpio_control(&bus->chipco, mask, value);
92 spin_unlock_irqrestore(&bus->gpio_lock, flags);
93
94 return res;
95}
96EXPORT_SYMBOL(ssb_gpio_control);
97
98u32 ssb_gpio_intmask(struct ssb_bus *bus, u32 mask, u32 value)
99{
100 unsigned long flags;
101 u32 res = 0;
102
103 spin_lock_irqsave(&bus->gpio_lock, flags);
104 if (ssb_chipco_available(&bus->chipco))
105 res = ssb_chipco_gpio_intmask(&bus->chipco, mask, value);
106 else if (ssb_extif_available(&bus->extif))
107 res = ssb_extif_gpio_intmask(&bus->extif, mask, value);
108 else
109 SSB_WARN_ON(1);
110 spin_unlock_irqrestore(&bus->gpio_lock, flags);
111
112 return res;
113}
114EXPORT_SYMBOL(ssb_gpio_intmask);
115
116u32 ssb_gpio_polarity(struct ssb_bus *bus, u32 mask, u32 value)
117{
118 unsigned long flags;
119 u32 res = 0;
120
121 spin_lock_irqsave(&bus->gpio_lock, flags);
122 if (ssb_chipco_available(&bus->chipco))
123 res = ssb_chipco_gpio_polarity(&bus->chipco, mask, value);
124 else if (ssb_extif_available(&bus->extif))
125 res = ssb_extif_gpio_polarity(&bus->extif, mask, value);
126 else
127 SSB_WARN_ON(1);
128 spin_unlock_irqrestore(&bus->gpio_lock, flags);
129
130 return res;
131}
132EXPORT_SYMBOL(ssb_gpio_polarity);
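
Each exported ssb_gpio_* helper above takes the bus-wide gpio_lock and forwards to whichever backend (ChipCommon or EXTIF) the bus actually has. A userspace sketch of that dispatch pattern, with a pthread mutex standing in for the spinlock and hypothetical backend hooks rather than the real SSB structures:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct gpio_bus {
    pthread_mutex_t lock;
    /* Exactly one backend is expected to be present. */
    uint32_t (*chipco_out)(uint32_t mask, uint32_t value);
    uint32_t (*extif_out)(uint32_t mask, uint32_t value);
};

/* Serialize against other GPIO users and forward to the available core,
 * mirroring ssb_gpio_out() above. */
static uint32_t gpio_out(struct gpio_bus *bus, uint32_t mask, uint32_t value)
{
    uint32_t res = 0;

    pthread_mutex_lock(&bus->lock);
    if (bus->chipco_out)
        res = bus->chipco_out(mask, value);
    else if (bus->extif_out)
        res = bus->extif_out(mask, value);
    else
        fprintf(stderr, "no GPIO backend\n");
    pthread_mutex_unlock(&bus->lock);

    return res;
}

static uint32_t fake_chipco(uint32_t mask, uint32_t value) { return value & mask; }

int main(void)
{
    struct gpio_bus bus = { PTHREAD_MUTEX_INITIALIZER, fake_chipco, NULL };
    printf("%#x\n", (unsigned)gpio_out(&bus, 0x0f, 0x05));
    return 0;
}
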
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 9028ed5715a1..bedb2b4ee9d2 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -557,6 +557,7 @@ static int ssb_fetch_invariants(struct ssb_bus *bus,
557 goto out; 557 goto out;
558 memcpy(&bus->boardinfo, &iv.boardinfo, sizeof(iv.boardinfo)); 558 memcpy(&bus->boardinfo, &iv.boardinfo, sizeof(iv.boardinfo));
559 memcpy(&bus->sprom, &iv.sprom, sizeof(iv.sprom)); 559 memcpy(&bus->sprom, &iv.sprom, sizeof(iv.sprom));
560 bus->has_cardbus_slot = iv.has_cardbus_slot;
560out: 561out:
561 return err; 562 return err;
562} 563}
@@ -569,6 +570,9 @@ static int ssb_bus_register(struct ssb_bus *bus,
569 570
570 spin_lock_init(&bus->bar_lock); 571 spin_lock_init(&bus->bar_lock);
571 INIT_LIST_HEAD(&bus->list); 572 INIT_LIST_HEAD(&bus->list);
573#ifdef CONFIG_SSB_EMBEDDED
574 spin_lock_init(&bus->gpio_lock);
575#endif
572 576
573 /* Powerup the bus */ 577 /* Powerup the bus */
574 err = ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 1); 578 err = ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 1);
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index a789364264a6..21eca2b5118b 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -120,10 +120,10 @@ extern int ssb_devices_thaw(struct ssb_bus *bus);
120extern struct ssb_bus *ssb_pci_dev_to_bus(struct pci_dev *pdev); 120extern struct ssb_bus *ssb_pci_dev_to_bus(struct pci_dev *pdev);
121 121
122/* b43_pci_bridge.c */ 122/* b43_pci_bridge.c */
123#ifdef CONFIG_SSB_PCIHOST 123#ifdef CONFIG_SSB_B43_PCI_BRIDGE
124extern int __init b43_pci_ssb_bridge_init(void); 124extern int __init b43_pci_ssb_bridge_init(void);
125extern void __exit b43_pci_ssb_bridge_exit(void); 125extern void __exit b43_pci_ssb_bridge_exit(void);
126#else /* CONFIG_SSB_PCIHOST */ 126#else /* CONFIG_SSB_B43_PCI_BRIDGE */
127static inline int b43_pci_ssb_bridge_init(void) 127static inline int b43_pci_ssb_bridge_init(void)
128{ 128{
129 return 0; 129 return 0;
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 5c33cdb9cac7..a2b0aa48b8ea 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -87,12 +87,13 @@ config USB_DYNAMIC_MINORS
87 If you are unsure about this, say N here. 87 If you are unsure about this, say N here.
88 88
89config USB_SUSPEND 89config USB_SUSPEND
90 bool "USB selective suspend/resume and wakeup (EXPERIMENTAL)" 90 bool "USB selective suspend/resume and wakeup"
91 depends on USB && PM && EXPERIMENTAL 91 depends on USB && PM
92 help 92 help
93 If you say Y here, you can use driver calls or the sysfs 93 If you say Y here, you can use driver calls or the sysfs
94 "power/state" file to suspend or resume individual USB 94 "power/level" file to suspend or resume individual USB
95 peripherals. 95 peripherals and to enable or disable autosuspend (see
96 Documentation/usb/power-management.txt for more details).
96 97
97 Also, USB "remote wakeup" signaling is supported, whereby some 98 Also, USB "remote wakeup" signaling is supported, whereby some
98 USB devices (like keyboards and network adapters) can wake up 99 USB devices (like keyboards and network adapters) can wake up
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f90ab5e94c58..d9d1eb19f2a1 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -28,35 +28,38 @@
28 * devices is broken... 28 * devices is broken...
29 */ 29 */
30static const struct usb_device_id usb_quirk_list[] = { 30static const struct usb_device_id usb_quirk_list[] = {
31 /* Action Semiconductor flash disk */
32 { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255},
33
34 /* CBM - Flash disk */ 31 /* CBM - Flash disk */
35 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, 32 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
33
36 /* HP 5300/5370C scanner */ 34 /* HP 5300/5370C scanner */
37 { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 }, 35 { USB_DEVICE(0x03f0, 0x0701), .driver_info =
36 USB_QUIRK_STRING_FETCH_255 },
38 37
39 /* Creative SB Audigy 2 NX */ 38 /* Creative SB Audigy 2 NX */
40 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, 39 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
41 40
41 /* Philips PSC805 audio device */
42 { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
43
42 /* Roland SC-8820 */ 44 /* Roland SC-8820 */
43 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, 45 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
44 46
45 /* Edirol SD-20 */ 47 /* Edirol SD-20 */
46 { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, 48 { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME },
47 49
48 /* INTEL VALUE SSD */
49 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
50
51 /* M-Systems Flash Disk Pioneers */ 50 /* M-Systems Flash Disk Pioneers */
52 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, 51 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
53 52
54 /* Philips PSC805 audio device */ 53 /* Action Semiconductor flash disk */
55 { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, 54 { USB_DEVICE(0x10d6, 0x2200), .driver_info =
55 USB_QUIRK_STRING_FETCH_255 },
56 56
57 /* SKYMEDI USB_DRIVE */ 57 /* SKYMEDI USB_DRIVE */
58 { USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME }, 58 { USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME },
59 59
60 /* INTEL VALUE SSD */
61 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
62
60 { } /* terminating entry must be last */ 63 { } /* terminating entry must be last */
61}; 64};
62 65
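The quirks.c hunk mostly reorders usb_quirk_list by vendor ID and adds the Philips PSC805 entry. As a hedged illustration of the pattern a new entry follows, here is a standalone table with placeholder IDs (0x1234/0x5678 and 0x1234/0x9abc are not real devices):

static const struct usb_device_id example_quirk_entries[] = {
	/* placeholder IDs: a device that must be reset after resume */
	{ USB_DEVICE(0x1234, 0x5678), .driver_info = USB_QUIRK_RESET_RESUME },
	/* placeholder IDs: a device whose string descriptors must be
	 * fetched with a 255-byte request */
	{ USB_DEVICE(0x1234, 0x9abc), .driver_info = USB_QUIRK_STRING_FETCH_255 },
	{ }	/* terminating entry must be last */
};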
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 4e984060c984..1f0db51190cc 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -99,8 +99,7 @@ struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev,
99EXPORT_SYMBOL_GPL(usb_ifnum_to_if); 99EXPORT_SYMBOL_GPL(usb_ifnum_to_if);
100 100
101/** 101/**
102 * usb_altnum_to_altsetting - get the altsetting structure with a given 102 * usb_altnum_to_altsetting - get the altsetting structure with a given alternate setting number.
103 * alternate setting number.
104 * @intf: the interface containing the altsetting in question 103 * @intf: the interface containing the altsetting in question
105 * @altnum: the desired alternate setting number 104 * @altnum: the desired alternate setting number
106 * 105 *
@@ -234,7 +233,7 @@ static int ksuspend_usb_init(void)
234 * singlethreaded. Its job doesn't justify running on more 233 * singlethreaded. Its job doesn't justify running on more
235 * than one CPU. 234 * than one CPU.
236 */ 235 */
237 ksuspend_usb_wq = create_singlethread_workqueue("ksuspend_usbd"); 236 ksuspend_usb_wq = create_freezeable_workqueue("ksuspend_usbd");
238 if (!ksuspend_usb_wq) 237 if (!ksuspend_usb_wq)
239 return -ENOMEM; 238 return -ENOMEM;
240 return 0; 239 return 0;
@@ -442,8 +441,7 @@ EXPORT_SYMBOL_GPL(usb_put_intf);
442 */ 441 */
443 442
444/** 443/**
445 * usb_lock_device_for_reset - cautiously acquire the lock for a 444 * usb_lock_device_for_reset - cautiously acquire the lock for a usb device structure
446 * usb device structure
447 * @udev: device that's being locked 445 * @udev: device that's being locked
448 * @iface: interface bound to the driver making the request (optional) 446 * @iface: interface bound to the driver making the request (optional)
449 * 447 *
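The usb.c hunk switches the ksuspend_usbd queue to a freezeable workqueue, so its suspend/resume work is parked during system sleep rather than racing the suspend path. A minimal sketch of that pattern, assuming a driver-private queue; every example_* name is a placeholder:

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_work_fn(struct work_struct *work)
{
	/* work queued here is frozen together with user space during
	 * system suspend/hibernation */
}

static DECLARE_WORK(example_work, example_work_fn);

static int __init example_init(void)
{
	example_wq = create_freezeable_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;
	queue_work(example_wq, &example_work);
	return 0;
}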
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index c13955164686..6f45dd669b33 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -131,7 +131,7 @@ config USB_ATMEL_USBA
131 131
132config USB_GADGET_FSL_USB2 132config USB_GADGET_FSL_USB2
133 boolean "Freescale Highspeed USB DR Peripheral Controller" 133 boolean "Freescale Highspeed USB DR Peripheral Controller"
134 depends on MPC834x || PPC_MPC831x 134 depends on FSL_SOC
135 select USB_GADGET_DUALSPEED 135 select USB_GADGET_DUALSPEED
136 help 136 help
 137	  Some Freescale PowerPC processors have a High Speed 137	  Some Freescale PowerPC processors have a High Speed
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 4f6bfa100f2a..2c32bd08ee7d 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -92,7 +92,6 @@ struct printer_dev {
92 u8 *current_rx_buf; 92 u8 *current_rx_buf;
93 u8 printer_status; 93 u8 printer_status;
94 u8 reset_printer; 94 u8 reset_printer;
95 struct class_device *printer_class_dev;
96 struct cdev printer_cdev; 95 struct cdev printer_cdev;
97 struct device *pdev; 96 struct device *pdev;
98 u8 printer_cdev_open; 97 u8 printer_cdev_open;
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c
index 4402d6f042d9..096c41cc40d1 100644
--- a/drivers/usb/gadget/pxa2xx_udc.c
+++ b/drivers/usb/gadget/pxa2xx_udc.c
@@ -103,6 +103,12 @@ static const char ep0name [] = "ep0";
103#error "Can't configure both IXP and PXA" 103#error "Can't configure both IXP and PXA"
104#endif 104#endif
105 105
106/* IXP doesn't yet support <linux/clk.h> */
107#define clk_get(dev,name) NULL
108#define clk_enable(clk) do { } while (0)
109#define clk_disable(clk) do { } while (0)
110#define clk_put(clk) do { } while (0)
111
106#endif 112#endif
107 113
108#include "pxa2xx_udc.h" 114#include "pxa2xx_udc.h"
@@ -934,20 +940,31 @@ static void udc_disable(struct pxa2xx_udc *);
934/* We disable the UDC -- and its 48 MHz clock -- whenever it's not 940/* We disable the UDC -- and its 48 MHz clock -- whenever it's not
935 * in active use. 941 * in active use.
936 */ 942 */
937static int pullup(struct pxa2xx_udc *udc, int is_active) 943static int pullup(struct pxa2xx_udc *udc)
938{ 944{
939 is_active = is_active && udc->vbus && udc->pullup; 945 int is_active = udc->vbus && udc->pullup && !udc->suspended;
940 DMSG("%s\n", is_active ? "active" : "inactive"); 946 DMSG("%s\n", is_active ? "active" : "inactive");
941 if (is_active) 947 if (is_active) {
942 udc_enable(udc); 948 if (!udc->active) {
943 else { 949 udc->active = 1;
944 if (udc->gadget.speed != USB_SPEED_UNKNOWN) { 950 /* Enable clock for USB device */
945 DMSG("disconnect %s\n", udc->driver 951 clk_enable(udc->clk);
946 ? udc->driver->driver.name 952 udc_enable(udc);
947 : "(no driver)");
948 stop_activity(udc, udc->driver);
949 } 953 }
950 udc_disable(udc); 954 } else {
955 if (udc->active) {
956 if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
957 DMSG("disconnect %s\n", udc->driver
958 ? udc->driver->driver.name
959 : "(no driver)");
960 stop_activity(udc, udc->driver);
961 }
962 udc_disable(udc);
963 /* Disable clock for USB device */
964 clk_disable(udc->clk);
965 udc->active = 0;
966 }
967
951 } 968 }
952 return 0; 969 return 0;
953} 970}
@@ -958,9 +975,9 @@ static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
958 struct pxa2xx_udc *udc; 975 struct pxa2xx_udc *udc;
959 976
960 udc = container_of(_gadget, struct pxa2xx_udc, gadget); 977 udc = container_of(_gadget, struct pxa2xx_udc, gadget);
961 udc->vbus = is_active = (is_active != 0); 978 udc->vbus = (is_active != 0);
962 DMSG("vbus %s\n", is_active ? "supplied" : "inactive"); 979 DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
963 pullup(udc, is_active); 980 pullup(udc);
964 return 0; 981 return 0;
965} 982}
966 983
@@ -975,9 +992,8 @@ static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active)
975 if (!udc->mach->gpio_pullup && !udc->mach->udc_command) 992 if (!udc->mach->gpio_pullup && !udc->mach->udc_command)
976 return -EOPNOTSUPP; 993 return -EOPNOTSUPP;
977 994
978 is_active = (is_active != 0); 995 udc->pullup = (is_active != 0);
979 udc->pullup = is_active; 996 pullup(udc);
980 pullup(udc, is_active);
981 return 0; 997 return 0;
982} 998}
983 999
@@ -997,7 +1013,7 @@ static const struct usb_gadget_ops pxa2xx_udc_ops = {
997#ifdef CONFIG_USB_GADGET_DEBUG_FS 1013#ifdef CONFIG_USB_GADGET_DEBUG_FS
998 1014
999static int 1015static int
1000udc_seq_show(struct seq_file *m, void *d) 1016udc_seq_show(struct seq_file *m, void *_d)
1001{ 1017{
1002 struct pxa2xx_udc *dev = m->private; 1018 struct pxa2xx_udc *dev = m->private;
1003 unsigned long flags; 1019 unsigned long flags;
@@ -1146,11 +1162,6 @@ static void udc_disable(struct pxa2xx_udc *dev)
1146 1162
1147 udc_clear_mask_UDCCR(UDCCR_UDE); 1163 udc_clear_mask_UDCCR(UDCCR_UDE);
1148 1164
1149#ifdef CONFIG_ARCH_PXA
1150 /* Disable clock for USB device */
1151 clk_disable(dev->clk);
1152#endif
1153
1154 ep0_idle (dev); 1165 ep0_idle (dev);
1155 dev->gadget.speed = USB_SPEED_UNKNOWN; 1166 dev->gadget.speed = USB_SPEED_UNKNOWN;
1156} 1167}
@@ -1191,11 +1202,6 @@ static void udc_enable (struct pxa2xx_udc *dev)
1191{ 1202{
1192 udc_clear_mask_UDCCR(UDCCR_UDE); 1203 udc_clear_mask_UDCCR(UDCCR_UDE);
1193 1204
1194#ifdef CONFIG_ARCH_PXA
1195 /* Enable clock for USB device */
1196 clk_enable(dev->clk);
1197#endif
1198
1199 /* try to clear these bits before we enable the udc */ 1205 /* try to clear these bits before we enable the udc */
1200 udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR); 1206 udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);
1201 1207
@@ -1286,7 +1292,7 @@ fail:
1286 * for set_configuration as well as eventual disconnect. 1292 * for set_configuration as well as eventual disconnect.
1287 */ 1293 */
1288 DMSG("registered gadget driver '%s'\n", driver->driver.name); 1294 DMSG("registered gadget driver '%s'\n", driver->driver.name);
1289 pullup(dev, 1); 1295 pullup(dev);
1290 dump_state(dev); 1296 dump_state(dev);
1291 return 0; 1297 return 0;
1292} 1298}
@@ -1329,7 +1335,8 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1329 return -EINVAL; 1335 return -EINVAL;
1330 1336
1331 local_irq_disable(); 1337 local_irq_disable();
1332 pullup(dev, 0); 1338 dev->pullup = 0;
1339 pullup(dev);
1333 stop_activity(dev, driver); 1340 stop_activity(dev, driver);
1334 local_irq_enable(); 1341 local_irq_enable();
1335 1342
@@ -2131,13 +2138,11 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev)
2131 if (irq < 0) 2138 if (irq < 0)
2132 return -ENODEV; 2139 return -ENODEV;
2133 2140
2134#ifdef CONFIG_ARCH_PXA
2135 dev->clk = clk_get(&pdev->dev, "UDCCLK"); 2141 dev->clk = clk_get(&pdev->dev, "UDCCLK");
2136 if (IS_ERR(dev->clk)) { 2142 if (IS_ERR(dev->clk)) {
2137 retval = PTR_ERR(dev->clk); 2143 retval = PTR_ERR(dev->clk);
2138 goto err_clk; 2144 goto err_clk;
2139 } 2145 }
2140#endif
2141 2146
2142 pr_debug("%s: IRQ %d%s%s\n", driver_name, irq, 2147 pr_debug("%s: IRQ %d%s%s\n", driver_name, irq,
2143 dev->has_cfr ? "" : " (!cfr)", 2148 dev->has_cfr ? "" : " (!cfr)",
@@ -2250,10 +2255,8 @@ lubbock_fail0:
2250 if (dev->mach->gpio_vbus) 2255 if (dev->mach->gpio_vbus)
2251 gpio_free(dev->mach->gpio_vbus); 2256 gpio_free(dev->mach->gpio_vbus);
2252 err_gpio_vbus: 2257 err_gpio_vbus:
2253#ifdef CONFIG_ARCH_PXA
2254 clk_put(dev->clk); 2258 clk_put(dev->clk);
2255 err_clk: 2259 err_clk:
2256#endif
2257 return retval; 2260 return retval;
2258} 2261}
2259 2262
@@ -2269,7 +2272,9 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
2269 if (dev->driver) 2272 if (dev->driver)
2270 return -EBUSY; 2273 return -EBUSY;
2271 2274
2272 udc_disable(dev); 2275 dev->pullup = 0;
2276 pullup(dev);
2277
2273 remove_debug_files(dev); 2278 remove_debug_files(dev);
2274 2279
2275 if (dev->got_irq) { 2280 if (dev->got_irq) {
@@ -2289,9 +2294,7 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
2289 if (dev->mach->gpio_pullup) 2294 if (dev->mach->gpio_pullup)
2290 gpio_free(dev->mach->gpio_pullup); 2295 gpio_free(dev->mach->gpio_pullup);
2291 2296
2292#ifdef CONFIG_ARCH_PXA
2293 clk_put(dev->clk); 2297 clk_put(dev->clk);
2294#endif
2295 2298
2296 platform_set_drvdata(pdev, NULL); 2299 platform_set_drvdata(pdev, NULL);
2297 the_controller = NULL; 2300 the_controller = NULL;
@@ -2317,10 +2320,15 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev)
2317static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) 2320static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state)
2318{ 2321{
2319 struct pxa2xx_udc *udc = platform_get_drvdata(dev); 2322 struct pxa2xx_udc *udc = platform_get_drvdata(dev);
2323 unsigned long flags;
2320 2324
2321 if (!udc->mach->gpio_pullup && !udc->mach->udc_command) 2325 if (!udc->mach->gpio_pullup && !udc->mach->udc_command)
2322 WARN("USB host won't detect disconnect!\n"); 2326 WARN("USB host won't detect disconnect!\n");
2323 pullup(udc, 0); 2327 udc->suspended = 1;
2328
2329 local_irq_save(flags);
2330 pullup(udc);
2331 local_irq_restore(flags);
2324 2332
2325 return 0; 2333 return 0;
2326} 2334}
@@ -2328,8 +2336,12 @@ static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state)
2328static int pxa2xx_udc_resume(struct platform_device *dev) 2336static int pxa2xx_udc_resume(struct platform_device *dev)
2329{ 2337{
2330 struct pxa2xx_udc *udc = platform_get_drvdata(dev); 2338 struct pxa2xx_udc *udc = platform_get_drvdata(dev);
2339 unsigned long flags;
2331 2340
2332 pullup(udc, 1); 2341 udc->suspended = 0;
2342 local_irq_save(flags);
2343 pullup(udc);
2344 local_irq_restore(flags);
2333 2345
2334 return 0; 2346 return 0;
2335} 2347}
diff --git a/drivers/usb/gadget/pxa2xx_udc.h b/drivers/usb/gadget/pxa2xx_udc.h
index b67e3ff5e4eb..e2c19e88c875 100644
--- a/drivers/usb/gadget/pxa2xx_udc.h
+++ b/drivers/usb/gadget/pxa2xx_udc.h
@@ -119,7 +119,9 @@ struct pxa2xx_udc {
119 has_cfr : 1, 119 has_cfr : 1,
120 req_pending : 1, 120 req_pending : 1,
121 req_std : 1, 121 req_std : 1,
122 req_config : 1; 122 req_config : 1,
123 suspended : 1,
124 active : 1;
123 125
124#define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200)) 126#define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200))
125 struct timer_list timer; 127 struct timer_list timer;
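Taken together, the pxa2xx_udc.c and pxa2xx_udc.h changes reduce pull-up management to one rule: the D+ pull-up and the 48 MHz UDC clock are on only while VBUS is present, the function driver requests the pull-up, and the controller is not suspended. A condensed sketch of that policy; the helper name is illustrative, the field names follow the patch:

static void example_update_pullup(struct pxa2xx_udc *udc)
{
	int want_active = udc->vbus && udc->pullup && !udc->suspended;

	if (want_active && !udc->active) {
		udc->active = 1;
		clk_enable(udc->clk);	/* gate the 48 MHz clock with the pull-up */
		udc_enable(udc);
	} else if (!want_active && udc->active) {
		udc_disable(udc);
		clk_disable(udc->clk);
		udc->active = 0;
	}
}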
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index b8ad55aff842..46ee7f4c0912 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -281,23 +281,44 @@ static void ehci_iaa_watchdog(unsigned long param)
281{ 281{
282 struct ehci_hcd *ehci = (struct ehci_hcd *) param; 282 struct ehci_hcd *ehci = (struct ehci_hcd *) param;
283 unsigned long flags; 283 unsigned long flags;
284 u32 status, cmd;
285 284
286 spin_lock_irqsave (&ehci->lock, flags); 285 spin_lock_irqsave (&ehci->lock, flags);
287 WARN_ON(!ehci->reclaim);
288 286
289 status = ehci_readl(ehci, &ehci->regs->status); 287 /* Lost IAA irqs wedge things badly; seen first with a vt8235.
290 cmd = ehci_readl(ehci, &ehci->regs->command); 288 * So we need this watchdog, but must protect it against both
291 ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd); 289 * (a) SMP races against real IAA firing and retriggering, and
292 290 * (b) clean HC shutdown, when IAA watchdog was pending.
293 /* lost IAA irqs wedge things badly; seen first with a vt8235 */ 291 */
294 if (ehci->reclaim) { 292 if (ehci->reclaim
295 if (status & STS_IAA) { 293 && !timer_pending(&ehci->iaa_watchdog)
296 ehci_vdbg (ehci, "lost IAA\n"); 294 && HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
295 u32 cmd, status;
296
297 /* If we get here, IAA is *REALLY* late. It's barely
298 * conceivable that the system is so busy that CMD_IAAD
299 * is still legitimately set, so let's be sure it's
300 * clear before we read STS_IAA. (The HC should clear
301 * CMD_IAAD when it sets STS_IAA.)
302 */
303 cmd = ehci_readl(ehci, &ehci->regs->command);
304 if (cmd & CMD_IAAD)
305 ehci_writel(ehci, cmd & ~CMD_IAAD,
306 &ehci->regs->command);
307
308 /* If IAA is set here it either legitimately triggered
309 * before we cleared IAAD above (but _way_ late, so we'll
310 * still count it as lost) ... or a silicon erratum:
311 * - VIA seems to set IAA without triggering the IRQ;
312 * - IAAD potentially cleared without setting IAA.
313 */
314 status = ehci_readl(ehci, &ehci->regs->status);
315 if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
297 COUNT (ehci->stats.lost_iaa); 316 COUNT (ehci->stats.lost_iaa);
298 ehci_writel(ehci, STS_IAA, &ehci->regs->status); 317 ehci_writel(ehci, STS_IAA, &ehci->regs->status);
299 } 318 }
300 ehci_writel(ehci, cmd & ~CMD_IAAD, &ehci->regs->command); 319
320 ehci_vdbg(ehci, "IAA watchdog: status %x cmd %x\n",
321 status, cmd);
301 end_unlink_async(ehci); 322 end_unlink_async(ehci);
302 } 323 }
303 324
@@ -631,7 +652,7 @@ static int ehci_run (struct usb_hcd *hcd)
631static irqreturn_t ehci_irq (struct usb_hcd *hcd) 652static irqreturn_t ehci_irq (struct usb_hcd *hcd)
632{ 653{
633 struct ehci_hcd *ehci = hcd_to_ehci (hcd); 654 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
634 u32 status, pcd_status = 0; 655 u32 status, pcd_status = 0, cmd;
635 int bh; 656 int bh;
636 657
637 spin_lock (&ehci->lock); 658 spin_lock (&ehci->lock);
@@ -652,7 +673,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
652 673
653 /* clear (just) interrupts */ 674 /* clear (just) interrupts */
654 ehci_writel(ehci, status, &ehci->regs->status); 675 ehci_writel(ehci, status, &ehci->regs->status);
655 ehci_readl(ehci, &ehci->regs->command); /* unblock posted write */ 676 cmd = ehci_readl(ehci, &ehci->regs->command);
656 bh = 0; 677 bh = 0;
657 678
658#ifdef EHCI_VERBOSE_DEBUG 679#ifdef EHCI_VERBOSE_DEBUG
@@ -673,8 +694,17 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
673 694
674 /* complete the unlinking of some qh [4.15.2.3] */ 695 /* complete the unlinking of some qh [4.15.2.3] */
675 if (status & STS_IAA) { 696 if (status & STS_IAA) {
676 COUNT (ehci->stats.reclaim); 697 /* guard against (alleged) silicon errata */
677 end_unlink_async(ehci); 698 if (cmd & CMD_IAAD) {
699 ehci_writel(ehci, cmd & ~CMD_IAAD,
700 &ehci->regs->command);
701 ehci_dbg(ehci, "IAA with IAAD still set?\n");
702 }
703 if (ehci->reclaim) {
704 COUNT(ehci->stats.reclaim);
705 end_unlink_async(ehci);
706 } else
707 ehci_dbg(ehci, "IAA with nothing to reclaim?\n");
678 } 708 }
679 709
680 /* remote wakeup [4.3.1] */ 710 /* remote wakeup [4.3.1] */
@@ -781,7 +811,7 @@ static int ehci_urb_enqueue (
781static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) 811static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
782{ 812{
783 /* failfast */ 813 /* failfast */
784 if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) 814 if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state) && ehci->reclaim)
785 end_unlink_async(ehci); 815 end_unlink_async(ehci);
786 816
787 /* if it's not linked then there's nothing to do */ 817 /* if it's not linked then there's nothing to do */
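The ehci-hcd.c comments above describe the lost-IAA problem: some controllers (first seen on a VT8235) set STS_IAA without raising the interrupt, or clear CMD_IAAD without setting STS_IAA, so the watchdog must both avoid racing a genuine IAA completion and tolerate the errata. A stripped-down sketch of the watchdog body under the assumption that the caller holds ehci->lock; statistics and debug output are omitted:

static void example_iaa_watchdog_body(struct ehci_hcd *ehci)
{
	u32 cmd, status;

	/* act only when an unlink is pending, the timer wasn't re-armed,
	 * and the controller is still running */
	if (!ehci->reclaim ||
	    timer_pending(&ehci->iaa_watchdog) ||
	    !HC_IS_RUNNING(ehci_to_hcd(ehci)->state))
		return;

	/* make sure CMD_IAAD is clear before sampling STS_IAA */
	cmd = ehci_readl(ehci, &ehci->regs->command);
	if (cmd & CMD_IAAD)
		ehci_writel(ehci, cmd & ~CMD_IAAD, &ehci->regs->command);

	/* a set STS_IAA (or an already-clear CMD_IAAD) counts as a lost IAA */
	status = ehci_readl(ehci, &ehci->regs->status);
	if ((status & STS_IAA) || !(cmd & CMD_IAAD))
		ehci_writel(ehci, STS_IAA, &ehci->regs->status);

	end_unlink_async(ehci);
}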
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 776a97f33914..2e49de820b14 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -319,10 +319,10 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
319 if (likely (last->urb != urb)) { 319 if (likely (last->urb != urb)) {
320 ehci_urb_done(ehci, last->urb, last_status); 320 ehci_urb_done(ehci, last->urb, last_status);
321 count++; 321 count++;
322 last_status = -EINPROGRESS;
322 } 323 }
323 ehci_qtd_free (ehci, last); 324 ehci_qtd_free (ehci, last);
324 last = NULL; 325 last = NULL;
325 last_status = -EINPROGRESS;
326 } 326 }
327 327
328 /* ignore urbs submitted during completions we reported */ 328 /* ignore urbs submitted during completions we reported */
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 0130fd8571e4..d7071c855758 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -911,8 +911,7 @@ static int isp116x_hub_status_data(struct usb_hcd *hcd, char *buf)
911 buf[0] = 0; 911 buf[0] = 0;
912 912
913 for (i = 0; i < ports; i++) { 913 for (i = 0; i < ports; i++) {
914 u32 status = isp116x->rhport[i] = 914 u32 status = isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1);
915 isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1);
916 915
917 if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC 916 if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC
918 | RH_PS_OCIC | RH_PS_PRSC)) { 917 | RH_PS_OCIC | RH_PS_PRSC)) {
@@ -1031,7 +1030,9 @@ static int isp116x_hub_control(struct usb_hcd *hcd,
1031 DBG("GetPortStatus\n"); 1030 DBG("GetPortStatus\n");
1032 if (!wIndex || wIndex > ports) 1031 if (!wIndex || wIndex > ports)
1033 goto error; 1032 goto error;
1034 tmp = isp116x->rhport[--wIndex]; 1033 spin_lock_irqsave(&isp116x->lock, flags);
1034 tmp = isp116x_read_reg32(isp116x, (--wIndex) ? HCRHPORT2 : HCRHPORT1);
1035 spin_unlock_irqrestore(&isp116x->lock, flags);
1035 *(__le32 *) buf = cpu_to_le32(tmp); 1036 *(__le32 *) buf = cpu_to_le32(tmp);
1036 DBG("GetPortStatus: port[%d] %08x\n", wIndex + 1, tmp); 1037 DBG("GetPortStatus: port[%d] %08x\n", wIndex + 1, tmp);
1037 break; 1038 break;
@@ -1080,8 +1081,6 @@ static int isp116x_hub_control(struct usb_hcd *hcd,
1080 spin_lock_irqsave(&isp116x->lock, flags); 1081 spin_lock_irqsave(&isp116x->lock, flags);
1081 isp116x_write_reg32(isp116x, wIndex 1082 isp116x_write_reg32(isp116x, wIndex
1082 ? HCRHPORT2 : HCRHPORT1, tmp); 1083 ? HCRHPORT2 : HCRHPORT1, tmp);
1083 isp116x->rhport[wIndex] =
1084 isp116x_read_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1);
1085 spin_unlock_irqrestore(&isp116x->lock, flags); 1084 spin_unlock_irqrestore(&isp116x->lock, flags);
1086 break; 1085 break;
1087 case SetPortFeature: 1086 case SetPortFeature:
@@ -1095,24 +1094,22 @@ static int isp116x_hub_control(struct usb_hcd *hcd,
1095 spin_lock_irqsave(&isp116x->lock, flags); 1094 spin_lock_irqsave(&isp116x->lock, flags);
1096 isp116x_write_reg32(isp116x, wIndex 1095 isp116x_write_reg32(isp116x, wIndex
1097 ? HCRHPORT2 : HCRHPORT1, RH_PS_PSS); 1096 ? HCRHPORT2 : HCRHPORT1, RH_PS_PSS);
1097 spin_unlock_irqrestore(&isp116x->lock, flags);
1098 break; 1098 break;
1099 case USB_PORT_FEAT_POWER: 1099 case USB_PORT_FEAT_POWER:
1100 DBG("USB_PORT_FEAT_POWER\n"); 1100 DBG("USB_PORT_FEAT_POWER\n");
1101 spin_lock_irqsave(&isp116x->lock, flags); 1101 spin_lock_irqsave(&isp116x->lock, flags);
1102 isp116x_write_reg32(isp116x, wIndex 1102 isp116x_write_reg32(isp116x, wIndex
1103 ? HCRHPORT2 : HCRHPORT1, RH_PS_PPS); 1103 ? HCRHPORT2 : HCRHPORT1, RH_PS_PPS);
1104 spin_unlock_irqrestore(&isp116x->lock, flags);
1104 break; 1105 break;
1105 case USB_PORT_FEAT_RESET: 1106 case USB_PORT_FEAT_RESET:
1106 DBG("USB_PORT_FEAT_RESET\n"); 1107 DBG("USB_PORT_FEAT_RESET\n");
1107 root_port_reset(isp116x, wIndex); 1108 root_port_reset(isp116x, wIndex);
1108 spin_lock_irqsave(&isp116x->lock, flags);
1109 break; 1109 break;
1110 default: 1110 default:
1111 goto error; 1111 goto error;
1112 } 1112 }
1113 isp116x->rhport[wIndex] =
1114 isp116x_read_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1);
1115 spin_unlock_irqrestore(&isp116x->lock, flags);
1116 break; 1113 break;
1117 1114
1118 default: 1115 default:
diff --git a/drivers/usb/host/isp116x.h b/drivers/usb/host/isp116x.h
index b91e2edd9c5c..595b90a99848 100644
--- a/drivers/usb/host/isp116x.h
+++ b/drivers/usb/host/isp116x.h
@@ -270,7 +270,6 @@ struct isp116x {
270 u32 rhdesca; 270 u32 rhdesca;
271 u32 rhdescb; 271 u32 rhdescb;
272 u32 rhstatus; 272 u32 rhstatus;
273 u32 rhport[2];
274 273
275 /* async schedule: control, bulk */ 274 /* async schedule: control, bulk */
276 struct list_head async; 275 struct list_head async;
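With the rhport[] cache removed from struct isp116x, the hub-control paths read the root-hub port register straight from the chip while holding the driver lock. A minimal sketch of the GetPortStatus read, simplified from the hunk above:

static u32 example_get_port_status(struct isp116x *isp116x, int port)
{
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&isp116x->lock, flags);
	status = isp116x_read_reg32(isp116x, port ? HCRHPORT2 : HCRHPORT1);
	spin_unlock_irqrestore(&isp116x->lock, flags);

	return status;
}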
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index ba370c56172c..59be276ccd9d 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1766,6 +1766,7 @@ sl811h_suspend(struct platform_device *dev, pm_message_t state)
1766 retval = sl811h_bus_suspend(hcd); 1766 retval = sl811h_bus_suspend(hcd);
1767 break; 1767 break;
1768 case PM_EVENT_SUSPEND: 1768 case PM_EVENT_SUSPEND:
1769 case PM_EVENT_HIBERNATE:
1769 case PM_EVENT_PRETHAW: /* explicitly discard hw state */ 1770 case PM_EVENT_PRETHAW: /* explicitly discard hw state */
1770 port_power(sl811, 0); 1771 port_power(sl811, 0);
1771 break; 1772 break;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index ac283b09a63f..3033d6945202 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3213,15 +3213,20 @@ static int u132_suspend(struct platform_device *pdev, pm_message_t state)
3213 dev_err(&u132->platform_dev->dev, "device is being removed\n"); 3213 dev_err(&u132->platform_dev->dev, "device is being removed\n");
3214 return -ESHUTDOWN; 3214 return -ESHUTDOWN;
3215 } else { 3215 } else {
3216 int retval = 0; 3216 int retval = 0, ports;
3217 if (state.event == PM_EVENT_FREEZE) { 3217
3218 switch (state.event) {
3219 case PM_EVENT_FREEZE:
3218 retval = u132_bus_suspend(hcd); 3220 retval = u132_bus_suspend(hcd);
3219 } else if (state.event == PM_EVENT_SUSPEND) { 3221 break;
3220 int ports = MAX_U132_PORTS; 3222 case PM_EVENT_SUSPEND:
3223 case PM_EVENT_HIBERNATE:
3224 ports = MAX_U132_PORTS;
3221 while (ports-- > 0) { 3225 while (ports-- > 0) {
3222 port_power(u132, ports, 0); 3226 port_power(u132, ports, 0);
3223 } 3227 }
3224 } 3228 break;
3229 }
3225 if (retval == 0) 3230 if (retval == 0)
3226 pdev->dev.power.power_state = state; 3231 pdev->dev.power.power_state = state;
3227 return retval; 3232 return retval;
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 08c65c1a3771..779d07851a4d 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -94,6 +94,7 @@ static struct usb_device_id id_table_earthmate [] = {
94 94
95static struct usb_device_id id_table_cyphidcomrs232 [] = { 95static struct usb_device_id id_table_cyphidcomrs232 [] = {
96 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, 96 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
97 { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
97 { } /* Terminating entry */ 98 { } /* Terminating entry */
98}; 99};
99 100
@@ -106,6 +107,7 @@ static struct usb_device_id id_table_combined [] = {
106 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, 107 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
107 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, 108 { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
108 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, 109 { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
110 { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
109 { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, 111 { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
110 { } /* Terminating entry */ 112 { } /* Terminating entry */
111}; 113};
diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h
index e1c7c27e18b7..0388065bb794 100644
--- a/drivers/usb/serial/cypress_m8.h
+++ b/drivers/usb/serial/cypress_m8.h
@@ -19,6 +19,10 @@
19#define VENDOR_ID_CYPRESS 0x04b4 19#define VENDOR_ID_CYPRESS 0x04b4
20#define PRODUCT_ID_CYPHIDCOM 0x5500 20#define PRODUCT_ID_CYPHIDCOM 0x5500
21 21
22/* Powercom UPS, chip CY7C63723 */
23#define VENDOR_ID_POWERCOM 0x0d9f
24#define PRODUCT_ID_UPS 0x0002
25
22/* Nokia CA-42 USB to serial cable */ 26/* Nokia CA-42 USB to serial cable */
23#define VENDOR_ID_DAZZLE 0x07d0 27#define VENDOR_ID_DAZZLE 0x07d0
24#define PRODUCT_ID_CA42 0x4101 28#define PRODUCT_ID_CA42 0x4101
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 76db2fef4657..3abb3c863647 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -92,6 +92,7 @@ struct ftdi_sio_quirk {
92}; 92};
93 93
94static int ftdi_jtag_probe (struct usb_serial *serial); 94static int ftdi_jtag_probe (struct usb_serial *serial);
95static int ftdi_mtxorb_hack_setup (struct usb_serial *serial);
95static void ftdi_USB_UIRT_setup (struct ftdi_private *priv); 96static void ftdi_USB_UIRT_setup (struct ftdi_private *priv);
96static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv); 97static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv);
97 98
@@ -99,6 +100,10 @@ static struct ftdi_sio_quirk ftdi_jtag_quirk = {
99 .probe = ftdi_jtag_probe, 100 .probe = ftdi_jtag_probe,
100}; 101};
101 102
103static struct ftdi_sio_quirk ftdi_mtxorb_hack_quirk = {
104 .probe = ftdi_mtxorb_hack_setup,
105};
106
102static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = { 107static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = {
103 .port_probe = ftdi_USB_UIRT_setup, 108 .port_probe = ftdi_USB_UIRT_setup,
104}; 109};
@@ -161,6 +166,8 @@ static struct usb_device_id id_table_combined [] = {
161 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, 166 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) },
162 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, 167 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) },
163 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, 168 { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) },
169 { USB_DEVICE(MTXORB_VK_VID, MTXORB_VK_PID),
170 .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk },
164 { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, 171 { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) },
165 { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, 172 { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) },
166 { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, 173 { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) },
@@ -274,6 +281,7 @@ static struct usb_device_id id_table_combined [] = {
274 { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, 281 { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) },
275 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, 282 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) },
276 { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, 283 { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
284 { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
277 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, 285 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
278 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, 286 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
279 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, 287 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
@@ -351,6 +359,7 @@ static struct usb_device_id id_table_combined [] = {
351 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, 359 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
352 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, 360 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
353 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, 361 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
362 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
354 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), 363 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
355 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 364 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
356 { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID), 365 { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID),
@@ -1088,6 +1097,23 @@ static int ftdi_jtag_probe(struct usb_serial *serial)
1088 return 0; 1097 return 0;
1089} 1098}
1090 1099
1100/*
1101 * The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
1102 * We have to correct it if we want to read from it.
1103 */
1104static int ftdi_mtxorb_hack_setup(struct usb_serial *serial)
1105{
1106 struct usb_host_endpoint *ep = serial->dev->ep_in[1];
1107 struct usb_endpoint_descriptor *ep_desc = &ep->desc;
1108
1109 if (ep->enabled && ep_desc->wMaxPacketSize == 0) {
1110 ep_desc->wMaxPacketSize = 0x40;
1111 info("Fixing invalid wMaxPacketSize on read pipe");
1112 }
1113
1114 return 0;
1115}
1116
1091/* ftdi_shutdown is called from usbserial:usb_serial_disconnect 1117/* ftdi_shutdown is called from usbserial:usb_serial_disconnect
1092 * it is called when the usb device is disconnected 1118 * it is called when the usb device is disconnected
1093 * 1119 *
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 6eee2ab914ec..6da539ede0ee 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -102,6 +102,13 @@
102 * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ 102 * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
103#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ 103#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */
104 104
105/*
106 * The following are the values for the Matrix Orbital VK204-25-USB
107 * display, which use the FT232RL.
108 */
109#define MTXORB_VK_VID 0x1b3d
110#define MTXORB_VK_PID 0x0158
111
105/* Interbiometrics USB I/O Board */ 112/* Interbiometrics USB I/O Board */
106/* Developed for Interbiometrics by Rudolf Gugler */ 113/* Developed for Interbiometrics by Rudolf Gugler */
107#define INTERBIOMETRICS_VID 0x1209 114#define INTERBIOMETRICS_VID 0x1209
@@ -550,6 +557,9 @@
550#define TML_VID 0x1B91 /* Vendor ID */ 557#define TML_VID 0x1B91 /* Vendor ID */
551#define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */ 558#define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */
552 559
560/* Propox devices */
561#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
562
553/* Commands */ 563/* Commands */
554#define FTDI_SIO_RESET 0 /* Reset the port */ 564#define FTDI_SIO_RESET 0 /* Reset the port */
555#define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */ 565#define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */
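The ftdi_sio changes show the driver's quirk plumbing: a struct ftdi_sio_quirk supplies a probe hook, and the device-ID table points at it through .driver_info; here the hook repairs the Matrix Orbital VK204-25-USB's bogus zero wMaxPacketSize on its IN endpoint. A sketch of wiring an entry that way; the example_* names are placeholders, the VID/PID and hook are the ones added above:

static struct ftdi_sio_quirk example_mtxorb_quirk = {
	.probe = ftdi_mtxorb_hack_setup,	/* fix the IN endpoint before use */
};

static struct usb_device_id example_id_entries[] = {
	{ USB_DEVICE(MTXORB_VK_VID, MTXORB_VK_PID),
	  .driver_info = (kernel_ulong_t)&example_mtxorb_quirk },
	{ }	/* terminating entry */
};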
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 97fa3c428435..7cfce9dabb90 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -323,7 +323,7 @@ static void flush_and_resubmit_read_urb (struct usb_serial_port *port)
323 room = tty_buffer_request_room(tty, urb->actual_length); 323 room = tty_buffer_request_room(tty, urb->actual_length);
324 if (room) { 324 if (room) {
325 tty_insert_flip_string(tty, urb->transfer_buffer, room); 325 tty_insert_flip_string(tty, urb->transfer_buffer, room);
326 tty_flip_buffer_push(tty); /* is this allowed from an URB callback ? */ 326 tty_flip_buffer_push(tty);
327 } 327 }
328 } 328 }
329 329
@@ -349,10 +349,12 @@ void usb_serial_generic_read_bulk_callback (struct urb *urb)
349 349
350 /* Throttle the device if requested by tty */ 350 /* Throttle the device if requested by tty */
351 spin_lock_irqsave(&port->lock, flags); 351 spin_lock_irqsave(&port->lock, flags);
352 if (!(port->throttled = port->throttle_req)) 352 if (!(port->throttled = port->throttle_req)) {
353 /* Handle data and continue reading from device */ 353 spin_unlock_irqrestore(&port->lock, flags);
354 flush_and_resubmit_read_urb(port); 354 flush_and_resubmit_read_urb(port);
355 spin_unlock_irqrestore(&port->lock, flags); 355 } else {
356 spin_unlock_irqrestore(&port->lock, flags);
357 }
356} 358}
357EXPORT_SYMBOL_GPL(usb_serial_generic_read_bulk_callback); 359EXPORT_SYMBOL_GPL(usb_serial_generic_read_bulk_callback);
358 360
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 869ecd374cb4..aeeb9cb20999 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -110,11 +110,20 @@
110 110
111/* vendor id and device id defines */ 111/* vendor id and device id defines */
112 112
113/* The native mos7840/7820 component */
113#define USB_VENDOR_ID_MOSCHIP 0x9710 114#define USB_VENDOR_ID_MOSCHIP 0x9710
114#define MOSCHIP_DEVICE_ID_7840 0x7840 115#define MOSCHIP_DEVICE_ID_7840 0x7840
115#define MOSCHIP_DEVICE_ID_7820 0x7820 116#define MOSCHIP_DEVICE_ID_7820 0x7820
 117/* The native component can have its vendor/device IDs overridden
118 * in vendor-specific implementations. Such devices can be handled
119 * by making a change here, in moschip_port_id_table, and in
120 * moschip_id_table_combined
121 */
122#define USB_VENDOR_ID_BANDB 0x0856
123#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
124#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
116 125
117/* Interrupt Rotinue Defines */ 126/* Interrupt Routine Defines */
118 127
119#define SERIAL_IIR_RLS 0x06 128#define SERIAL_IIR_RLS 0x06
120#define SERIAL_IIR_MS 0x00 129#define SERIAL_IIR_MS 0x00
@@ -159,12 +168,16 @@
159static struct usb_device_id moschip_port_id_table[] = { 168static struct usb_device_id moschip_port_id_table[] = {
160 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, 169 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
161 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, 170 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
171 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
172 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
162 {} /* terminating entry */ 173 {} /* terminating entry */
163}; 174};
164 175
165static __devinitdata struct usb_device_id moschip_id_table_combined[] = { 176static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
166 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, 177 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
167 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, 178 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
179 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
180 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
168 {} /* terminating entry */ 181 {} /* terminating entry */
169}; 182};
170 183
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index af2674c57414..a396fbbdc9c2 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -111,6 +111,42 @@ static int option_send_setup(struct usb_serial_port *port);
111#define HUAWEI_PRODUCT_E220BIS 0x1004 111#define HUAWEI_PRODUCT_E220BIS 0x1004
112 112
113#define NOVATELWIRELESS_VENDOR_ID 0x1410 113#define NOVATELWIRELESS_VENDOR_ID 0x1410
114
115/* MERLIN EVDO PRODUCTS */
116#define NOVATELWIRELESS_PRODUCT_V640 0x1100
117#define NOVATELWIRELESS_PRODUCT_V620 0x1110
118#define NOVATELWIRELESS_PRODUCT_V740 0x1120
119#define NOVATELWIRELESS_PRODUCT_V720 0x1130
120
121/* MERLIN HSDPA/HSPA PRODUCTS */
122#define NOVATELWIRELESS_PRODUCT_U730 0x1400
123#define NOVATELWIRELESS_PRODUCT_U740 0x1410
124#define NOVATELWIRELESS_PRODUCT_U870 0x1420
125#define NOVATELWIRELESS_PRODUCT_XU870 0x1430
126#define NOVATELWIRELESS_PRODUCT_X950D 0x1450
127
128/* EXPEDITE PRODUCTS */
129#define NOVATELWIRELESS_PRODUCT_EV620 0x2100
130#define NOVATELWIRELESS_PRODUCT_ES720 0x2110
131#define NOVATELWIRELESS_PRODUCT_E725 0x2120
132#define NOVATELWIRELESS_PRODUCT_EU730 0x2400
133#define NOVATELWIRELESS_PRODUCT_EU740 0x2410
134#define NOVATELWIRELESS_PRODUCT_EU870D 0x2420
135
136/* OVATION PRODUCTS */
137#define NOVATELWIRELESS_PRODUCT_MC727 0x4100
138#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
139
140/* FUTURE NOVATEL PRODUCTS */
141#define NOVATELWIRELESS_PRODUCT_EVDO_1 0x6000
142#define NOVATELWIRELESS_PRODUCT_HSPA_1 0x7000
143#define NOVATELWIRELESS_PRODUCT_EMBEDDED_1 0x8000
144#define NOVATELWIRELESS_PRODUCT_GLOBAL_1 0x9000
145#define NOVATELWIRELESS_PRODUCT_EVDO_2 0x6001
146#define NOVATELWIRELESS_PRODUCT_HSPA_2 0x7001
147#define NOVATELWIRELESS_PRODUCT_EMBEDDED_2 0x8001
148#define NOVATELWIRELESS_PRODUCT_GLOBAL_2 0x9001
149
114#define DELL_VENDOR_ID 0x413C 150#define DELL_VENDOR_ID 0x413C
115 151
116#define KYOCERA_VENDOR_ID 0x0c88 152#define KYOCERA_VENDOR_ID 0x0c88
@@ -120,6 +156,9 @@ static int option_send_setup(struct usb_serial_port *port);
120#define ANYDATA_PRODUCT_ADU_E100A 0x6501 156#define ANYDATA_PRODUCT_ADU_E100A 0x6501
121#define ANYDATA_PRODUCT_ADU_500A 0x6502 157#define ANYDATA_PRODUCT_ADU_500A 0x6502
122 158
159#define AXESSTEL_VENDOR_ID 0x1726
160#define AXESSTEL_PRODUCT_MV110H 0x1000
161
123#define BANDRICH_VENDOR_ID 0x1A8D 162#define BANDRICH_VENDOR_ID 0x1A8D
124#define BANDRICH_PRODUCT_C100_1 0x1002 163#define BANDRICH_PRODUCT_C100_1 0x1002
125#define BANDRICH_PRODUCT_C100_2 0x1003 164#define BANDRICH_PRODUCT_C100_2 0x1003
@@ -165,21 +204,34 @@ static struct usb_device_id option_ids[] = {
165 { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) }, 204 { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) },
166 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, 205 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
167 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, 206 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
168 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1100) }, /* Novatel Merlin XS620/S640 */ 207 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */
169 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1110) }, /* Novatel Merlin S620 */ 208 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */
170 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1120) }, /* Novatel Merlin EX720 */ 209 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */
171 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1130) }, /* Novatel Merlin S720 */ 210 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, /* Novatel Merlin V720/S720/PC720 */
172 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1400) }, /* Novatel U730 */ 211 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, /* Novatel U730/U740 (VF version) */
173 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1410) }, /* Novatel U740 */ 212 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, /* Novatel U740 */
174 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1420) }, /* Novatel EU870 */ 213 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, /* Novatel U870 */
175 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1430) }, /* Novatel Merlin XU870 HSDPA/3G */ 214 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, /* Novatel Merlin XU870 HSDPA/3G */
176 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2100) }, /* Novatel EV620 CDMA/EV-DO */ 215 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, /* Novatel X950D */
177 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2110) }, /* Novatel Merlin ES620 / Merlin ES720 / Ovation U720 */ 216 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */
217 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */
218 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */
178 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2130) }, /* Novatel Merlin ES620 SM Bus */ 219 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2130) }, /* Novatel Merlin ES620 SM Bus */
179 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2410) }, /* Novatel EU740 */ 220 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */
180 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x4100) }, /* Novatel U727 */ 221 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */
181 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x4400) }, /* Novatel MC950 */ 222 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */
223 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */
224 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */
182 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x5010) }, /* Novatel U727 */ 225 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x5010) }, /* Novatel U727 */
226 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_1) }, /* Novatel EVDO product */
227 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_1) }, /* Novatel HSPA product */
228 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_1) }, /* Novatel Embedded product */
229 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL_1) }, /* Novatel Global product */
230 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_2) }, /* Novatel EVDO product */
231 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_2) }, /* Novatel HSPA product */
232 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_2) }, /* Novatel Embedded product */
233 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL_2) }, /* Novatel Global product */
234
183 { USB_DEVICE(DELL_VENDOR_ID, 0x8114) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */ 235 { USB_DEVICE(DELL_VENDOR_ID, 0x8114) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */
184 { USB_DEVICE(DELL_VENDOR_ID, 0x8115) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ 236 { USB_DEVICE(DELL_VENDOR_ID, 0x8115) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
185 { USB_DEVICE(DELL_VENDOR_ID, 0x8116) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ 237 { USB_DEVICE(DELL_VENDOR_ID, 0x8116) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
@@ -192,6 +244,7 @@ static struct usb_device_id option_ids[] = {
192 { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */ 244 { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */
193 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, 245 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },
194 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, 246 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
247 { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) },
195 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, 248 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
196 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, 249 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
197 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 250 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index 958f5b17847c..b9b8ede61fb3 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -170,7 +170,6 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
170 170
171 if (!sg) 171 if (!sg)
172 sg = scsi_sglist(srb); 172 sg = scsi_sglist(srb);
173 buflen = min(buflen, scsi_bufflen(srb));
174 173
175 /* This loop handles a single s-g list entry, which may 174 /* This loop handles a single s-g list entry, which may
176 * include multiple pages. Find the initial page structure 175 * include multiple pages. Find the initial page structure
@@ -232,6 +231,7 @@ void usb_stor_set_xfer_buf(unsigned char *buffer,
232 unsigned int offset = 0; 231 unsigned int offset = 0;
233 struct scatterlist *sg = NULL; 232 struct scatterlist *sg = NULL;
234 233
234 buflen = min(buflen, scsi_bufflen(srb));
235 buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset, 235 buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
236 TO_XFER_BUF); 236 TO_XFER_BUF);
237 if (buflen < scsi_bufflen(srb)) 237 if (buflen < scsi_bufflen(srb))
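The protocol.c change moves the clamp against scsi_bufflen() out of usb_stor_access_xfer_buf() and into its outer callers, so the helper trusts the length it is handed. A sketch of a caller doing the clamp first; residue bookkeeping is omitted and example_* is a placeholder name:

static void example_set_xfer_buf(unsigned char *buffer, unsigned int buflen,
				 struct scsi_cmnd *srb)
{
	unsigned int offset = 0;
	struct scatterlist *sg = NULL;

	buflen = min(buflen, scsi_bufflen(srb));	/* clamp in the caller */
	usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
				 TO_XFER_BUF);
}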
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index d43a3415e12f..6d14327c921d 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -522,8 +522,8 @@ int sddr55_reset(struct us_data *us) {
522 522
523static unsigned long sddr55_get_capacity(struct us_data *us) { 523static unsigned long sddr55_get_capacity(struct us_data *us) {
524 524
525 unsigned char manufacturerID; 525 unsigned char uninitialized_var(manufacturerID);
526 unsigned char deviceID; 526 unsigned char uninitialized_var(deviceID);
527 int result; 527 int result;
528 struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra; 528 struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra;
529 529
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 758435f8a6f8..e0b0580705e4 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -553,6 +553,19 @@ config FB_BF54X_LQ043
553 help 553 help
554 This is the framebuffer device driver for a SHARP LQ043T1DG01 TFT LCD 554 This is the framebuffer device driver for a SHARP LQ043T1DG01 TFT LCD
555 555
556config FB_BFIN_T350MCQB
557 tristate "Varitronix COG-T350MCQB TFT LCD display (BF527 EZKIT)"
558 depends on FB && BLACKFIN
559 select BFIN_GPTIMERS
560 select FB_CFB_FILLRECT
561 select FB_CFB_COPYAREA
562 select FB_CFB_IMAGEBLIT
563 help
 564	 This is the framebuffer device driver for a Varitronix VL-PS-COG-T350MCQB-01 TFT LCD display.
 565	 This display is a QVGA 320x240 24-bit RGB panel interfaced through an 8-bit wide PPI.
 566	 It uses PPI[0..7], PPI_FS1, PPI_FS2 and PPI_CLK.
567
568
556config FB_STI 569config FB_STI
557 tristate "HP STI frame buffer device support" 570 tristate "HP STI frame buffer device support"
558 depends on FB && PARISC 571 depends on FB && PARISC
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 83e02b3429b6..03371c789039 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -122,6 +122,7 @@ obj-$(CONFIG_FB_EFI) += efifb.o
122obj-$(CONFIG_FB_VGA16) += vga16fb.o 122obj-$(CONFIG_FB_VGA16) += vga16fb.o
123obj-$(CONFIG_FB_OF) += offb.o 123obj-$(CONFIG_FB_OF) += offb.o
124obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o 124obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o
125obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o
125 126
126# the test framebuffer is last 127# the test framebuffer is last
127obj-$(CONFIG_FB_VIRTUAL) += vfb.o 128obj-$(CONFIG_FB_VIRTUAL) += vfb.o
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 0ce791e6f79c..986a550c0439 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -8,7 +8,7 @@
8 * 8 *
9 * 9 *
10 * Modified: 10 * Modified:
11 * Copyright 2004-2007 Analog Devices Inc. 11 * Copyright 2007-2008 Analog Devices Inc.
12 * 12 *
13 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 13 * Bugs: Enter bugs at http://blackfin.uclinux.org/
14 * 14 *
@@ -241,7 +241,7 @@ static int request_ports(struct bfin_bf54xfb_info *fbi)
241 u16 eppi_req_18[] = EPPI0_18; 241 u16 eppi_req_18[] = EPPI0_18;
242 u16 disp = fbi->mach_info->disp; 242 u16 disp = fbi->mach_info->disp;
243 243
244 if (gpio_request(disp, NULL)) { 244 if (gpio_request(disp, DRIVER_NAME)) {
 245 printk(KERN_ERR "Requesting GPIO %d failed\n", disp); 245 printk(KERN_ERR "Requesting GPIO %d failed\n", disp);
246 return -EFAULT; 246 return -EFAULT;
247 } 247 }
@@ -672,7 +672,7 @@ static int __init bfin_bf54x_probe(struct platform_device *pdev)
672 &bfin_lq043fb_bl_ops); 672 &bfin_lq043fb_bl_ops);
673 bl_dev->props.max_brightness = 255; 673 bl_dev->props.max_brightness = 255;
674 674
675 lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops); 675 lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops);
676 lcd_dev->props.max_contrast = 255, printk(KERN_INFO "Done.\n"); 676 lcd_dev->props.max_contrast = 255, printk(KERN_INFO "Done.\n");
677#endif 677#endif
678 678
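Two small API points from the bf54x-lq043fb.c hunks: gpio_request() should carry an owner label so the pin can be attributed to the driver, and lcd_device_register() now takes a parent device and a driver-data pointer ahead of the ops. A hedged sketch combining both, where example_disp_gpio and example_lcd_ops are placeholders and DRIVER_NAME is the driver's own name macro:

static int example_disp_gpio;		/* placeholder GPIO number */
static struct lcd_ops example_lcd_ops;	/* placeholder ops table */

static int example_register_lcd(struct platform_device *pdev)
{
	struct lcd_device *lcd;
	int ret;

	/* label the pin with the driver name, not NULL */
	ret = gpio_request(example_disp_gpio, DRIVER_NAME);
	if (ret)
		return ret;

	/* name, parent device, driver data, ops */
	lcd = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL,
				  &example_lcd_ops);
	if (IS_ERR(lcd)) {
		gpio_free(example_disp_gpio);
		return PTR_ERR(lcd);
	}
	return 0;
}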
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
new file mode 100644
index 000000000000..a2bb2de9e020
--- /dev/null
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -0,0 +1,685 @@
1/*
2 * File: drivers/video/bfin-t350mcqb-fb.c
3 * Based on:
4 * Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
5 *
6 * Created:
 7 * Description: Blackfin LCD Framebuffer driver
8 *
9 *
10 * Modified:
11 * Copyright 2004-2007 Analog Devices Inc.
12 *
13 * Bugs: Enter bugs at http://blackfin.uclinux.org/
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, see the file COPYING, or write
27 * to the Free Software Foundation, Inc.,
28 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
29 */
30
31#include <linux/module.h>
32#include <linux/kernel.h>
33#include <linux/errno.h>
34#include <linux/string.h>
35#include <linux/fb.h>
36#include <linux/init.h>
37#include <linux/types.h>
38#include <linux/interrupt.h>
39#include <linux/device.h>
40#include <linux/backlight.h>
41#include <linux/lcd.h>
42#include <linux/dma-mapping.h>
43#include <linux/platform_device.h>
44
45#include <asm/blackfin.h>
46#include <asm/irq.h>
47#include <asm/dma-mapping.h>
48#include <asm/dma.h>
49#include <asm/portmux.h>
50#include <asm/gptimers.h>
51
52#define NO_BL_SUPPORT
53
54#define LCD_X_RES 320 /* Horizontal Resolution */
55#define LCD_Y_RES 240 /* Vertical Resolution */
56#define LCD_BPP 24 /* Bit Per Pixel */
57
58#define DMA_BUS_SIZE 16
59#define LCD_CLK (12*1000*1000) /* 12MHz */
60
61#define CLOCKS_PER_PIX 3
62
63 /*
64 * HS and VS timing parameters (all in number of PPI clk ticks)
65 */
66
67#define U_LINE 1 /* Blanking Lines */
68
69#define H_ACTPIX (LCD_X_RES * CLOCKS_PER_PIX) /* active horizontal pixel */
70#define H_PERIOD (408 * CLOCKS_PER_PIX) /* HS period */
71#define H_PULSE 90 /* HS pulse width */
72#define H_START 204 /* first valid pixel */
73
74#define V_LINES (LCD_Y_RES + U_LINE) /* total vertical lines */
75#define V_PULSE (3 * H_PERIOD) /* VS pulse width (1-5 H_PERIODs) */
76#define V_PERIOD (H_PERIOD * V_LINES) /* VS period */
77
78#define ACTIVE_VIDEO_MEM_OFFSET (U_LINE * H_ACTPIX)
79
80#define BFIN_LCD_NBR_PALETTE_ENTRIES 256
81
82#define DRIVER_NAME "bfin-t350mcqb"
83static char driver_name[] = DRIVER_NAME;
84
85struct bfin_t350mcqbfb_info {
86 struct fb_info *fb;
87 struct device *dev;
88 unsigned char *fb_buffer; /* RGB Buffer */
89 dma_addr_t dma_handle;
90 int lq043_mmap;
91 int lq043_open_cnt;
92 int irq;
93 spinlock_t lock; /* lock */
94};
95
96static int nocursor;
97module_param(nocursor, int, 0644);
98MODULE_PARM_DESC(nocursor, "cursor enable/disable");
99
100#define PPI_TX_MODE 0x2
101#define PPI_XFER_TYPE_11 0xC
102#define PPI_PORT_CFG_01 0x10
103#define PPI_PACK_EN 0x80
104#define PPI_POLS_1 0x8000
105
106static void bfin_t350mcqb_config_ppi(struct bfin_t350mcqbfb_info *fbi)
107{
108 bfin_write_PPI_DELAY(H_START);
109 bfin_write_PPI_COUNT(H_ACTPIX-1);
110 bfin_write_PPI_FRAME(V_LINES);
111
112 bfin_write_PPI_CONTROL(PPI_TX_MODE | /* output mode , PORT_DIR */
113 PPI_XFER_TYPE_11 | /* sync mode XFR_TYPE */
114 PPI_PORT_CFG_01 | /* two frame sync PORT_CFG */
115 PPI_PACK_EN | /* packing enabled PACK_EN */
 116 PPI_POLS_1); /* falling edge syncs POLS */
117}
118
119static inline void bfin_t350mcqb_disable_ppi(void)
120{
121 bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() & ~PORT_EN);
122}
123
124static inline void bfin_t350mcqb_enable_ppi(void)
125{
126 bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | PORT_EN);
127}
128
129static void bfin_t350mcqb_start_timers(void)
130{
131 unsigned long flags;
132
133 local_irq_save(flags);
134 enable_gptimers(TIMER1bit);
135 enable_gptimers(TIMER0bit);
136 local_irq_restore(flags);
137}
138
139static void bfin_t350mcqb_stop_timers(void)
140{
141 disable_gptimers(TIMER0bit | TIMER1bit);
142
143 set_gptimer_status(0, TIMER_STATUS_TRUN0 | TIMER_STATUS_TRUN1 |
144 TIMER_STATUS_TIMIL0 | TIMER_STATUS_TIMIL1 |
145 TIMER_STATUS_TOVF0 | TIMER_STATUS_TOVF1);
146
147}
148
149static void bfin_t350mcqb_init_timers(void)
150{
151
152 bfin_t350mcqb_stop_timers();
153
154 set_gptimer_period(TIMER0_id, H_PERIOD);
155 set_gptimer_pwidth(TIMER0_id, H_PULSE);
156 set_gptimer_config(TIMER0_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
157 TIMER_TIN_SEL | TIMER_CLK_SEL|
158 TIMER_EMU_RUN);
159
160 set_gptimer_period(TIMER1_id, V_PERIOD);
161 set_gptimer_pwidth(TIMER1_id, V_PULSE);
162 set_gptimer_config(TIMER1_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT |
163 TIMER_TIN_SEL | TIMER_CLK_SEL |
164 TIMER_EMU_RUN);
165
166}
167
168static void bfin_t350mcqb_config_dma(struct bfin_t350mcqbfb_info *fbi)
169{
170
171 set_dma_config(CH_PPI,
172 set_bfin_dma_config(DIR_READ, DMA_FLOW_AUTO,
173 INTR_DISABLE, DIMENSION_2D,
174 DATA_SIZE_16,
175 DMA_NOSYNC_KEEP_DMA_BUF));
176 set_dma_x_count(CH_PPI, (LCD_X_RES * LCD_BPP) / DMA_BUS_SIZE);
177 set_dma_x_modify(CH_PPI, DMA_BUS_SIZE / 8);
178 set_dma_y_count(CH_PPI, V_LINES);
179
180 set_dma_y_modify(CH_PPI, DMA_BUS_SIZE / 8);
181 set_dma_start_addr(CH_PPI, (unsigned long)fbi->fb_buffer);
182
183}
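/*
 * DMA geometry as configured above, worked out as a sanity check:
 *   x_count  = (320 * 24) / 16 = 480 16-bit transfers per line
 *   x_modify = 16 / 8          =   2 bytes per transfer
 *   y_count  = 241 lines (240 visible + 1 blanking)
 * One frame therefore moves 480 * 2 * 241 = 231360 bytes: the 230400-byte
 * visible image (320 * 240 * 3) plus one 960-byte blanking line
 * (ACTIVE_VIDEO_MEM_OFFSET).
 */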
184
185static int bfin_t350mcqb_request_ports(int action)
186{
187 u16 ppi0_req_8[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
188 P_PPI0_D0, P_PPI0_D1, P_PPI0_D2,
189 P_PPI0_D3, P_PPI0_D4, P_PPI0_D5,
190 P_PPI0_D6, P_PPI0_D7, 0};
191
192 if (action) {
193 if (peripheral_request_list(ppi0_req_8, DRIVER_NAME)) {
 194			printk(KERN_ERR "Requesting peripherals failed\n");
195 return -EFAULT;
196 }
197 } else
198 peripheral_free_list(ppi0_req_8);
199
200 return 0;
201}
202
203static int bfin_t350mcqb_fb_open(struct fb_info *info, int user)
204{
205 struct bfin_t350mcqbfb_info *fbi = info->par;
206
207 spin_lock(&fbi->lock);
208 fbi->lq043_open_cnt++;
209
210 if (fbi->lq043_open_cnt <= 1) {
211
212 bfin_t350mcqb_disable_ppi();
213 SSYNC();
214
215 bfin_t350mcqb_config_dma(fbi);
216 bfin_t350mcqb_config_ppi(fbi);
217 bfin_t350mcqb_init_timers();
218
219 /* start dma */
220 enable_dma(CH_PPI);
221 bfin_t350mcqb_enable_ppi();
222 bfin_t350mcqb_start_timers();
223 }
224
225 spin_unlock(&fbi->lock);
226
227 return 0;
228}
229
230static int bfin_t350mcqb_fb_release(struct fb_info *info, int user)
231{
232 struct bfin_t350mcqbfb_info *fbi = info->par;
233
234 spin_lock(&fbi->lock);
235
236 fbi->lq043_open_cnt--;
237 fbi->lq043_mmap = 0;
238
239 if (fbi->lq043_open_cnt <= 0) {
240 bfin_t350mcqb_disable_ppi();
241 SSYNC();
242 disable_dma(CH_PPI);
243 bfin_t350mcqb_stop_timers();
244 memset(fbi->fb_buffer, 0, info->fix.smem_len);
245 }
246
247 spin_unlock(&fbi->lock);
248
249 return 0;
250}
251
252static int bfin_t350mcqb_fb_check_var(struct fb_var_screeninfo *var,
253 struct fb_info *info)
254{
255
256 if (var->bits_per_pixel != LCD_BPP) {
257 pr_debug("%s: depth not supported: %u BPP\n", __FUNCTION__,
258 var->bits_per_pixel);
259 return -EINVAL;
260 }
261
262 if (info->var.xres != var->xres || info->var.yres != var->yres ||
263 info->var.xres_virtual != var->xres_virtual ||
264 info->var.yres_virtual != var->yres_virtual) {
265 pr_debug("%s: Resolution not supported: X%u x Y%u \n",
266 __FUNCTION__, var->xres, var->yres);
267 return -EINVAL;
268 }
269
270 /*
271 * Memory limit
272 */
273
274 if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
275 pr_debug("%s: Memory Limit requested yres_virtual = %u\n",
276 __FUNCTION__, var->yres_virtual);
277 return -ENOMEM;
278 }
279
280 return 0;
281}
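/*
 * With the fixed 320x240 geometry, line_length = 320 * 24 / 8 = 960 bytes
 * and 960 * 240 = 230400 bytes, exactly smem_len; the resolution check
 * above already pins the virtual size, so the memory-limit test is a
 * defensive consistency check rather than a real constraint.
 */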
282
283static int bfin_t350mcqb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
284{
285 struct bfin_t350mcqbfb_info *fbi = info->par;
286
287 if (fbi->lq043_mmap)
 288		return -EBUSY;
289
290 spin_lock(&fbi->lock);
291 fbi->lq043_mmap = 1;
292 spin_unlock(&fbi->lock);
293
294 vma->vm_start = (unsigned long)(fbi->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET);
295
296 vma->vm_end = vma->vm_start + info->fix.smem_len;
297 /* For those who don't understand how mmap works, go read
298 * Documentation/nommu-mmap.txt.
299 * For those that do, you will know that the VM_MAYSHARE flag
300 * must be set in the vma->vm_flags structure on noMMU
301 * Other flags can be set, and are documented in
302 * include/linux/mm.h
303 */
304 vma->vm_flags |= VM_MAYSHARE;
305
306 return 0;
307}
308
309int bfin_t350mcqb_fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
310{
311 if (nocursor)
312 return 0;
313 else
314 return -EINVAL; /* just to force soft_cursor() call */
315}
316
317static int bfin_t350mcqb_fb_setcolreg(u_int regno, u_int red, u_int green,
318 u_int blue, u_int transp,
319 struct fb_info *info)
320{
321 if (regno >= BFIN_LCD_NBR_PALETTE_ENTRIES)
322 return -EINVAL;
323
324 if (info->var.grayscale) {
325 /* grayscale = 0.30*R + 0.59*G + 0.11*B */
326 red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
327 }
328
329 if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
330
331 u32 value;
332 /* Place color in the pseudopalette */
 333			if (regno >= 16)
334 return -EINVAL;
335
336 red >>= (16 - info->var.red.length);
337 green >>= (16 - info->var.green.length);
338 blue >>= (16 - info->var.blue.length);
339
340 value = (red << info->var.red.offset) |
341 (green << info->var.green.offset) |
342 (blue << info->var.blue.offset);
343 value &= 0xFFFFFF;
344
345 ((u32 *) (info->pseudo_palette))[regno] = value;
346
347 }
348
349 return 0;
350}
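/*
 * Example with the channel layout set up in probe (red at bit 0, green at
 * bit 8, blue at bit 16, 8 bits each): a full-scale 16-bit red of 0xFFFF
 * is shifted down to 0xFF and stored as 0x0000FF in the pseudo palette;
 * full-scale blue becomes 0xFF0000.
 */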
351
352static struct fb_ops bfin_t350mcqb_fb_ops = {
353 .owner = THIS_MODULE,
354 .fb_open = bfin_t350mcqb_fb_open,
355 .fb_release = bfin_t350mcqb_fb_release,
356 .fb_check_var = bfin_t350mcqb_fb_check_var,
357 .fb_fillrect = cfb_fillrect,
358 .fb_copyarea = cfb_copyarea,
359 .fb_imageblit = cfb_imageblit,
360 .fb_mmap = bfin_t350mcqb_fb_mmap,
361 .fb_cursor = bfin_t350mcqb_fb_cursor,
362 .fb_setcolreg = bfin_t350mcqb_fb_setcolreg,
363};
364
365#ifndef NO_BL_SUPPORT
366static int bl_get_brightness(struct backlight_device *bd)
367{
368 return 0;
369}
370
371static struct backlight_ops bfin_lq043fb_bl_ops = {
372 .get_brightness = bl_get_brightness,
373};
374
375static struct backlight_device *bl_dev;
376
377static int bfin_lcd_get_power(struct lcd_device *dev)
378{
379 return 0;
380}
381
382static int bfin_lcd_set_power(struct lcd_device *dev, int power)
383{
384 return 0;
385}
386
387static int bfin_lcd_get_contrast(struct lcd_device *dev)
388{
389 return 0;
390}
391
392static int bfin_lcd_set_contrast(struct lcd_device *dev, int contrast)
393{
394
395 return 0;
396}
397
398static int bfin_lcd_check_fb(struct fb_info *fi)
399{
400 if (!fi || (fi == &bfin_t350mcqb_fb))
401 return 1;
402 return 0;
403}
404
405static struct lcd_ops bfin_lcd_ops = {
406 .get_power = bfin_lcd_get_power,
407 .set_power = bfin_lcd_set_power,
408 .get_contrast = bfin_lcd_get_contrast,
409 .set_contrast = bfin_lcd_set_contrast,
410 .check_fb = bfin_lcd_check_fb,
411};
412
413static struct lcd_device *lcd_dev;
414#endif
415
416static irqreturn_t bfin_t350mcqb_irq_error(int irq, void *dev_id)
417{
418 /*struct bfin_t350mcqbfb_info *info = (struct bfin_t350mcqbfb_info *)dev_id;*/
419
420 u16 status = bfin_read_PPI_STATUS();
421 bfin_write_PPI_STATUS(0xFFFF);
422
423 if (status) {
424 bfin_t350mcqb_disable_ppi();
425 disable_dma(CH_PPI);
426
427 /* start dma */
428 enable_dma(CH_PPI);
429 bfin_t350mcqb_enable_ppi();
430 bfin_write_PPI_STATUS(0xFFFF);
431 }
432
433 return IRQ_HANDLED;
434}
435
436static int __init bfin_t350mcqb_probe(struct platform_device *pdev)
437{
438 struct bfin_t350mcqbfb_info *info;
439 struct fb_info *fbinfo;
440 int ret;
441
442 printk(KERN_INFO DRIVER_NAME ": %dx%d %d-bit RGB FrameBuffer initializing...\n",
443 LCD_X_RES, LCD_Y_RES, LCD_BPP);
444
445 if (request_dma(CH_PPI, "CH_PPI") < 0) {
446 printk(KERN_ERR DRIVER_NAME
447 ": couldn't request CH_PPI DMA\n");
448 ret = -EFAULT;
449 goto out1;
450 }
451
452 fbinfo =
453 framebuffer_alloc(sizeof(struct bfin_t350mcqbfb_info), &pdev->dev);
454 if (!fbinfo) {
455 ret = -ENOMEM;
456 goto out2;
457 }
458
459 info = fbinfo->par;
460 info->fb = fbinfo;
461 info->dev = &pdev->dev;
462
463 platform_set_drvdata(pdev, fbinfo);
464
465 strcpy(fbinfo->fix.id, driver_name);
466
467 fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
468 fbinfo->fix.type_aux = 0;
469 fbinfo->fix.xpanstep = 0;
470 fbinfo->fix.ypanstep = 0;
471 fbinfo->fix.ywrapstep = 0;
472 fbinfo->fix.accel = FB_ACCEL_NONE;
473 fbinfo->fix.visual = FB_VISUAL_TRUECOLOR;
474
475 fbinfo->var.nonstd = 0;
476 fbinfo->var.activate = FB_ACTIVATE_NOW;
477 fbinfo->var.height = -1;
478 fbinfo->var.width = -1;
479 fbinfo->var.accel_flags = 0;
480 fbinfo->var.vmode = FB_VMODE_NONINTERLACED;
481
482 fbinfo->var.xres = LCD_X_RES;
483 fbinfo->var.xres_virtual = LCD_X_RES;
484 fbinfo->var.yres = LCD_Y_RES;
485 fbinfo->var.yres_virtual = LCD_Y_RES;
486 fbinfo->var.bits_per_pixel = LCD_BPP;
487
488 fbinfo->var.red.offset = 0;
489 fbinfo->var.green.offset = 8;
490 fbinfo->var.blue.offset = 16;
491 fbinfo->var.transp.offset = 0;
492 fbinfo->var.red.length = 8;
493 fbinfo->var.green.length = 8;
494 fbinfo->var.blue.length = 8;
495 fbinfo->var.transp.length = 0;
496 fbinfo->fix.smem_len = LCD_X_RES * LCD_Y_RES * LCD_BPP / 8;
497
498 fbinfo->fix.line_length = fbinfo->var.xres_virtual *
499 fbinfo->var.bits_per_pixel / 8;
500
501
502 fbinfo->fbops = &bfin_t350mcqb_fb_ops;
503 fbinfo->flags = FBINFO_FLAG_DEFAULT;
504
505 info->fb_buffer =
506 dma_alloc_coherent(NULL, fbinfo->fix.smem_len, &info->dma_handle,
507 GFP_KERNEL);
508
509 if (NULL == info->fb_buffer) {
510 printk(KERN_ERR DRIVER_NAME
511 ": couldn't allocate dma buffer.\n");
512 ret = -ENOMEM;
513 goto out3;
514 }
515
516 memset(info->fb_buffer, 0, fbinfo->fix.smem_len);
517
518 fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
 519	fbinfo->fix.smem_start = (unsigned long)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET;
520
523 fbinfo->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
524 if (!fbinfo->pseudo_palette) {
525 printk(KERN_ERR DRIVER_NAME
 526		       ": failed to allocate pseudo_palette\n");
527
528 ret = -ENOMEM;
529 goto out4;
530 }
531
532 memset(fbinfo->pseudo_palette, 0, sizeof(u32) * 16);
533
534 if (fb_alloc_cmap(&fbinfo->cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0)
535 < 0) {
536 printk(KERN_ERR DRIVER_NAME
 537		       ": failed to allocate colormap (%d entries)\n",
538 BFIN_LCD_NBR_PALETTE_ENTRIES);
539 ret = -EFAULT;
540 goto out5;
541 }
542
543 if (bfin_t350mcqb_request_ports(1)) {
544 printk(KERN_ERR DRIVER_NAME ": couldn't request gpio port.\n");
545 ret = -EFAULT;
546 goto out6;
547 }
548
549 info->irq = platform_get_irq(pdev, 0);
550 if (info->irq < 0) {
551 ret = -EINVAL;
552 goto out7;
553 }
554
 555	if (request_irq(info->irq, bfin_t350mcqb_irq_error, IRQF_DISABLED,
556 "PPI ERROR", info) < 0) {
557 printk(KERN_ERR DRIVER_NAME
558 ": unable to request PPI ERROR IRQ\n");
559 ret = -EFAULT;
560 goto out7;
561 }
562
563 if (register_framebuffer(fbinfo) < 0) {
564 printk(KERN_ERR DRIVER_NAME
565 ": unable to register framebuffer.\n");
566 ret = -EINVAL;
567 goto out8;
568 }
569#ifndef NO_BL_SUPPORT
570 bl_dev =
571 backlight_device_register("bf52x-bl", NULL, NULL,
572 &bfin_lq043fb_bl_ops);
573 bl_dev->props.max_brightness = 255;
574
575 lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops);
 576	lcd_dev->props.max_contrast = 255; printk(KERN_INFO "Done.\n");
577#endif
578
579 return 0;
580
581out8:
582 free_irq(info->irq, info);
583out7:
584 bfin_t350mcqb_request_ports(0);
585out6:
586 fb_dealloc_cmap(&fbinfo->cmap);
587out5:
588 kfree(fbinfo->pseudo_palette);
589out4:
590 dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
591 info->dma_handle);
592out3:
593 framebuffer_release(fbinfo);
594out2:
595 free_dma(CH_PPI);
596out1:
597 platform_set_drvdata(pdev, NULL);
598
599 return ret;
600}
601
602static int bfin_t350mcqb_remove(struct platform_device *pdev)
603{
604
605 struct fb_info *fbinfo = platform_get_drvdata(pdev);
606 struct bfin_t350mcqbfb_info *info = fbinfo->par;
607
608 free_dma(CH_PPI);
609 free_irq(info->irq, info);
610
611 if (info->fb_buffer != NULL)
612 dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer,
613 info->dma_handle);
614
615 kfree(fbinfo->pseudo_palette);
616 fb_dealloc_cmap(&fbinfo->cmap);
617
618#ifndef NO_BL_SUPPORT
619 lcd_device_unregister(lcd_dev);
620 backlight_device_unregister(bl_dev);
621#endif
622
623 unregister_framebuffer(fbinfo);
624
625 bfin_t350mcqb_request_ports(0);
626
 627	printk(KERN_INFO DRIVER_NAME ": unregistered LCD driver.\n");
628
629 return 0;
630}
631
632#ifdef CONFIG_PM
633static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t state)
634{
635 struct fb_info *fbinfo = platform_get_drvdata(pdev);
636 struct bfin_t350mcqbfb_info *info = fbinfo->par;
637
638 bfin_t350mcqb_disable_ppi();
639 disable_dma(CH_PPI);
640 bfin_write_PPI_STATUS(0xFFFF);
641
642 return 0;
643}
644
645static int bfin_t350mcqb_resume(struct platform_device *pdev)
646{
647 struct fb_info *fbinfo = platform_get_drvdata(pdev);
648 struct bfin_t350mcqbfb_info *info = fbinfo->par;
649
650 enable_dma(CH_PPI);
651 bfin_t350mcqb_enable_ppi();
652
653 return 0;
654}
655#else
656#define bfin_t350mcqb_suspend NULL
657#define bfin_t350mcqb_resume NULL
658#endif
659
660static struct platform_driver bfin_t350mcqb_driver = {
661 .probe = bfin_t350mcqb_probe,
662 .remove = bfin_t350mcqb_remove,
663 .suspend = bfin_t350mcqb_suspend,
664 .resume = bfin_t350mcqb_resume,
665 .driver = {
666 .name = DRIVER_NAME,
667 .owner = THIS_MODULE,
668 },
669};
670
671static int __init bfin_t350mcqb_driver_init(void)
672{
673 return platform_driver_register(&bfin_t350mcqb_driver);
674}
675
676static void __exit bfin_t350mcqb_driver_cleanup(void)
677{
678 platform_driver_unregister(&bfin_t350mcqb_driver);
679}
680
681MODULE_DESCRIPTION("Blackfin TFT LCD Driver");
682MODULE_LICENSE("GPL");
683
684module_init(bfin_t350mcqb_driver_init);
685module_exit(bfin_t350mcqb_driver_cleanup);
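For reference, the driver above binds by platform-device name ("bfin-t350mcqb") and expects a single IRQ resource, which probe fetches with platform_get_irq(pdev, 0). A minimal board-file sketch might look like the following; the IRQ number and device id are placeholders rather than values taken from any real board support file:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Placeholder: substitute the SoC's PPI error interrupt number. */
#define BOARD_PPI_ERR_IRQ	42

static struct resource bfin_lcd_resources[] = {
	{
		.start	= BOARD_PPI_ERR_IRQ,
		.end	= BOARD_PPI_ERR_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device bfin_lcd_device = {
	.name		= "bfin-t350mcqb",	/* must match DRIVER_NAME above */
	.id		= 0,
	.num_resources	= ARRAY_SIZE(bfin_lcd_resources),
	.resource	= bfin_lcd_resources,
};

/* registered from the board's init code, e.g.:
 *	platform_device_register(&bfin_lcd_device);
 */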
diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c
index 6796ba62c3c6..777389c40988 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/chipsfb.c
@@ -459,7 +459,7 @@ static int chipsfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
459 459
460 if (state.event == pdev->dev.power.power_state.event) 460 if (state.event == pdev->dev.power.power_state.event)
461 return 0; 461 return 0;
462 if (state.event != PM_EVENT_SUSPEND) 462 if (!(state.event & PM_EVENT_SLEEP))
463 goto done; 463 goto done;
464 464
465 acquire_console_sem(); 465 acquire_console_sem();
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c
index 756c0ce85911..392a8be6aa76 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/hitfb.c
@@ -403,7 +403,7 @@ static int __init hitfb_probe(struct platform_device *dev)
403 return 0; 403 return 0;
404} 404}
405 405
406static int __devexit hitfb_remove(struct platform_device *dev) 406static int __exit hitfb_remove(struct platform_device *dev)
407{ 407{
408 return unregister_framebuffer(&fb_info); 408 return unregister_framebuffer(&fb_info);
409} 409}
@@ -439,7 +439,7 @@ static int hitfb_resume(struct platform_device *dev)
439 439
440static struct platform_driver hitfb_driver = { 440static struct platform_driver hitfb_driver = {
441 .probe = hitfb_probe, 441 .probe = hitfb_probe,
442 .remove = __devexit_p(hitfb_remove), 442 .remove = __exit_p(hitfb_remove),
443#ifdef CONFIG_PM 443#ifdef CONFIG_PM
444 .suspend = hitfb_suspend, 444 .suspend = hitfb_suspend,
445 .resume = hitfb_resume, 445 .resume = hitfb_resume,
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index 80cd117ca65c..01f77bcc68f9 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -889,7 +889,7 @@ static int __devinit mbxfb_probe(struct platform_device *dev)
889 struct mbxfb_info *mfbi; 889 struct mbxfb_info *mfbi;
890 struct mbxfb_platform_data *pdata; 890 struct mbxfb_platform_data *pdata;
891 891
892 dev_dbg(dev, "mbxfb_probe\n"); 892 dev_dbg(&dev->dev, "mbxfb_probe\n");
893 893
894 pdata = dev->dev.platform_data; 894 pdata = dev->dev.platform_data;
895 if (!pdata) { 895 if (!pdata) {
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 74517b1b26a6..596652d2831f 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -1066,7 +1066,7 @@ static int nvidiafb_suspend(struct pci_dev *dev, pm_message_t mesg)
1066 acquire_console_sem(); 1066 acquire_console_sem();
1067 par->pm_state = mesg.event; 1067 par->pm_state = mesg.event;
1068 1068
1069 if (mesg.event == PM_EVENT_SUSPEND) { 1069 if (mesg.event & PM_EVENT_SLEEP) {
1070 fb_set_suspend(info, 1); 1070 fb_set_suspend(info, 1);
1071 nvidiafb_blank(FB_BLANK_POWERDOWN, info); 1071 nvidiafb_blank(FB_BLANK_POWERDOWN, info);
1072 nvidia_write_regs(par, &par->SavedReg); 1072 nvidia_write_regs(par, &par->SavedReg);
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index 6a3d0b574897..8c863a7f654b 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -1,16 +1,12 @@
1/* drivers/video/pvr2fb.c 1/*
2 * drivers/video/pvr2fb.c
2 * 3 *
3 * Frame buffer and fbcon support for the NEC PowerVR2 found within the Sega 4 * Frame buffer and fbcon support for the NEC PowerVR2 found within the Sega
4 * Dreamcast. 5 * Dreamcast.
5 * 6 *
6 * Copyright (c) 2001 M. R. Brown <mrbrown@0xd6.org> 7 * Copyright (c) 2001 M. R. Brown <mrbrown@0xd6.org>
7 * Copyright (c) 2001, 2002, 2003, 2004, 2005 Paul Mundt <lethal@linux-sh.org> 8 * Copyright (c) 2001 - 2008 Paul Mundt <lethal@linux-sh.org>
8 *
9 * This file is part of the LinuxDC project (linuxdc.sourceforge.net).
10 * 9 *
11 */
12
13/*
14 * This driver is mostly based on the excellent amifb and vfb sources. It uses 10 * This driver is mostly based on the excellent amifb and vfb sources. It uses
15 * an odd scheme for converting hardware values to/from framebuffer values, 11 * an odd scheme for converting hardware values to/from framebuffer values,
16 * here are some hacked-up formulas: 12 * here are some hacked-up formulas:
@@ -490,7 +486,7 @@ static int pvr2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
490 } else { 486 } else {
491 var->sync &= ~FB_SYNC_BROADCAST; 487 var->sync &= ~FB_SYNC_BROADCAST;
492 var->vmode &= ~FB_VMODE_INTERLACED; 488 var->vmode &= ~FB_VMODE_INTERLACED;
493 var->vmode |= pvr2_var.vmode; 489 var->vmode |= FB_VMODE_NONINTERLACED;
494 } 490 }
495 491
496 if ((var->activate & FB_ACTIVATE_MASK) != FB_ACTIVATE_TEST) { 492 if ((var->activate & FB_ACTIVATE_MASK) != FB_ACTIVATE_TEST) {
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index e83dfba7e636..742b5c656d66 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -237,12 +237,14 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var,
237 237
238 /* check we can fit these values into the registers */ 238 /* check we can fit these values into the registers */
239 239
240 if (var->hsync_len > 255 || var->vsync_len > 255) 240 if (var->hsync_len > 255 || var->vsync_len > 63)
241 return -EINVAL; 241 return -EINVAL;
242 242
243 if ((var->xres + var->right_margin) >= 4096) 243 /* hdisplay end and hsync start */
244 if ((var->xres + var->right_margin) > 4096)
244 return -EINVAL; 245 return -EINVAL;
245 246
247 /* vdisplay end and vsync start */
246 if ((var->yres + var->lower_margin) > 2048) 248 if ((var->yres + var->lower_margin) > 2048)
247 return -EINVAL; 249 return -EINVAL;
248 250
@@ -281,19 +283,21 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var,
281 var->blue.length = var->bits_per_pixel; 283 var->blue.length = var->bits_per_pixel;
282 var->blue.offset = 0; 284 var->blue.offset = 0;
283 var->transp.length = 0; 285 var->transp.length = 0;
286 var->transp.offset = 0;
284 287
285 break; 288 break;
286 289
287 case 16: 290 case 16:
288 if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) { 291 if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) {
289 var->red.offset = 11;
290 var->green.offset = 5;
291 var->blue.offset = 0;
292 } else {
293 var->blue.offset = 11; 292 var->blue.offset = 11;
294 var->green.offset = 5; 293 var->green.offset = 5;
295 var->red.offset = 0; 294 var->red.offset = 0;
295 } else {
296 var->red.offset = 11;
297 var->green.offset = 5;
298 var->blue.offset = 0;
296 } 299 }
300 var->transp.offset = 0;
297 301
298 var->red.length = 5; 302 var->red.length = 5;
299 var->green.length = 6; 303 var->green.length = 6;
@@ -397,7 +401,7 @@ static int sm501fb_set_par_common(struct fb_info *info,
397 break; 401 break;
398 402
399 case 16: 403 case 16:
400 info->fix.visual = FB_VISUAL_DIRECTCOLOR; 404 info->fix.visual = FB_VISUAL_TRUECOLOR;
401 break; 405 break;
402 406
403 case 32: 407 case 32:
@@ -613,6 +617,7 @@ static int sm501fb_set_par_crt(struct fb_info *info)
613 617
614 case 16: 618 case 16:
615 control |= SM501_DC_CRT_CONTROL_16BPP; 619 control |= SM501_DC_CRT_CONTROL_16BPP;
620 sm501fb_setup_gamma(fbi, SM501_DC_CRT_PALETTE);
616 break; 621 break;
617 622
618 case 32: 623 case 32:
@@ -750,6 +755,7 @@ static int sm501fb_set_par_pnl(struct fb_info *info)
750 755
751 case 16: 756 case 16:
752 control |= SM501_DC_PANEL_CONTROL_16BPP; 757 control |= SM501_DC_PANEL_CONTROL_16BPP;
758 sm501fb_setup_gamma(fbi, SM501_DC_PANEL_PALETTE);
753 break; 759 break;
754 760
755 case 32: 761 case 32:
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
index e7c8db2eb49b..f98be301140c 100644
--- a/drivers/video/stifb.c
+++ b/drivers/video/stifb.c
@@ -505,16 +505,24 @@ ngleSetupAttrPlanes(struct stifb_info *fb, int BufferNumber)
505static void 505static void
506rattlerSetupPlanes(struct stifb_info *fb) 506rattlerSetupPlanes(struct stifb_info *fb)
507{ 507{
508 int saved_id, y;
509
510 /* Write RAMDAC pixel read mask register so all overlay
511 * planes are display-enabled. (CRX24 uses Bt462 pixel
512 * read mask register for overlay planes, not image planes).
513 */
508 CRX24_SETUP_RAMDAC(fb); 514 CRX24_SETUP_RAMDAC(fb);
509 515
510 /* replacement for: SETUP_FB(fb, CRX24_OVERLAY_PLANES); */ 516 /* change fb->id temporarily to fool SETUP_FB() */
511 WRITE_WORD(0x83000300, fb, REG_14); 517 saved_id = fb->id;
512 SETUP_HW(fb); 518 fb->id = CRX24_OVERLAY_PLANES;
513 WRITE_BYTE(1, fb, REG_16b1); 519 SETUP_FB(fb);
520 fb->id = saved_id;
521
522 for (y = 0; y < fb->info.var.yres; ++y)
523 memset(fb->info.screen_base + y * fb->info.fix.line_length,
524 0xff, fb->info.var.xres * fb->info.var.bits_per_pixel/8);
514 525
515 fb_memset((void*)fb->info.fix.smem_start, 0xff,
516 fb->info.var.yres*fb->info.fix.line_length);
517
518 CRX24_SET_OVLY_MASK(fb); 526 CRX24_SET_OVLY_MASK(fb);
519 SETUP_FB(fb); 527 SETUP_FB(fb);
520} 528}
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index 70fb4ee2b421..0a4e07d43d2d 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -564,7 +564,7 @@ static inline void write3CE(int reg, unsigned char val)
564 t_outb(val, 0x3CF); 564 t_outb(val, 0x3CF);
565} 565}
566 566
567static inline void enable_mmio(void) 567static void enable_mmio(void)
568{ 568{
569 /* Goto New Mode */ 569 /* Goto New Mode */
570 outb(0x0B, 0x3C4); 570 outb(0x0B, 0x3C4);
@@ -579,6 +579,21 @@ static inline void enable_mmio(void)
579 outb(inb(0x3D5) | 0x01, 0x3D5); 579 outb(inb(0x3D5) | 0x01, 0x3D5);
580} 580}
581 581
582static void disable_mmio(void)
583{
584 /* Goto New Mode */
585 t_outb(0x0B, 0x3C4);
586 t_inb(0x3C5);
587
588 /* Unprotect registers */
589 t_outb(NewMode1, 0x3C4);
590 t_outb(0x80, 0x3C5);
591
592 /* Disable MMIO */
593 t_outb(PCIReg, 0x3D4);
594 t_outb(t_inb(0x3D5) & ~0x01, 0x3D5);
595}
596
582#define crtc_unlock() write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F) 597#define crtc_unlock() write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F)
583 598
584/* Return flat panel's maximum x resolution */ 599/* Return flat panel's maximum x resolution */
@@ -730,7 +745,7 @@ static unsigned int __devinit get_memsize(void)
730 switch (tmp) { 745 switch (tmp) {
731 746
732 case 0x01: 747 case 0x01:
733 k = 512; 748 k = 512 * Kb;
734 break; 749 break;
735 case 0x02: 750 case 0x02:
736 k = 6 * Mb; /* XP */ 751 k = 6 * Mb; /* XP */
@@ -1239,9 +1254,9 @@ static int __devinit trident_pci_probe(struct pci_dev * dev,
1239 default_par.io_virt = ioremap_nocache(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); 1254 default_par.io_virt = ioremap_nocache(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
1240 1255
1241 if (!default_par.io_virt) { 1256 if (!default_par.io_virt) {
1242 release_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
1243 debug("ioremap failed\n"); 1257 debug("ioremap failed\n");
1244 return -1; 1258 err = -1;
1259 goto out_unmap1;
1245 } 1260 }
1246 1261
1247 enable_mmio(); 1262 enable_mmio();
@@ -1252,25 +1267,21 @@ static int __devinit trident_pci_probe(struct pci_dev * dev,
1252 1267
1253 if (!request_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len, "tridentfb")) { 1268 if (!request_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len, "tridentfb")) {
1254 debug("request_mem_region failed!\n"); 1269 debug("request_mem_region failed!\n");
1270 disable_mmio();
1255 err = -1; 1271 err = -1;
1256 goto out_unmap; 1272 goto out_unmap1;
1257 } 1273 }
1258 1274
1259 fb_info.screen_base = ioremap_nocache(tridentfb_fix.smem_start, 1275 fb_info.screen_base = ioremap_nocache(tridentfb_fix.smem_start,
1260 tridentfb_fix.smem_len); 1276 tridentfb_fix.smem_len);
1261 1277
1262 if (!fb_info.screen_base) { 1278 if (!fb_info.screen_base) {
1263 release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
1264 debug("ioremap failed\n"); 1279 debug("ioremap failed\n");
1265 err = -1; 1280 err = -1;
1266 goto out_unmap; 1281 goto out_unmap2;
1267 } 1282 }
1268 1283
1269 output("%s board found\n", pci_name(dev)); 1284 output("%s board found\n", pci_name(dev));
1270#if 0
1271 output("Trident board found : mem = %X, io = %X, mem_v = %X, io_v = %X\n",
1272 tridentfb_fix.smem_start, tridentfb_fix.mmio_start, fb_info.screen_base, default_par.io_virt);
1273#endif
1274 displaytype = get_displaytype(); 1285 displaytype = get_displaytype();
1275 1286
1276 if (flatpanel) 1287 if (flatpanel)
@@ -1288,9 +1299,12 @@ static int __devinit trident_pci_probe(struct pci_dev * dev,
1288 1299
1289 if (!fb_find_mode(&default_var, &fb_info, mode, NULL, 0, NULL, bpp)) { 1300 if (!fb_find_mode(&default_var, &fb_info, mode, NULL, 0, NULL, bpp)) {
1290 err = -EINVAL; 1301 err = -EINVAL;
1291 goto out_unmap; 1302 goto out_unmap2;
1292 } 1303 }
1293 fb_alloc_cmap(&fb_info.cmap, 256, 0); 1304 err = fb_alloc_cmap(&fb_info.cmap, 256, 0);
1305 if (err < 0)
1306 goto out_unmap2;
1307
1294 if (defaultaccel && acc) 1308 if (defaultaccel && acc)
1295 default_var.accel_flags |= FB_ACCELF_TEXT; 1309 default_var.accel_flags |= FB_ACCELF_TEXT;
1296 else 1310 else
@@ -1300,19 +1314,24 @@ static int __devinit trident_pci_probe(struct pci_dev * dev,
1300 fb_info.device = &dev->dev; 1314 fb_info.device = &dev->dev;
1301 if (register_framebuffer(&fb_info) < 0) { 1315 if (register_framebuffer(&fb_info) < 0) {
1302 printk(KERN_ERR "tridentfb: could not register Trident framebuffer\n"); 1316 printk(KERN_ERR "tridentfb: could not register Trident framebuffer\n");
1317 fb_dealloc_cmap(&fb_info.cmap);
1303 err = -EINVAL; 1318 err = -EINVAL;
1304 goto out_unmap; 1319 goto out_unmap2;
1305 } 1320 }
1306 output("fb%d: %s frame buffer device %dx%d-%dbpp\n", 1321 output("fb%d: %s frame buffer device %dx%d-%dbpp\n",
1307 fb_info.node, fb_info.fix.id, default_var.xres, 1322 fb_info.node, fb_info.fix.id, default_var.xres,
1308 default_var.yres, default_var.bits_per_pixel); 1323 default_var.yres, default_var.bits_per_pixel);
1309 return 0; 1324 return 0;
1310 1325
1311out_unmap: 1326out_unmap2:
1312 if (default_par.io_virt)
1313 iounmap(default_par.io_virt);
1314 if (fb_info.screen_base) 1327 if (fb_info.screen_base)
1315 iounmap(fb_info.screen_base); 1328 iounmap(fb_info.screen_base);
1329 release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
1330 disable_mmio();
1331out_unmap1:
1332 if (default_par.io_virt)
1333 iounmap(default_par.io_virt);
1334 release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
1316 return err; 1335 return err;
1317} 1336}
1318 1337
@@ -1323,7 +1342,7 @@ static void __devexit trident_pci_remove(struct pci_dev *dev)
1323 iounmap(par->io_virt); 1342 iounmap(par->io_virt);
1324 iounmap(fb_info.screen_base); 1343 iounmap(fb_info.screen_base);
1325 release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); 1344 release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len);
1326 release_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); 1345 release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len);
1327} 1346}
1328 1347
1329/* List of boards that we are trying to support */ 1348/* List of boards that we are trying to support */
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index be27b9c1ed72..93361656316c 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -44,7 +44,7 @@ static struct fb_fix_screeninfo uvesafb_fix __devinitdata = {
44 44
45static int mtrr __devinitdata = 3; /* enable mtrr by default */ 45static int mtrr __devinitdata = 3; /* enable mtrr by default */
46static int blank = 1; /* enable blanking by default */ 46static int blank = 1; /* enable blanking by default */
47static int ypan __devinitdata = 1; /* 0: scroll, 1: ypan, 2: ywrap */ 47static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */
48static int pmi_setpal __devinitdata = 1; /* use PMI for palette changes */ 48static int pmi_setpal __devinitdata = 1; /* use PMI for palette changes */
49static int nocrtc __devinitdata; /* ignore CRTC settings */ 49static int nocrtc __devinitdata; /* ignore CRTC settings */
50static int noedid __devinitdata; /* don't try DDC transfers */ 50static int noedid __devinitdata; /* don't try DDC transfers */
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 688e435b4d9a..10211e493001 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -17,6 +17,7 @@
17#include <linux/pm.h> 17#include <linux/pm.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/err.h>
20#include <linux/delay.h> 21#include <linux/delay.h>
21#include <linux/ds1wm.h> 22#include <linux/ds1wm.h>
22 23
@@ -102,12 +103,12 @@ struct ds1wm_data {
102static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, 103static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg,
103 u8 val) 104 u8 val)
104{ 105{
105 __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift)); 106 __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift));
106} 107}
107 108
108static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg) 109static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg)
109{ 110{
110 return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift)); 111 return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift));
111} 112}
112 113
113 114
@@ -149,8 +150,8 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
149 timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); 150 timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT);
150 ds1wm_data->reset_complete = NULL; 151 ds1wm_data->reset_complete = NULL;
151 if (!timeleft) { 152 if (!timeleft) {
152 dev_dbg(&ds1wm_data->pdev->dev, "reset failed\n"); 153 dev_err(&ds1wm_data->pdev->dev, "reset failed\n");
153 return 1; 154 return 1;
154 } 155 }
155 156
156 /* Wait for the end of the reset. According to the specs, the time 157 /* Wait for the end of the reset. According to the specs, the time
@@ -167,11 +168,11 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
167 (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); 168 (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0));
168 169
169 if (!ds1wm_data->slave_present) { 170 if (!ds1wm_data->slave_present) {
170 dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); 171 dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n");
171 return 1; 172 return 1;
172 } 173 }
173 174
174 return 0; 175 return 0;
175} 176}
176 177
177static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) 178static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data)
@@ -334,7 +335,7 @@ static int ds1wm_probe(struct platform_device *pdev)
334 if (!pdev) 335 if (!pdev)
335 return -ENODEV; 336 return -ENODEV;
336 337
337 ds1wm_data = kzalloc(sizeof (*ds1wm_data), GFP_KERNEL); 338 ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL);
338 if (!ds1wm_data) 339 if (!ds1wm_data)
339 return -ENOMEM; 340 return -ENOMEM;
340 341
@@ -374,8 +375,8 @@ static int ds1wm_probe(struct platform_device *pdev)
374 goto err1; 375 goto err1;
375 376
376 ds1wm_data->clk = clk_get(&pdev->dev, "ds1wm"); 377 ds1wm_data->clk = clk_get(&pdev->dev, "ds1wm");
377 if (!ds1wm_data->clk) { 378 if (IS_ERR(ds1wm_data->clk)) {
378 ret = -ENOENT; 379 ret = PTR_ERR(ds1wm_data->clk);
379 goto err2; 380 goto err2;
380 } 381 }
381 382
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index 5941ca601a3a..df72f90123df 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -59,9 +59,9 @@ static int ticks = 10000;
59 59
60static struct { 60static struct {
61 struct completion stop; 61 struct completion stop;
62 volatile int running; 62 int running;
63 struct timer_list timer; 63 struct timer_list timer;
64 volatile int queue; 64 int queue;
65 int default_ticks; 65 int default_ticks;
66 unsigned long inuse; 66 unsigned long inuse;
67} cpu5wdt_device; 67} cpu5wdt_device;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index a2e174b09fe7..6483d1066b95 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -58,41 +58,6 @@ struct bios32_service_dir {
58 u8 reserved[5]; 58 u8 reserved[5];
59}; 59};
60 60
61/*
62 * smbios_entry_point - defines SMBIOS entry point structure
63 *
64 * anchor[4] - anchor string (_SM_)
65 * checksum - checksum of the entry point structure
66 * length - length of the entry point structure
67 * major_ver - major version (02h for revision 2.1)
68 * minor_ver - minor version (01h for revision 2.1)
69 * max_struct_size - size of the largest SMBIOS structure
70 * revision - entry point structure revision implemented
71 * formatted_area[5] - reserved
72 * intermediate_anchor[5] - intermediate anchor string (_DMI_)
73 * intermediate_checksum - intermediate checksum
74 * table_length - structure table length
75 * table_address - structure table address
76 * table_num_structs - number of SMBIOS structures present
77 * bcd_revision - BCD revision
78 */
79struct smbios_entry_point {
80 u8 anchor[4];
81 u8 checksum;
82 u8 length;
83 u8 major_ver;
84 u8 minor_ver;
85 u16 max_struct_size;
86 u8 revision;
87 u8 formatted_area[5];
88 u8 intermediate_anchor[5];
89 u8 intermediate_checksum;
90 u16 table_length;
91 u64 table_address;
92 u16 table_num_structs;
93 u8 bcd_revision;
94};
95
96/* type 212 */ 61/* type 212 */
97struct smbios_cru64_info { 62struct smbios_cru64_info {
98 u8 type; 63 u8 type;
@@ -175,31 +140,13 @@ static struct pci_device_id hpwdt_devices[] = {
175}; 140};
176MODULE_DEVICE_TABLE(pci, hpwdt_devices); 141MODULE_DEVICE_TABLE(pci, hpwdt_devices);
177 142
178/*
179 * bios_checksum
180 */
181static int __devinit bios_checksum(const char __iomem *ptr, int len)
182{
183 char sum = 0;
184 int i;
185
186 /*
187 * calculate checksum of size bytes. This should add up
188 * to zero if we have a valid header.
189 */
190 for (i = 0; i < len; i++)
191 sum += ptr[i];
192
193 return ((sum == 0) && (len > 0));
194}
195
196#ifndef CONFIG_X86_64 143#ifndef CONFIG_X86_64
197/* --32 Bit Bios------------------------------------------------------------ */ 144/* --32 Bit Bios------------------------------------------------------------ */
198 145
199#define HPWDT_ARCH 32 146#define HPWDT_ARCH 32
200 147
201asmlinkage void asminline_call(struct cmn_registers *pi86Regs, 148static void asminline_call(struct cmn_registers *pi86Regs,
202 unsigned long *pRomEntry) 149 unsigned long *pRomEntry)
203{ 150{
204 asm("pushl %ebp \n\t" 151 asm("pushl %ebp \n\t"
205 "movl %esp, %ebp \n\t" 152 "movl %esp, %ebp \n\t"
@@ -303,6 +250,24 @@ static int __devinit cru_detect(unsigned long map_entry,
303} 250}
304 251
305/* 252/*
253 * bios_checksum
254 */
255static int __devinit bios_checksum(const char __iomem *ptr, int len)
256{
257 char sum = 0;
258 int i;
259
260 /*
261 * calculate checksum of size bytes. This should add up
262 * to zero if we have a valid header.
263 */
264 for (i = 0; i < len; i++)
265 sum += ptr[i];
266
267 return ((sum == 0) && (len > 0));
268}
269
270/*
306 * bios32_present 271 * bios32_present
307 * 272 *
308 * Routine Description: 273 * Routine Description:
@@ -368,8 +333,8 @@ static int __devinit detect_cru_service(void)
368 333
369#define HPWDT_ARCH 64 334#define HPWDT_ARCH 64
370 335
371asmlinkage void asminline_call(struct cmn_registers *pi86Regs, 336static void asminline_call(struct cmn_registers *pi86Regs,
372 unsigned long *pRomEntry) 337 unsigned long *pRomEntry)
373{ 338{
374 asm("pushq %rbp \n\t" 339 asm("pushq %rbp \n\t"
375 "movq %rsp, %rbp \n\t" 340 "movq %rsp, %rbp \n\t"
@@ -410,12 +375,8 @@ asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
410 * dmi_find_cru 375 * dmi_find_cru
411 * 376 *
412 * Routine Description: 377 * Routine Description:
413 * This function checks wether or not a SMBIOS/DMI record is 378 * This function checks whether or not a SMBIOS/DMI record is
414 * the 64bit CRU info or not 379 * the 64bit CRU info or not
415 *
416 * Return Value:
417 * 0 : SUCCESS - if record found
418 * <0 : FAILURE - if record not found
419 */ 380 */
420static void __devinit dmi_find_cru(const struct dmi_header *dm) 381static void __devinit dmi_find_cru(const struct dmi_header *dm)
421{ 382{
@@ -434,138 +395,11 @@ static void __devinit dmi_find_cru(const struct dmi_header *dm)
434 } 395 }
435} 396}
436 397
437/*
438 * dmi_table
439 *
440 * Routine Description:
441 * Decode the SMBIOS/DMI table and check if we have a 64bit CRU record
442 * or not.
443 *
444 * We have to be cautious here. We have seen BIOSes with DMI pointers
445 * pointing to completely the wrong place for example
446 */
447static void __devinit dmi_table(u8 *buf, int len, int num,
448 void (*decode)(const struct dmi_header *))
449{
450 u8 *data = buf;
451 int i = 0;
452
453 /*
454 * Stop when we see all the items the table claimed to have
455 * OR we run off the end of the table (also happens)
456 */
457 while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
458 const struct dmi_header *dm = (const struct dmi_header *)data;
459
460 /*
461 * We want to know the total length (formated area and strings)
462 * before decoding to make sure we won't run off the table in
463 * dmi_decode or dmi_string
464 */
465 data += dm->length;
466 while ((data - buf < len - 1) && (data[0] || data[1]))
467 data++;
468 if (data - buf < len - 1)
469 decode(dm);
470 data += 2;
471 i++;
472 }
473}
474
475/*
476 * smbios_present
477 *
478 * Routine Description:
479 * This function parses the SMBIOS entry point table to retrieve
480 * the 64 bit CRU Service.
481 *
482 * Return Value:
483 * 0 : SUCCESS
484 * <0 : FAILURE
485 */
486static int __devinit smbios_present(const char __iomem *p)
487{
488 struct smbios_entry_point *eps =
489 (struct smbios_entry_point *) p;
490 int length;
491 u8 *buf;
492
493 /* check if we have indeed the SMBIOS table entry point */
494 if ((strncmp((char *)eps->anchor, "_SM_",
495 sizeof(eps->anchor))) == 0) {
496 length = eps->length;
497
498 /* SMBIOS v2.1 implementation might use 0x1e */
499 if ((length == 0x1e) &&
500 (eps->major_ver == 2) &&
501 (eps->minor_ver == 1))
502 length = 0x1f;
503
504 /*
505 * Now we will check:
506 * - SMBIOS checksum must be 0
507 * - intermediate anchor should be _DMI_
508 * - intermediate checksum should be 0
509 */
510 if ((bios_checksum(p, length)) &&
511 (strncmp((char *)eps->intermediate_anchor, "_DMI_",
512 sizeof(eps->intermediate_anchor)) == 0) &&
513 (bios_checksum(p+0x10, 15))) {
514 buf = ioremap(eps->table_address, eps->table_length);
515 if (buf == NULL)
516 return -ENODEV;
517
518
519 /* Scan the DMI table for the 64 bit CRU service */
520 dmi_table(buf, eps->table_length,
521 eps->table_num_structs, dmi_find_cru);
522
523 iounmap(buf);
524 return 0;
525 }
526 }
527
528 return -ENODEV;
529}
530
531static int __devinit smbios_scan_machine(void)
532{
533 char __iomem *p, *q;
534 int rc;
535
536 if (efi_enabled) {
537 if (efi.smbios == EFI_INVALID_TABLE_ADDR)
538 return -ENODEV;
539
540 p = ioremap(efi.smbios, 32);
541 if (p == NULL)
542 return -ENOMEM;
543
544 rc = smbios_present(p);
545 iounmap(p);
546 } else {
547 /*
548 * Search from 0x0f0000 through 0x0fffff, inclusive.
549 */
550 p = ioremap(PCI_ROM_BASE1, ROM_SIZE);
551 if (p == NULL)
552 return -ENOMEM;
553
554 for (q = p; q < p + ROM_SIZE; q += 16) {
555 rc = smbios_present(q);
556 if (!rc) {
557 break;
558 }
559 }
560 iounmap(p);
561 }
562}
563
564static int __devinit detect_cru_service(void) 398static int __devinit detect_cru_service(void)
565{ 399{
566 cru_rom_addr = NULL; 400 cru_rom_addr = NULL;
567 401
568 smbios_scan_machine(); /* will become dmi_walk(dmi_find_cru); */ 402 dmi_walk(dmi_find_cru);
569 403
570 /* if cru_rom_addr has been set then we found a CRU service */ 404 /* if cru_rom_addr has been set then we found a CRU service */
571 return ((cru_rom_addr != NULL)? 0: -ENODEV); 405 return ((cru_rom_addr != NULL)? 0: -ENODEV);
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index 1b6d7d1b715d..1efcad3b6fca 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -7,7 +7,8 @@
7 * 7 *
8 * drivers/char/watchdog/scx200_wdt.c 8 * drivers/char/watchdog/scx200_wdt.c
9 * drivers/hwmon/it87.c 9 * drivers/hwmon/it87.c
10 * IT8712F EC-LPC I/O Preliminary Specification 0.9.2.pdf 10 * IT8712F EC-LPC I/O Preliminary Specification 0.8.2
11 * IT8712F EC-LPC I/O Preliminary Specification 0.9.3
11 * 12 *
12 * This program is free software; you can redistribute it and/or 13 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as 14 * modify it under the terms of the GNU General Public License as
@@ -40,6 +41,7 @@ MODULE_DESCRIPTION("IT8712F Watchdog Driver");
40MODULE_LICENSE("GPL"); 41MODULE_LICENSE("GPL");
41MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); 42MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
42 43
44static int max_units = 255;
43static int margin = 60; /* in seconds */ 45static int margin = 60; /* in seconds */
44module_param(margin, int, 0); 46module_param(margin, int, 0);
45MODULE_PARM_DESC(margin, "Watchdog margin in seconds"); 47MODULE_PARM_DESC(margin, "Watchdog margin in seconds");
@@ -51,6 +53,7 @@ MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
51static struct semaphore it8712f_wdt_sem; 53static struct semaphore it8712f_wdt_sem;
52static unsigned expect_close; 54static unsigned expect_close;
53static spinlock_t io_lock; 55static spinlock_t io_lock;
56static unsigned char revision;
54 57
55/* Dog Food address - We use the game port address */ 58/* Dog Food address - We use the game port address */
56static unsigned short address; 59static unsigned short address;
@@ -108,6 +111,15 @@ superio_inw(int reg)
108 return val; 111 return val;
109} 112}
110 113
114static void
115superio_outw(int val, int reg)
116{
117 outb(reg++, REG);
118 outb((val >> 8) & 0xff, VAL);
119 outb(reg, REG);
120 outb(val & 0xff, VAL);
121}
122
111static inline void 123static inline void
112superio_select(int ldn) 124superio_select(int ldn)
113{ 125{
@@ -143,15 +155,33 @@ static void
143it8712f_wdt_update_margin(void) 155it8712f_wdt_update_margin(void)
144{ 156{
145 int config = WDT_OUT_KRST | WDT_OUT_PWROK; 157 int config = WDT_OUT_KRST | WDT_OUT_PWROK;
146 158 int units = margin;
147 printk(KERN_INFO NAME ": timer margin %d seconds\n", margin); 159
148 160 /* Switch to minutes precision if the configured margin
149 /* The timeout register only has 8bits wide */ 161 * value does not fit within the register width.
150 if (margin < 256) 162 */
151 config |= WDT_UNIT_SEC; /* else UNIT are MINUTES */ 163 if (units <= max_units) {
164 config |= WDT_UNIT_SEC; /* else UNIT is MINUTES */
165 printk(KERN_INFO NAME ": timer margin %d seconds\n", units);
166 } else {
167 units /= 60;
168 printk(KERN_INFO NAME ": timer margin %d minutes\n", units);
169 }
152 superio_outb(config, WDT_CONFIG); 170 superio_outb(config, WDT_CONFIG);
153 171
154 superio_outb((margin > 255) ? (margin / 60) : margin, WDT_TIMEOUT); 172 if (revision >= 0x08)
173 superio_outw(units, WDT_TIMEOUT);
174 else
175 superio_outb(units, WDT_TIMEOUT);
176}
177
178static int
179it8712f_wdt_get_status(void)
180{
181 if (superio_inb(WDT_CONTROL) & 0x01)
182 return WDIOF_CARDRESET;
183 else
184 return 0;
155} 185}
156 186
157static void 187static void
@@ -234,7 +264,7 @@ it8712f_wdt_ioctl(struct inode *inode, struct file *file,
234 .firmware_version = 1, 264 .firmware_version = 1,
235 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, 265 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
236 }; 266 };
237 int new_margin; 267 int value;
238 268
239 switch (cmd) { 269 switch (cmd) {
240 default: 270 default:
@@ -244,17 +274,27 @@ it8712f_wdt_ioctl(struct inode *inode, struct file *file,
244 return -EFAULT; 274 return -EFAULT;
245 return 0; 275 return 0;
246 case WDIOC_GETSTATUS: 276 case WDIOC_GETSTATUS:
277 superio_enter();
278 superio_select(LDN_GPIO);
279
280 value = it8712f_wdt_get_status();
281
282 superio_exit();
283
284 return put_user(value, p);
247 case WDIOC_GETBOOTSTATUS: 285 case WDIOC_GETBOOTSTATUS:
248 return put_user(0, p); 286 return put_user(0, p);
249 case WDIOC_KEEPALIVE: 287 case WDIOC_KEEPALIVE:
250 it8712f_wdt_ping(); 288 it8712f_wdt_ping();
251 return 0; 289 return 0;
252 case WDIOC_SETTIMEOUT: 290 case WDIOC_SETTIMEOUT:
253 if (get_user(new_margin, p)) 291 if (get_user(value, p))
254 return -EFAULT; 292 return -EFAULT;
255 if (new_margin < 1) 293 if (value < 1)
294 return -EINVAL;
295 if (value > (max_units * 60))
256 return -EINVAL; 296 return -EINVAL;
257 margin = new_margin; 297 margin = value;
258 superio_enter(); 298 superio_enter();
259 superio_select(LDN_GPIO); 299 superio_select(LDN_GPIO);
260 300
@@ -262,6 +302,7 @@ it8712f_wdt_ioctl(struct inode *inode, struct file *file,
262 302
263 superio_exit(); 303 superio_exit();
264 it8712f_wdt_ping(); 304 it8712f_wdt_ping();
305 /* Fall through */
265 case WDIOC_GETTIMEOUT: 306 case WDIOC_GETTIMEOUT:
266 if (put_user(margin, p)) 307 if (put_user(margin, p))
267 return -EFAULT; 308 return -EFAULT;
@@ -336,9 +377,18 @@ it8712f_wdt_find(unsigned short *address)
336 } 377 }
337 378
338 err = 0; 379 err = 0;
339 printk(KERN_DEBUG NAME ": Found IT%04xF chip revision %d - " 380 revision = superio_inb(DEVREV) & 0x0f;
381
382 /* Later revisions have 16-bit values per datasheet 0.9.1 */
383 if (revision >= 0x08)
384 max_units = 65535;
385
386 if (margin > (max_units * 60))
387 margin = (max_units * 60);
388
389 printk(KERN_INFO NAME ": Found IT%04xF chip revision %d - "
340 "using DogFood address 0x%x\n", 390 "using DogFood address 0x%x\n",
341 chip_type, superio_inb(DEVREV) & 0x0f, *address); 391 chip_type, revision, *address);
342 392
343exit: 393exit:
344 superio_exit(); 394 superio_exit();
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index e6e07b4575eb..6905135a776c 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -141,7 +141,7 @@ static unsigned long next_heartbeat = 0;
141#ifndef ZF_DEBUG 141#ifndef ZF_DEBUG
142# define dprintk(format, args...) 142# define dprintk(format, args...)
143#else 143#else
144# define dprintk(format, args...) printk(KERN_DEBUG PFX ":%s:%d: " format, __FUNCTION__, __LINE__ , ## args) 144# define dprintk(format, args...) printk(KERN_DEBUG PFX ":%s:%d: " format, __func__, __LINE__ , ## args)
145#endif 145#endif
146 146
147 147
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 789831b3fa00..10b89f2703bd 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -59,9 +59,9 @@ static int ticks = 100 * HZ;
59 59
60static struct { 60static struct {
61 struct completion stop; 61 struct completion stop;
62 volatile int running; 62 int running;
63 struct timer_list timer; 63 struct timer_list timer;
64 volatile int queue; 64 int queue;
65 int default_ticks; 65 int default_ticks;
66 unsigned long inuse; 66 unsigned long inuse;
67 unsigned gpio; 67 unsigned gpio;
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 0f3fd6c9c354..bf443d077a1e 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -179,11 +179,11 @@ static void usb_pcwd_intr_done(struct urb *urb)
179 case -ENOENT: 179 case -ENOENT:
180 case -ESHUTDOWN: 180 case -ESHUTDOWN:
181 /* this urb is terminated, clean up */ 181 /* this urb is terminated, clean up */
182 dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status); 182 dbg("%s - urb shutting down with status: %d", __func__, urb->status);
183 return; 183 return;
184 /* -EPIPE: should clear the halt */ 184 /* -EPIPE: should clear the halt */
185 default: /* error */ 185 default: /* error */
186 dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status); 186 dbg("%s - nonzero urb status received: %d", __func__, urb->status);
187 goto resubmit; 187 goto resubmit;
188 } 188 }
189 189
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 5d1c15f83d23..7645e8812156 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -144,7 +144,7 @@ static int s3c2410wdt_start(void)
144 } 144 }
145 145
146 DBG("%s: wdt_count=0x%08x, wtcon=%08lx\n", 146 DBG("%s: wdt_count=0x%08x, wtcon=%08lx\n",
147 __FUNCTION__, wdt_count, wtcon); 147 __func__, wdt_count, wtcon);
148 148
149 writel(wdt_count, wdt_base + S3C2410_WTDAT); 149 writel(wdt_count, wdt_base + S3C2410_WTDAT);
150 writel(wdt_count, wdt_base + S3C2410_WTCNT); 150 writel(wdt_count, wdt_base + S3C2410_WTCNT);
@@ -167,7 +167,7 @@ static int s3c2410wdt_set_heartbeat(int timeout)
167 count = timeout * freq; 167 count = timeout * freq;
168 168
169 DBG("%s: count=%d, timeout=%d, freq=%d\n", 169 DBG("%s: count=%d, timeout=%d, freq=%d\n",
170 __FUNCTION__, count, timeout, freq); 170 __func__, count, timeout, freq);
171 171
172 /* if the count is bigger than the watchdog register, 172 /* if the count is bigger than the watchdog register,
173 then work out what we need to do (and if) we can 173 then work out what we need to do (and if) we can
@@ -189,7 +189,7 @@ static int s3c2410wdt_set_heartbeat(int timeout)
189 tmr_margin = timeout; 189 tmr_margin = timeout;
190 190
191 DBG("%s: timeout=%d, divisor=%d, count=%d (%08x)\n", 191 DBG("%s: timeout=%d, divisor=%d, count=%d (%08x)\n",
192 __FUNCTION__, timeout, divisor, count, count/divisor); 192 __func__, timeout, divisor, count, count/divisor);
193 193
194 count /= divisor; 194 count /= divisor;
195 wdt_count = count; 195 wdt_count = count;
@@ -355,7 +355,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
355 int ret; 355 int ret;
356 int size; 356 int size;
357 357
358 DBG("%s: probe=%p\n", __FUNCTION__, pdev); 358 DBG("%s: probe=%p\n", __func__, pdev);
359 359
360 dev = &pdev->dev; 360 dev = &pdev->dev;
361 wdt_dev = &pdev->dev; 361 wdt_dev = &pdev->dev;
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 61dde863bd40..1277f7e9cc54 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -298,7 +298,7 @@ static int sh_wdt_mmap(struct file *file, struct vm_area_struct *vma)
298 if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, 298 if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
299 PAGE_SIZE, vma->vm_page_prot)) { 299 PAGE_SIZE, vma->vm_page_prot)) {
300 printk(KERN_ERR PFX "%s: io_remap_pfn_range failed\n", 300 printk(KERN_ERR PFX "%s: io_remap_pfn_range failed\n",
301 __FUNCTION__); 301 __func__);
302 return -EAGAIN; 302 return -EAGAIN;
303 } 303 }
304 304