Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/blacklist.c64
-rw-r--r--drivers/acpi/ec.c6
-rw-r--r--drivers/acpi/event.c2
-rw-r--r--drivers/acpi/executer/exregion.c5
-rw-r--r--drivers/acpi/fan.c30
-rw-r--r--drivers/acpi/hardware/hwsleep.c1
-rw-r--r--drivers/acpi/osl.c22
-rw-r--r--drivers/acpi/processor_core.c39
-rw-r--r--drivers/acpi/processor_idle.c29
-rw-r--r--drivers/acpi/utils.c18
-rw-r--r--drivers/acpi/video.c3
-rw-r--r--drivers/acpi/wmi.c6
-rw-r--r--drivers/ata/ahci.c30
-rw-r--r--drivers/ata/ata_piix.c4
-rw-r--r--drivers/ata/libata-core.c695
-rw-r--r--drivers/ata/libata-eh.c8
-rw-r--r--drivers/ata/libata-pmp.c4
-rw-r--r--drivers/ata/libata-scsi.c77
-rw-r--r--drivers/ata/libata.h1
-rw-r--r--drivers/ata/pata_acpi.c4
-rw-r--r--drivers/ata/pata_amd.c9
-rw-r--r--drivers/ata/pata_atiixp.c4
-rw-r--r--drivers/ata/pata_cs5536.c10
-rw-r--r--drivers/ata/pata_icside.c8
-rw-r--r--drivers/ata/pata_jmicron.c3
-rw-r--r--drivers/ata/pata_legacy.c48
-rw-r--r--drivers/ata/pata_marvell.c4
-rw-r--r--drivers/ata/pata_ninja32.c9
-rw-r--r--drivers/ata/pata_scc.c2
-rw-r--r--drivers/ata/pata_via.c6
-rw-r--r--drivers/ata/sata_fsl.c21
-rw-r--r--drivers/ata/sata_mv.c69
-rw-r--r--drivers/ata/sata_promise.c2
-rw-r--r--drivers/ata/sata_sil24.c5
-rw-r--r--drivers/ata/sata_via.c4
-rw-r--r--drivers/base/bus.c9
-rw-r--r--drivers/base/driver.c3
-rw-r--r--drivers/base/power/main.c4
-rw-r--r--drivers/block/nbd.c6
-rw-r--r--drivers/block/swim3.c4
-rw-r--r--drivers/block/xen-blkfront.c18
-rw-r--r--drivers/bluetooth/hci_ldisc.c1
-rw-r--r--drivers/char/agp/amd-k7-agp.c9
-rw-r--r--drivers/char/agp/ati-agp.c16
-rw-r--r--drivers/char/agp/generic.c9
-rw-r--r--drivers/char/agp/sis-agp.c47
-rw-r--r--drivers/char/agp/sworks-agp.c18
-rw-r--r--drivers/char/drm/drmP.h2
-rw-r--r--drivers/char/drm/drm_pciids.h3
-rw-r--r--drivers/char/drm/drm_sysfs.c2
-rw-r--r--drivers/char/drm/drm_vm.c125
-rw-r--r--drivers/char/drm/i830_dma.c2
-rw-r--r--drivers/char/drm/i915_dma.c5
-rw-r--r--drivers/char/drm/i915_drv.c52
-rw-r--r--drivers/char/drm/i915_drv.h17
-rw-r--r--drivers/char/drm/radeon_cp.c81
-rw-r--r--drivers/char/drm/radeon_drv.h38
-rw-r--r--drivers/char/hvc_rtas.c2
-rw-r--r--drivers/char/pcmcia/Kconfig2
-rw-r--r--drivers/cpufreq/cpufreq.c8
-rw-r--r--drivers/cpuidle/cpuidle.c3
-rw-r--r--drivers/cpuidle/sysfs.c14
-rw-r--r--drivers/crypto/hifn_795x.c6
-rw-r--r--drivers/firmware/dmi_scan.c82
-rw-r--r--drivers/hid/hid-input-quirks.c17
-rw-r--r--drivers/hid/hid-input.c8
-rw-r--r--drivers/hid/usbhid/hid-quirks.c38
-rw-r--r--drivers/hwmon/Kconfig10
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/ad7418.c2
-rw-r--r--drivers/hwmon/adm1021.c6
-rw-r--r--drivers/hwmon/adm1025.c2
-rw-r--r--drivers/hwmon/adm1026.c4
-rw-r--r--drivers/hwmon/adm1029.c6
-rw-r--r--drivers/hwmon/adm1031.c2
-rw-r--r--drivers/hwmon/adm9240.c2
-rw-r--r--drivers/hwmon/ads7828.c2
-rw-r--r--drivers/hwmon/adt7470.c2
-rw-r--r--drivers/hwmon/adt7473.c1157
-rw-r--r--drivers/hwmon/applesmc.c29
-rw-r--r--drivers/hwmon/asb100.c2
-rw-r--r--drivers/hwmon/atxp1.c2
-rw-r--r--drivers/hwmon/coretemp.c119
-rw-r--r--drivers/hwmon/dme1737.c2
-rw-r--r--drivers/hwmon/ds1621.c2
-rw-r--r--drivers/hwmon/f75375s.c2
-rw-r--r--drivers/hwmon/fscher.c2
-rw-r--r--drivers/hwmon/fschmd.c2
-rw-r--r--drivers/hwmon/fscpos.c2
-rw-r--r--drivers/hwmon/gl518sm.c2
-rw-r--r--drivers/hwmon/gl520sm.c2
-rw-r--r--drivers/hwmon/lm63.c2
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/lm77.c3
-rw-r--r--drivers/hwmon/lm78.c4
-rw-r--r--drivers/hwmon/lm80.c4
-rw-r--r--drivers/hwmon/lm83.c6
-rw-r--r--drivers/hwmon/lm85.c2
-rw-r--r--drivers/hwmon/lm87.c2
-rw-r--r--drivers/hwmon/lm90.c6
-rw-r--r--drivers/hwmon/lm92.c20
-rw-r--r--drivers/hwmon/lm93.c2
-rw-r--r--drivers/hwmon/max1619.c23
-rw-r--r--drivers/hwmon/max6650.c3
-rw-r--r--drivers/hwmon/smsc47m1.c25
-rw-r--r--drivers/hwmon/smsc47m192.c2
-rw-r--r--drivers/hwmon/thmc50.c8
-rw-r--r--drivers/hwmon/via686a.c28
-rw-r--r--drivers/hwmon/vt8231.c44
-rw-r--r--drivers/hwmon/w83781d.c4
-rw-r--r--drivers/hwmon/w83791d.c3
-rw-r--r--drivers/hwmon/w83792d.c3
-rw-r--r--drivers/hwmon/w83793.c3
-rw-r--r--drivers/hwmon/w83l785ts.c2
-rw-r--r--drivers/hwmon/w83l786ng.c2
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-i801.c10
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c7
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c4
-rw-r--r--drivers/i2c/busses/i2c-pxa.c29
-rw-r--r--drivers/ide/Kconfig26
-rw-r--r--drivers/ide/arm/bast-ide.c12
-rw-r--r--drivers/ide/arm/palm_bk3710.c74
-rw-r--r--drivers/ide/ide-cd.c8
-rw-r--r--drivers/ide/ide-disk.c19
-rw-r--r--drivers/ide/ide-dma.c14
-rw-r--r--drivers/ide/ide-generic.c6
-rw-r--r--drivers/ide/ide-io.c19
-rw-r--r--drivers/ide/ide-iops.c10
-rw-r--r--drivers/ide/ide-lib.c9
-rw-r--r--drivers/ide/ide-probe.c4
-rw-r--r--drivers/ide/ide-tape.c34
-rw-r--r--drivers/ide/ide.c6
-rw-r--r--drivers/ide/legacy/falconide.c4
-rw-r--r--drivers/ide/legacy/gayle.c2
-rw-r--r--drivers/ide/legacy/ht6560b.c25
-rw-r--r--drivers/ide/legacy/macide.c2
-rw-r--r--drivers/ide/pci/cs5520.c5
-rw-r--r--drivers/ide/pci/pdc202xx_old.c22
-rw-r--r--drivers/ide/pci/via82cxxx.c1
-rw-r--r--drivers/ide/ppc/pmac.c4
-rw-r--r--drivers/infiniband/core/cm.c26
-rw-r--r--drivers/infiniband/core/cma.c10
-rw-r--r--drivers/infiniband/core/sysfs.c4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c17
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c62
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c1
-rw-r--r--drivers/macintosh/mediabay.c5
-rw-r--r--drivers/md/bitmap.c8
-rw-r--r--drivers/md/dm-raid1.c9
-rw-r--r--drivers/md/dm-table.c4
-rw-r--r--drivers/md/md.c3
-rw-r--r--drivers/media/Kconfig18
-rw-r--r--drivers/media/common/Kconfig2
-rw-r--r--drivers/media/common/ir-keymaps.c46
-rw-r--r--drivers/media/common/saa7146_vbi.c1
-rw-r--r--drivers/media/common/saa7146_video.c2
-rw-r--r--drivers/media/dvb/bt8xx/bt878.c23
-rw-r--r--drivers/media/dvb/dvb-usb/ttusb2.c1
-rw-r--r--drivers/media/dvb/frontends/tda10086.c28
-rw-r--r--drivers/media/dvb/frontends/tda10086.h3
-rw-r--r--drivers/media/dvb/frontends/tda18271-common.c2
-rw-r--r--drivers/media/dvb/frontends/xc5000.h3
-rw-r--r--drivers/media/dvb/ttpci/av7110_av.c15
-rw-r--r--drivers/media/dvb/ttpci/budget-av.c8
-rw-r--r--drivers/media/dvb/ttpci/budget.c1
-rw-r--r--drivers/media/radio/Kconfig4
-rw-r--r--drivers/media/radio/radio-sf16fmi.c1
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c5
-rw-r--r--drivers/media/radio/radio-si470x.c597
-rw-r--r--drivers/media/video/Kconfig4
-rw-r--r--drivers/media/video/Makefile5
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c51
-rw-r--r--drivers/media/video/bt8xx/bttv-vbi.c4
-rw-r--r--drivers/media/video/cx88/cx88-mpeg.c16
-rw-r--r--drivers/media/video/cx88/cx88.h1
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c6
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c8
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c111
-rw-r--r--drivers/media/video/em28xx/em28xx-video.c79
-rw-r--r--drivers/media/video/em28xx/em28xx.h5
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c123
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c28
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c6
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c6
-rw-r--r--drivers/media/video/saa7134/saa7134-video.c20
-rw-r--r--drivers/media/video/saa7134/saa7134.h2
-rw-r--r--drivers/media/video/stk-sensor.c23
-rw-r--r--drivers/media/video/stk-webcam.c104
-rw-r--r--drivers/media/video/stk-webcam.h3
-rw-r--r--drivers/media/video/tcm825x.c2
-rw-r--r--drivers/media/video/tuner-core.c2
-rw-r--r--drivers/media/video/tuner-xc2028.c3
-rw-r--r--drivers/media/video/tvaudio.c10
-rw-r--r--drivers/media/video/tveeprom.c2
-rw-r--r--drivers/media/video/v4l2-common.c393
-rw-r--r--drivers/media/video/videobuf-core.c78
-rw-r--r--drivers/media/video/videobuf-dma-sg.c4
-rw-r--r--drivers/media/video/videobuf-vmalloc.c20
-rw-r--r--drivers/media/video/videodev.c444
-rw-r--r--drivers/media/video/zoran.h22
-rw-r--r--drivers/media/video/zoran_device.c12
-rw-r--r--drivers/media/video/zr364xx.c2
-rw-r--r--drivers/memstick/host/tifm_ms.c2
-rw-r--r--drivers/message/fusion/mptbase.c54
-rw-r--r--drivers/message/fusion/mptbase.h3
-rw-r--r--drivers/misc/Kconfig17
-rw-r--r--drivers/misc/acer-wmi.c9
-rw-r--r--drivers/misc/intel_menlow.c11
-rw-r--r--drivers/misc/thinkpad_acpi.c127
-rw-r--r--drivers/mtd/mtdsuper.c14
-rw-r--r--drivers/net/8139too.c2
-rw-r--r--drivers/net/Kconfig18
-rw-r--r--drivers/net/Makefile3
-rw-r--r--drivers/net/bnx2.c50
-rw-r--r--drivers/net/bnx2.h1
-rw-r--r--drivers/net/cxgb3/l2t.c2
-rw-r--r--drivers/net/cxgb3/sge.c35
-rw-r--r--drivers/net/dm9000.c654
-rw-r--r--drivers/net/e1000/e1000_ethtool.c2
-rw-r--r--drivers/net/e1000/e1000_main.c26
-rw-r--r--drivers/net/e1000e/netdev.c34
-rw-r--r--drivers/net/forcedeth.c132
-rw-r--r--drivers/net/gianfar.c4
-rw-r--r--drivers/net/gianfar_mii.c4
-rw-r--r--drivers/net/hamradio/mkiss.c5
-rw-r--r--drivers/net/ibm_newemac/rgmii.c1
-rw-r--r--drivers/net/igb/igb_ethtool.c2
-rw-r--r--drivers/net/igb/igb_main.c28
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c41
-rw-r--r--drivers/net/mlx4/alloc.c1
-rw-r--r--drivers/net/mlx4/mr.c21
-rw-r--r--drivers/net/netconsole.c4
-rw-r--r--drivers/net/ni52.c1142
-rw-r--r--drivers/net/ni52.h158
-rw-r--r--drivers/net/niu.c20
-rw-r--r--drivers/net/niu.h2
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c12
-rw-r--r--drivers/net/pcnet32.c48
-rw-r--r--drivers/net/phy/fixed.c4
-rw-r--r--drivers/net/ps3_gelic_net.c1215
-rw-r--r--drivers/net/ps3_gelic_net.h415
-rw-r--r--drivers/net/ps3_gelic_wireless.c2753
-rw-r--r--drivers/net/ps3_gelic_wireless.h329
-rw-r--r--drivers/net/r6040.c233
-rw-r--r--drivers/net/sis190.c3
-rw-r--r--drivers/net/tg3.c2
-rw-r--r--drivers/net/tsi108_eth.c72
-rw-r--r--drivers/net/veth.c53
-rw-r--r--drivers/net/wireless/ath5k/ath5k.h2
-rw-r--r--drivers/net/wireless/ath5k/base.c24
-rw-r--r--drivers/net/wireless/ath5k/hw.c42
-rw-r--r--drivers/net/wireless/b43/b43.h6
-rw-r--r--drivers/net/wireless/b43/main.c40
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h4
-rw-r--r--drivers/net/wireless/b43legacy/dma.c167
-rw-r--r--drivers/net/wireless/b43legacy/dma.h33
-rw-r--r--drivers/net/wireless/b43legacy/main.c41
-rw-r--r--drivers/net/wireless/ipw2200.c45
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl4965-base.c17
-rw-r--r--drivers/net/wireless/p54usb.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c14
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c2
-rw-r--r--drivers/net/wireless/rtl8180_dev.c4
-rw-r--r--drivers/net/wireless/rtl8187_dev.c4
-rw-r--r--drivers/net/wireless/wavelan.h6
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c12
-rw-r--r--drivers/oprofile/buffer_sync.c21
-rw-r--r--drivers/parport/parport_pc.c4
-rw-r--r--drivers/pci/dmar.c9
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c6
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c33
-rw-r--r--drivers/pci/intel-iommu.c7
-rw-r--r--drivers/pci/intel-iommu.h5
-rw-r--r--drivers/pci/iova.c3
-rw-r--r--drivers/pci/iova.h3
-rw-r--r--drivers/pci/pci-acpi.c3
-rw-r--r--drivers/pci/pci.c1
-rw-r--r--drivers/pci/proc.c2
-rw-r--r--drivers/pci/quirks.c79
-rw-r--r--drivers/pci/setup-bus.c6
-rw-r--r--drivers/pcmcia/i82092.c7
-rw-r--r--drivers/pnp/pnpacpi/core.c2
-rw-r--r--drivers/pnp/pnpbios/core.c2
-rw-r--r--drivers/ps3/ps3-lpm.c22
-rw-r--r--drivers/ps3/ps3-sys-manager.c44
-rw-r--r--drivers/rtc/rtc-cmos.c2
-rw-r--r--drivers/s390/block/dasd.c12
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/char/sclp.c12
-rw-r--r--drivers/s390/char/sclp.h6
-rw-r--r--drivers/s390/char/sclp_config.c2
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c2
-rw-r--r--drivers/s390/char/sclp_rw.c4
-rw-r--r--drivers/s390/char/sclp_vt220.c2
-rw-r--r--drivers/s390/cio/device.c15
-rw-r--r--drivers/s390/cio/qdio.c13
-rw-r--r--drivers/s390/cio/qdio.h2
-rw-r--r--drivers/s390/net/claw.h19
-rw-r--r--drivers/s390/net/lcs.c2
-rw-r--r--drivers/s390/net/lcs.h16
-rw-r--r--drivers/s390/net/netiucv.c29
-rw-r--r--drivers/scsi/Kconfig12
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/aacraid/aachba.c70
-rw-r--r--drivers/scsi/aacraid/aacraid.h2
-rw-r--r--drivers/scsi/aacraid/commsup.c2
-rw-r--r--drivers/scsi/aacraid/linit.c47
-rw-r--r--drivers/scsi/aacraid/rx.c5
-rw-r--r--drivers/scsi/aacraid/sa.c5
-rw-r--r--drivers/scsi/advansys.c13
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c11
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sas.h2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c14
-rw-r--r--drivers/scsi/aic94xx/aic94xx_tmf.c6
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c26
-rw-r--r--drivers/scsi/arm/fas216.c16
-rw-r--r--drivers/scsi/arm/fas216.h3
-rw-r--r--drivers/scsi/gdth.c16
-rw-r--r--drivers/scsi/gdth_proc.c6
-rw-r--r--drivers/scsi/ipr.c4
-rw-r--r--drivers/scsi/ips.c2
-rw-r--r--drivers/scsi/libsas/sas_ata.c4
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c68
-rw-r--r--drivers/scsi/lpfc/lpfc.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c41
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h66
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c384
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c328
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h18
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c154
-rw-r--r--drivers/scsi/lpfc/lpfc_logmsg.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c13
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c55
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c115
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c70
-rw-r--r--drivers/scsi/megaraid.c10
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c20
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.h1
-rw-r--r--drivers/scsi/mesh.c1
-rw-r--r--drivers/scsi/mvsas.c2970
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c3
-rw-r--r--drivers/scsi/qlogicpti.c12
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/scsi_lib.c8
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c4
-rw-r--r--drivers/scsi/sd.c3
-rw-r--r--drivers/scsi/ses.c147
-rw-r--r--drivers/scsi/st.c11
-rw-r--r--drivers/scsi/st.h1
-rw-r--r--drivers/scsi/stex.c44
-rw-r--r--drivers/scsi/sym53c416.c16
-rw-r--r--drivers/serial/atmel_serial.c2
-rw-r--r--drivers/serial/sh-sci.c2
-rw-r--r--drivers/serial/sh-sci.h9
-rw-r--r--drivers/sh/maple/maple.c981
-rw-r--r--drivers/spi/atmel_spi.c10
-rw-r--r--drivers/spi/pxa2xx_spi.c41
-rw-r--r--drivers/ssb/Kconfig6
-rw-r--r--drivers/ssb/Makefile1
-rw-r--r--drivers/ssb/driver_chipcommon.c65
-rw-r--r--drivers/ssb/driver_extif.c25
-rw-r--r--drivers/ssb/driver_pcicore.c45
-rw-r--r--drivers/ssb/embedded.c132
-rw-r--r--drivers/ssb/main.c4
-rw-r--r--drivers/thermal/thermal.c39
-rw-r--r--drivers/uio/uio.c54
-rw-r--r--drivers/usb/class/cdc-acm.c10
-rw-r--r--drivers/usb/class/usblp.c1
-rw-r--r--drivers/usb/core/quirks.c12
-rw-r--r--drivers/usb/gadget/ether.c1
-rw-r--r--drivers/usb/gadget/file_storage.c8
-rw-r--r--drivers/usb/gadget/printer.c2
-rw-r--r--drivers/usb/host/Kconfig5
-rw-r--r--drivers/usb/host/ehci-hcd.c26
-rw-r--r--drivers/usb/host/sl811-hcd.c1
-rw-r--r--drivers/usb/host/u132-hcd.c15
-rw-r--r--drivers/usb/misc/ldusb.c2
-rw-r--r--drivers/usb/misc/trancevibrator.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/option.c43
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/storage/protocol.c27
-rw-r--r--drivers/usb/storage/transport.c11
-rw-r--r--drivers/usb/storage/unusual_devs.h34
-rw-r--r--drivers/video/aty/atyfb_base.c64
-rw-r--r--drivers/video/cg14.c1
-rw-r--r--drivers/video/chipsfb.c2
-rw-r--r--drivers/video/nvidia/nvidia.c2
-rw-r--r--drivers/video/pxafb.c8
-rw-r--r--drivers/video/sbuslib.c1
-rw-r--r--drivers/video/uvesafb.c2
-rw-r--r--drivers/watchdog/Kconfig25
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/bfin_wdt.c7
-rw-r--r--drivers/watchdog/hpwdt.c926
-rw-r--r--drivers/watchdog/mtx-1_wdt.c35
-rw-r--r--drivers/watchdog/sb_wdog.c353
414 files changed, 17518 insertions, 5439 deletions
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 9ce983ed60f0..ea92bac42c53 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -186,6 +186,12 @@ static int __init dmi_unknown_osi_linux(const struct dmi_system_id *d)
 	acpi_dmi_osi_linux(-1, d);	/* unknown */
 	return 0;
 }
+static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
+{
+	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
+	acpi_osi_setup("!Windows 2006");
+	return 0;
+}
 
 /*
  * Most BIOS that invoke OSI(Linux) do nothing with it.
@@ -228,10 +234,10 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 	 * DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5520"),
 	 * DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 6460"),
 	 * DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 7510"),
-	 * DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5220"),
 	 *
 	 * _OSI(Linux) is a NOP:
 	 * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"),
+	 * DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5220"),
 	 */
 	{
 	.callback = dmi_disable_osi_linux,
@@ -327,12 +333,20 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 	},
 	{ /* OSI(Linux) effect unknown */
 	.callback = dmi_unknown_osi_linux,
-	.ident = "Dell OP GX620",
+	.ident = "Dell OptiPlex GX620",
 	.matches = {
 		     DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 		     DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex GX620"),
 		},
 	},
+	{ /* OSI(Linux) causes some USB initialization to not run */
+	.callback = dmi_unknown_osi_linux,
+	.ident = "Dell OptiPlex 755",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 755"),
+		},
+	},
 	{ /* OSI(Linux) effect unknown */
 	.callback = dmi_unknown_osi_linux,
 	.ident = "Dell PE 1900",
@@ -342,6 +356,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 		},
 	},
 	{ /* OSI(Linux) is a NOP */
+	.callback = dmi_unknown_osi_linux,
+	.ident = "Dell PE 1950",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1950"),
+		},
+	},
+	{ /* OSI(Linux) is a NOP */
 	.callback = dmi_disable_osi_linux,
 	.ident = "Dell PE R200",
 	.matches = {
@@ -357,6 +379,22 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 		     DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation 390"),
 		},
 	},
+	{ /* OSI(Linux) touches USB */
+	.callback = dmi_unknown_osi_linux,
+	.ident = "Dell PR 390",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation 690"),
+		},
+	},
+	{ /* OSI(Linux) unknown - ASL looks benign, but may effect dock/SMM */
+	.callback = dmi_unknown_osi_linux,
+	.ident = "Dell PR M4300",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "Precision M4300"),
+		},
+	},
 	{ /* OSI(Linux) is a NOP */
 	.callback = dmi_disable_osi_linux,
 	.ident = "Dell Vostro 1000",
@@ -390,10 +428,10 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 	 * DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 1536"),
 	 * DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 1556"),
 	 * DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xi 1546"),
+	 * DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
 	 * _OSI(Linux) unknown effect:
 	 * DMI_MATCH(DMI_PRODUCT_NAME, "Amilo M1425"),
 	 * DMI_MATCH(DMI_PRODUCT_NAME, "Amilo Si 1520"),
-	 * DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
 	 */
 	{
 	.callback = dmi_disable_osi_linux,
@@ -402,6 +440,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
 		},
 	},
+	{
+	.callback = dmi_disable_osi_vista,
+	.ident = "Fujitsu Siemens",
+	.matches = {
+		     DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+		     DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
+		},
+	},
 	/*
 	 * Disable OSI(Linux) warnings on all "Hewlett-Packard"
 	 *
@@ -443,10 +489,11 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 	 * _OSI(Linux) helps sound
 	 * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
 	 * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
+	 * _OSI(Linux) has Linux specific hooks
+	 * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
 	 * _OSI(Linux) is a NOP:
 	 * DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"),
-	 * _OSI(Linux) effect unknown
-	 * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
+	 * DMI_MATCH(DMI_PRODUCT_VERSION, "LENOVO3000 V100"),
 	 */
 	{
 	.callback = dmi_enable_osi_linux,
@@ -465,7 +512,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 		},
 	},
 	{
-	.callback = dmi_unknown_osi_linux,
+	.callback = dmi_enable_osi_linux,
 	.ident = "Lenovo ThinkPad X61",
 	.matches = {
 		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -473,7 +520,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 		},
 	},
 	{
-	.callback = dmi_unknown_osi_linux,
+	.callback = dmi_disable_osi_linux,
 	.ident = "Lenovo 3000 V100",
 	.matches = {
 		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -543,8 +590,9 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 	 * Disable OSI(Linux) warnings on all "Sony Corporation"
 	 *
 	 * _OSI(Linux) is a NOP:
-	 * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SZ650N"),
+	 * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NR11S_S"),
 	 * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SZ38GP_C"),
+	 * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SZ650N"),
 	 * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-TZ21MN_N"),
 	 * _OSI(Linux) unknown effect:
 	 * DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ11M"),
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 7222a18a0319..caf873c14bfb 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -943,7 +943,11 @@ int __init acpi_ec_ecdt_probe(void)
 		boot_ec->command_addr = ecdt_ptr->control.address;
 		boot_ec->data_addr = ecdt_ptr->data.address;
 		boot_ec->gpe = ecdt_ptr->gpe;
-		boot_ec->handle = ACPI_ROOT_OBJECT;
+		if (ACPI_FAILURE(acpi_get_handle(NULL, ecdt_ptr->id,
+						 &boot_ec->handle))) {
+			pr_info("Failed to locate handle for boot EC\n");
+			boot_ec->handle = ACPI_ROOT_OBJECT;
+		}
 	} else {
 		/* This workaround is needed only on some broken machines,
 		 * which require early EC, but fail to provide ECDT */
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 5479dc0eeeec..abec1ca94cf4 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -110,7 +110,7 @@ static const struct file_operations acpi_system_event_ops = {
 #endif	/* CONFIG_ACPI_PROC_EVENT */
 
 /* ACPI notifier chain */
-BLOCKING_NOTIFIER_HEAD(acpi_chain_head);
+static BLOCKING_NOTIFIER_HEAD(acpi_chain_head);
 
 int acpi_notifier_call_chain(struct acpi_device *dev, u32 type, u32 data)
 {
diff --git a/drivers/acpi/executer/exregion.c b/drivers/acpi/executer/exregion.c
index 2e9ce94798c7..3f51b7e84a17 100644
--- a/drivers/acpi/executer/exregion.c
+++ b/drivers/acpi/executer/exregion.c
@@ -338,6 +338,7 @@ acpi_ex_pci_config_space_handler(u32 function,
 	acpi_status status = AE_OK;
 	struct acpi_pci_id *pci_id;
 	u16 pci_register;
+	u32 value32;
 
 	ACPI_FUNCTION_TRACE(ex_pci_config_space_handler);
 
@@ -364,9 +365,9 @@ acpi_ex_pci_config_space_handler(u32 function,
 	switch (function) {
 	case ACPI_READ:
 
-		*value = 0;
 		status = acpi_os_read_pci_configuration(pci_id, pci_register,
-							value, bit_width);
+							&value32, bit_width);
+		*value = value32;
 		break;
 
 	case ACPI_WRITE:
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 48cb705b274a..c8e3cba423ef 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -256,22 +256,28 @@ static int acpi_fan_add(struct acpi_device *device)
 
 	cdev = thermal_cooling_device_register("Fan", device,
 						&fan_cooling_ops);
-	if (cdev)
+	if (IS_ERR(cdev)) {
+		result = PTR_ERR(cdev);
+		goto end;
+	}
+	if (cdev) {
 		printk(KERN_INFO PREFIX
 			"%s is registered as cooling_device%d\n",
 			device->dev.bus_id, cdev->id);
-	else
-		goto end;
-	acpi_driver_data(device) = cdev;
-	result = sysfs_create_link(&device->dev.kobj, &cdev->device.kobj,
-				   "thermal_cooling");
-	if (result)
-		return result;
 
-	result = sysfs_create_link(&cdev->device.kobj, &device->dev.kobj,
-				   "device");
-	if (result)
-		return result;
+		acpi_driver_data(device) = cdev;
+		result = sysfs_create_link(&device->dev.kobj,
+					   &cdev->device.kobj,
+					   "thermal_cooling");
+		if (result)
+			return result;
+
+		result = sysfs_create_link(&cdev->device.kobj,
+					   &device->dev.kobj,
+					   "device");
+		if (result)
+			return result;
+	}
 
 	result = acpi_fan_add_fs(device);
 	if (result)
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index 058d0be5cbe2..4290e0193097 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -616,6 +616,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
 		return_ACPI_STATUS(status);
 	}
 
+	arg.integer.value = sleep_state;
 	status = acpi_evaluate_object(NULL, METHOD_NAME__WAK, &arg_list, NULL);
 	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
 		ACPI_EXCEPTION((AE_INFO, status, "During Method _WAK"));
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 34b3386dedca..8edba7b678eb 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -325,7 +325,7 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
 }
 
 #ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
-struct acpi_table_header *acpi_find_dsdt_initrd(void)
+static struct acpi_table_header *acpi_find_dsdt_initrd(void)
 {
 	struct file *firmware_file;
 	mm_segment_t oldfs;
@@ -419,7 +419,7 @@ acpi_os_table_override(struct acpi_table_header * existing_table,
 }
 
 #ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
-int __init acpi_no_initrd_override_setup(char *s)
+static int __init acpi_no_initrd_override_setup(char *s)
 {
 	acpi_no_initrd_override = 1;
 	return 1;
@@ -623,7 +623,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
 
 acpi_status
 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
-			       void *value, u32 width)
+			       u32 *value, u32 width)
 {
 	int result, size;
 
@@ -689,7 +689,6 @@ static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */
 	acpi_status status;
 	unsigned long temp;
 	acpi_object_type type;
-	u8 tu8;
 
 	acpi_get_parent(chandle, &handle);
 	if (handle != rhandle) {
@@ -704,6 +703,7 @@ static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */
 		    acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
 					  &temp);
 		if (ACPI_SUCCESS(status)) {
+			u32 val;
 			pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
 			pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
 
@@ -712,24 +712,24 @@ static void acpi_os_derive_pci_id_2(acpi_handle rhandle, /* upper bound */
 
 			/* any nicer way to get bus number of bridge ? */
 			status =
-			    acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8,
+			    acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
 							   8);
 			if (ACPI_SUCCESS(status)
-			    && ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) {
+			    && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
 				status =
 				    acpi_os_read_pci_configuration(pci_id, 0x18,
-								   &tu8, 8);
+								   &val, 8);
 				if (!ACPI_SUCCESS(status)) {
 					/* Certainly broken... FIX ME */
 					return;
 				}
 				*is_bridge = 1;
-				pci_id->bus = tu8;
+				pci_id->bus = val;
 				status =
 				    acpi_os_read_pci_configuration(pci_id, 0x19,
-								   &tu8, 8);
+								   &val, 8);
 				if (ACPI_SUCCESS(status)) {
-					*bus_number = tu8;
+					*bus_number = val;
 				}
 			} else
 				*is_bridge = 0;
@@ -1109,7 +1109,7 @@ void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
  * string starting with '!' disables that string
  * otherwise string is added to list, augmenting built-in strings
  */
-static int __init acpi_osi_setup(char *str)
+int __init acpi_osi_setup(char *str)
 {
 	if (str == NULL || *str == '\0') {
 		printk(KERN_INFO PREFIX "_OSI method disabled\n");
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 75ccf5d18bf4..a3cc8a98255c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -670,21 +670,26 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device)
 
 	pr->cdev = thermal_cooling_device_register("Processor", device,
 						&processor_cooling_ops);
-	if (pr->cdev)
+	if (IS_ERR(pr->cdev)) {
+		result = PTR_ERR(pr->cdev);
+		goto end;
+	}
+	if (pr->cdev) {
 		printk(KERN_INFO PREFIX
 			"%s is registered as cooling_device%d\n",
 			device->dev.bus_id, pr->cdev->id);
-	else
-		goto end;
 
-	result = sysfs_create_link(&device->dev.kobj, &pr->cdev->device.kobj,
-				   "thermal_cooling");
-	if (result)
-		return result;
-	result = sysfs_create_link(&pr->cdev->device.kobj, &device->dev.kobj,
-				   "device");
-	if (result)
-		return result;
+		result = sysfs_create_link(&device->dev.kobj,
+					   &pr->cdev->device.kobj,
+					   "thermal_cooling");
+		if (result)
+			return result;
+		result = sysfs_create_link(&pr->cdev->device.kobj,
+					   &device->dev.kobj,
+					   "device");
+		if (result)
+			return result;
+	}
 
 	if (pr->flags.throttling) {
 		printk(KERN_INFO PREFIX "%s [%s] (supports",
@@ -809,10 +814,12 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
 
 	acpi_processor_remove_fs(device);
 
-	sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
-	sysfs_remove_link(&pr->cdev->device.kobj, "device");
-	thermal_cooling_device_unregister(pr->cdev);
-	pr->cdev = NULL;
+	if (pr->cdev) {
+		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+		sysfs_remove_link(&pr->cdev->device.kobj, "device");
+		thermal_cooling_device_unregister(pr->cdev);
+		pr->cdev = NULL;
+	}
 
 	processors[pr->id] = NULL;
 
@@ -826,8 +833,6 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
  *			Acpi processor hotplug support			      *
  ****************************************************************************/
 
-static int is_processor_present(acpi_handle handle);
-
 static int is_processor_present(acpi_handle handle)
 {
 	acpi_status status;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 32003fdc91e8..6f3b217699e9 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -364,7 +364,7 @@ int acpi_processor_resume(struct acpi_device * device)
 	return 0;
 }
 
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
 static int tsc_halts_in_c(int state)
 {
 	switch (boot_cpu_data.x86_vendor) {
@@ -544,7 +544,7 @@ static void acpi_processor_idle(void)
 	/* Get end time (ticks) */
 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
 	/* TSC halts in C2, so notify users */
 	if (tsc_halts_in_c(ACPI_STATE_C2))
 		mark_tsc_unstable("possible TSC halt in C2");
@@ -609,7 +609,7 @@ static void acpi_processor_idle(void)
 		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
 	}
 
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
 	/* TSC halts in C3, so notify users */
 	if (tsc_halts_in_c(ACPI_STATE_C3))
 		mark_tsc_unstable("TSC halts in C3");
@@ -945,11 +945,16 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 				 * Otherwise, ignore this info and continue.
 				 */
 				cx.entry_method = ACPI_CSTATE_HALT;
+				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
 			} else {
 				continue;
 			}
+		} else {
+			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
+				 cx.address);
 		}
 
+
 		obj = &(element->package.elements[2]);
 		if (obj->type != ACPI_TYPE_INTEGER)
 			continue;
@@ -1420,6 +1425,14 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 		return 0;
 
 	local_irq_disable();
+
+	/* Do not access any ACPI IO ports in suspend path */
+	if (acpi_idle_suspend) {
+		acpi_safe_halt();
+		local_irq_enable();
+		return 0;
+	}
+
 	if (pr->flags.bm_check)
 		acpi_idle_update_bm_rld(pr, cx);
 
@@ -1487,7 +1500,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	acpi_idle_do_entry(cx);
 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
 	/* TSC could halt in idle, so notify users */
 	if (tsc_halts_in_c(cx->type))
 		mark_tsc_unstable("TSC halts in idle");;
@@ -1601,7 +1614,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		spin_unlock(&c3_lock);
 	}
 
-#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
+#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
 	/* TSC could halt in idle, so notify users */
 	if (tsc_halts_in_c(ACPI_STATE_C3))
 		mark_tsc_unstable("TSC halts in idle");
@@ -1643,6 +1656,11 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 		return -EINVAL;
 	}
 
+	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
+		dev->states[i].name[0] = '\0';
+		dev->states[i].desc[0] = '\0';
+	}
+
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 		cx = &pr->power.states[i];
 		state = &dev->states[count];
@@ -1659,6 +1677,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 		cpuidle_set_statedata(state, cx);
 
 		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
+		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
 		state->exit_latency = cx->latency;
 		state->target_residency = cx->latency * latency_factor;
 		state->power_usage = cx->power;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 34f157571080..eba55b7d6c95 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -36,16 +36,20 @@ ACPI_MODULE_NAME("utils");
 /* --------------------------------------------------------------------------
                             Object Evaluation Helpers
    -------------------------------------------------------------------------- */
+static void
+acpi_util_eval_error(acpi_handle h, acpi_string p, acpi_status s)
+{
 #ifdef ACPI_DEBUG_OUTPUT
-#define acpi_util_eval_error(h,p,s) {\
-	char prefix[80] = {'\0'};\
-	struct acpi_buffer buffer = {sizeof(prefix), prefix};\
-	acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);\
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluate [%s.%s]: %s\n",\
-		(char *) prefix, p, acpi_format_exception(s))); }
+	char prefix[80] = {'\0'};
+	struct acpi_buffer buffer = {sizeof(prefix), prefix};
+	acpi_get_name(h, ACPI_FULL_PATHNAME, &buffer);
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluate [%s.%s]: %s\n",
+		(char *) prefix, p, acpi_format_exception(s)));
 #else
-#define acpi_util_eval_error(h,p,s)
+	return;
 #endif
+}
+
 acpi_status
 acpi_extract_package(union acpi_object *package,
 		     struct acpi_buffer *format, struct acpi_buffer *buffer)
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 7f714fa2a454..12cce69b5441 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -731,6 +731,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 
 	device->cdev = thermal_cooling_device_register("LCD",
 				device->dev, &video_cooling_ops);
+	if (IS_ERR(device->cdev))
+		return;
+
 	if (device->cdev) {
 		printk(KERN_INFO PREFIX
 			"%s is registered as cooling_device%d\n",
diff --git a/drivers/acpi/wmi.c b/drivers/acpi/wmi.c
index 36b84ab418dd..efacc9f8bfe3 100644
--- a/drivers/acpi/wmi.c
+++ b/drivers/acpi/wmi.c
@@ -247,7 +247,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
 	block = &wblock->gblock;
 	handle = wblock->handle;
 
-	if (!block->flags & ACPI_WMI_METHOD)
+	if (!(block->flags & ACPI_WMI_METHOD))
 		return AE_BAD_DATA;
 
 	if (block->instance_count < instance)
@@ -673,11 +673,11 @@ static int __init acpi_wmi_init(void)
 {
 	acpi_status result;
 
+	INIT_LIST_HEAD(&wmi_blocks.list);
+
 	if (acpi_disabled)
 		return -ENODEV;
 
-	INIT_LIST_HEAD(&wmi_blocks.list);
-
 	result = acpi_bus_register_driver(&acpi_wmi_driver);
 
 	if (result < 0) {
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 29e71bddd6ff..1db93b619074 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -85,6 +85,7 @@ enum {
 	board_ahci_ign_iferr	= 2,
 	board_ahci_sb600	= 3,
 	board_ahci_mv		= 4,
+	board_ahci_sb700	= 5,
 
 	/* global controller registers */
 	HOST_CAP		= 0x00, /* host capabilities */
@@ -442,6 +443,16 @@ static const struct ata_port_info ahci_port_info[] = {
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
 	},
+	/* board_ahci_sb700 */
+	{
+		AHCI_HFLAGS	(AHCI_HFLAG_IGN_SERR_INTERNAL |
+				 AHCI_HFLAG_NO_PMP),
+		.flags		= AHCI_FLAG_COMMON,
+		.link_flags	= AHCI_LFLAG_COMMON,
+		.pio_mask	= 0x1f, /* pio0-4 */
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
 };
446 457
447static const struct pci_device_id ahci_pci_tbl[] = { 458static const struct pci_device_id ahci_pci_tbl[] = {
@@ -484,12 +495,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 
 	/* ATI */
 	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
-	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700/800 */
-	{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb600 }, /* ATI SB700/800 */
-	{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb600 }, /* ATI SB700/800 */
-	{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb600 }, /* ATI SB700/800 */
-	{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb600 }, /* ATI SB700/800 */
-	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb600 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
 
 	/* VIA */
 	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
@@ -1932,7 +1943,7 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
 	u32 ctl;
 
-	if (mesg.event == PM_EVENT_SUSPEND) {
+	if (mesg.event & PM_EVENT_SLEEP) {
 		/* AHCI spec rev1.1 section 8.3.3:
 		 * Software must disable interrupts prior to requesting a
 		 * transition of the HBA to D3 state.
@@ -1975,16 +1986,11 @@ static int ahci_port_start(struct ata_port *ap)
 	struct ahci_port_priv *pp;
 	void *mem;
 	dma_addr_t mem_dma;
-	int rc;
 
 	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 
-	rc = ata_pad_alloc(ap, dev);
-	if (rc)
-		return rc;
-
 	mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
 				  GFP_KERNEL);
 	if (!mem)
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 9c2515f67de5..fae8404254c0 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1339,7 +1339,7 @@ static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 	 * cycles and power trying to do something to the sleeping
 	 * beauty.
 	 */
-	if (piix_broken_suspend() && mesg.event == PM_EVENT_SUSPEND) {
+	if (piix_broken_suspend() && (mesg.event & PM_EVENT_SLEEP)) {
 		pci_save_state(pdev);
 
 		/* mark its power state as "unknown", since we don't
@@ -1652,7 +1652,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
 		u8 tmp;
 		pci_read_config_byte(pdev, PIIX_SCC, &tmp);
 		if (tmp == PIIX_AHCI_DEVICE) {
-			int rc = piix_disable_ahci(pdev);
+			rc = piix_disable_ahci(pdev);
 			if (rc)
 				return rc;
 		}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3011919f3ec8..fbc24358ada0 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -87,6 +87,28 @@ static struct workqueue_struct *ata_wq;
 
 struct workqueue_struct *ata_aux_wq;
 
+struct ata_force_param {
+	const char	*name;
+	unsigned int	cbl;
+	int		spd_limit;
+	unsigned long	xfer_mask;
+	unsigned int	horkage_on;
+	unsigned int	horkage_off;
+};
+
+struct ata_force_ent {
+	int			port;
+	int			device;
+	struct ata_force_param	param;
+};
+
+static struct ata_force_ent *ata_force_tbl;
+static int ata_force_tbl_size;
+
+static char ata_force_param_buf[PAGE_SIZE] __initdata;
+module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0444);
+MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
+
 int atapi_enabled = 1;
 module_param(atapi_enabled, int, 0444);
 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
@@ -130,6 +152,179 @@ MODULE_VERSION(DRV_VERSION);
 
 
 /**
+ * ata_force_cbl - force cable type according to libata.force
+ * @ap: ATA port of interest
+ *
+ * Force cable type according to libata.force and whine about it.
+ * The last entry which has matching port number is used, so it
+ * can be specified as part of device force parameters.  For
+ * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
+ * same effect.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_force_cbl(struct ata_port *ap)
+{
+	int i;
+
+	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+		const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+		if (fe->port != -1 && fe->port != ap->print_id)
+			continue;
+
+		if (fe->param.cbl == ATA_CBL_NONE)
+			continue;
+
+		ap->cbl = fe->param.cbl;
+		ata_port_printk(ap, KERN_NOTICE,
+				"FORCE: cable set to %s\n", fe->param.name);
+		return;
+	}
+}
+
+/**
+ * ata_force_spd_limit - force SATA spd limit according to libata.force
+ * @link: ATA link of interest
+ *
+ * Force SATA spd limit according to libata.force and whine about
+ * it.  When only the port part is specified (e.g. 1:), the limit
+ * applies to all links connected to both the host link and all
+ * fan-out ports connected via PMP.  If the device part is
+ * specified as 0 (e.g. 1.00:), it specifies the first fan-out
+ * link not the host link.  Device number 15 always points to the
+ * host link whether PMP is attached or not.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_spd_limit(struct ata_link *link)
+{
+	int linkno, i;
+
+	if (ata_is_host_link(link))
+		linkno = 15;
+	else
+		linkno = link->pmp;
+
+	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+		const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+		if (fe->port != -1 && fe->port != link->ap->print_id)
+			continue;
+
+		if (fe->device != -1 && fe->device != linkno)
+			continue;
+
+		if (!fe->param.spd_limit)
+			continue;
+
+		link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
+		ata_link_printk(link, KERN_NOTICE,
+			"FORCE: PHY spd limit set to %s\n", fe->param.name);
+		return;
+	}
+}
+
+/**
+ * ata_force_xfermask - force xfermask according to libata.force
+ * @dev: ATA device of interest
+ *
+ * Force xfer_mask according to libata.force and whine about it.
+ * For consistency with link selection, device number 15 selects
+ * the first device connected to the host link.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_xfermask(struct ata_device *dev)
+{
+	int devno = dev->link->pmp + dev->devno;
+	int alt_devno = devno;
+	int i;
+
+	/* allow n.15 for the first device attached to host port */
+	if (ata_is_host_link(dev->link) && devno == 0)
+		alt_devno = 15;
+
+	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+		const struct ata_force_ent *fe = &ata_force_tbl[i];
+		unsigned long pio_mask, mwdma_mask, udma_mask;
+
+		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
+			continue;
+
+		if (fe->device != -1 && fe->device != devno &&
+		    fe->device != alt_devno)
+			continue;
+
+		if (!fe->param.xfer_mask)
+			continue;
+
+		ata_unpack_xfermask(fe->param.xfer_mask,
+				    &pio_mask, &mwdma_mask, &udma_mask);
+		if (udma_mask)
+			dev->udma_mask = udma_mask;
+		else if (mwdma_mask) {
+			dev->udma_mask = 0;
+			dev->mwdma_mask = mwdma_mask;
+		} else {
+			dev->udma_mask = 0;
+			dev->mwdma_mask = 0;
+			dev->pio_mask = pio_mask;
+		}
+
+		ata_dev_printk(dev, KERN_NOTICE,
+			"FORCE: xfer_mask set to %s\n", fe->param.name);
+		return;
+	}
+}
+
+/**
+ * ata_force_horkage - force horkage according to libata.force
+ * @dev: ATA device of interest
+ *
+ * Force horkage according to libata.force and whine about it.
+ * For consistency with link selection, device number 15 selects
+ * the first device connected to the host link.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_horkage(struct ata_device *dev)
+{
+	int devno = dev->link->pmp + dev->devno;
+	int alt_devno = devno;
+	int i;
+
+	/* allow n.15 for the first device attached to host port */
+	if (ata_is_host_link(dev->link) && devno == 0)
+		alt_devno = 15;
+
+	for (i = 0; i < ata_force_tbl_size; i++) {
+		const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
+			continue;
+
+		if (fe->device != -1 && fe->device != devno &&
+		    fe->device != alt_devno)
+			continue;
+
+		if (!(~dev->horkage & fe->param.horkage_on) &&
+		    !(dev->horkage & fe->param.horkage_off))
+			continue;
+
+		dev->horkage |= fe->param.horkage_on;
+		dev->horkage &= ~fe->param.horkage_off;
+
+		ata_dev_printk(dev, KERN_NOTICE,
+			"FORCE: horkage modified (%s)\n", fe->param.name);
+	}
+}
+
+/**
  * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
  * @tf: Taskfile to convert
  * @pmp: Port multiplier port
@@ -2067,6 +2262,7 @@ int ata_dev_configure(struct ata_device *dev)
 
 	/* set horkage */
 	dev->horkage |= ata_dev_blacklisted(dev);
+	ata_force_horkage(dev);
 
 	/* let ACPI work its magic */
 	rc = ata_acpi_on_devcfg(dev);
@@ -2200,6 +2396,7 @@ int ata_dev_configure(struct ata_device *dev)
 	else if (dev->class == ATA_DEV_ATAPI) {
 		const char *cdb_intr_string = "";
 		const char *atapi_an_string = "";
+		const char *dma_dir_string = "";
 		u32 sntf;
 
 		rc = atapi_cdb_len(id);
@@ -2240,13 +2437,19 @@ int ata_dev_configure(struct ata_device *dev)
2240 cdb_intr_string = ", CDB intr"; 2437 cdb_intr_string = ", CDB intr";
2241 } 2438 }
2242 2439
2440 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2441 dev->flags |= ATA_DFLAG_DMADIR;
2442 dma_dir_string = ", DMADIR";
2443 }
2444
2243 /* print device info to dmesg */ 2445 /* print device info to dmesg */
2244 if (ata_msg_drv(ap) && print_info) 2446 if (ata_msg_drv(ap) && print_info)
2245 ata_dev_printk(dev, KERN_INFO, 2447 ata_dev_printk(dev, KERN_INFO,
2246 "ATAPI: %s, %s, max %s%s%s\n", 2448 "ATAPI: %s, %s, max %s%s%s%s\n",
2247 modelbuf, fwrevbuf, 2449 modelbuf, fwrevbuf,
2248 ata_mode_string(xfer_mask), 2450 ata_mode_string(xfer_mask),
2249 cdb_intr_string, atapi_an_string); 2451 cdb_intr_string, atapi_an_string,
2452 dma_dir_string);
2250 } 2453 }
2251 2454
2252 /* determine max_sectors */ 2455 /* determine max_sectors */
@@ -3048,6 +3251,8 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3048static int ata_dev_set_mode(struct ata_device *dev) 3251static int ata_dev_set_mode(struct ata_device *dev)
3049{ 3252{
3050 struct ata_eh_context *ehc = &dev->link->eh_context; 3253 struct ata_eh_context *ehc = &dev->link->eh_context;
3254 const char *dev_err_whine = "";
3255 int ign_dev_err = 0;
3051 unsigned int err_mask; 3256 unsigned int err_mask;
3052 int rc; 3257 int rc;
3053 3258
@@ -3057,41 +3262,57 @@ static int ata_dev_set_mode(struct ata_device *dev)
3057 3262
3058 err_mask = ata_dev_set_xfermode(dev); 3263 err_mask = ata_dev_set_xfermode(dev);
3059 3264
3265 if (err_mask & ~AC_ERR_DEV)
3266 goto fail;
3267
3268 /* revalidate */
3269 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3270 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3271 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3272 if (rc)
3273 return rc;
3274
3060 /* Old CFA may refuse this command, which is just fine */ 3275 /* Old CFA may refuse this command, which is just fine */
3061 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id)) 3276 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
3062 err_mask &= ~AC_ERR_DEV; 3277 ign_dev_err = 1;
3063 3278
3064 /* Some very old devices and some bad newer ones fail any kind of 3279 /* Some very old devices and some bad newer ones fail any kind of
3065 SET_XFERMODE request but support PIO0-2 timings and no IORDY */ 3280 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
3066 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) && 3281 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
3067 dev->pio_mode <= XFER_PIO_2) 3282 dev->pio_mode <= XFER_PIO_2)
3068 err_mask &= ~AC_ERR_DEV; 3283 ign_dev_err = 1;
3069 3284
3070 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 3285 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3071 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3286 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3072 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3287 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3073 dev->dma_mode == XFER_MW_DMA_0 && 3288 dev->dma_mode == XFER_MW_DMA_0 &&
3074 (dev->id[63] >> 8) & 1) 3289 (dev->id[63] >> 8) & 1)
3075 err_mask &= ~AC_ERR_DEV; 3290 ign_dev_err = 1;
3076 3291
3077 if (err_mask) { 3292 /* if the device is actually configured correctly, ignore dev err */
3078 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " 3293 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3079 "(err_mask=0x%x)\n", err_mask); 3294 ign_dev_err = 1;
3080 return -EIO;
3081 }
3082 3295
3083 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3296 if (err_mask & AC_ERR_DEV) {
3084 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3297 if (!ign_dev_err)
3085 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3298 goto fail;
3086 if (rc) 3299 else
3087 return rc; 3300 dev_err_whine = " (device error ignored)";
3301 }
3088 3302
3089 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3303 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3090 dev->xfer_shift, (int)dev->xfer_mode); 3304 dev->xfer_shift, (int)dev->xfer_mode);
3091 3305
3092 ata_dev_printk(dev, KERN_INFO, "configured for %s\n", 3306 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3093 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode))); 3307 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3308 dev_err_whine);
3309
3094 return 0; 3310 return 0;
3311
3312 fail:
3313 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3314 "(err_mask=0x%x)\n", err_mask);
3315 return -EIO;
3095} 3316}
3096 3317
3097/** 3318/**
@@ -3132,6 +3353,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3132 mode_mask = ATA_DMA_MASK_CFA; 3353 mode_mask = ATA_DMA_MASK_CFA;
3133 3354
3134 ata_dev_xfermask(dev); 3355 ata_dev_xfermask(dev);
3356 ata_force_xfermask(dev);
3135 3357
3136 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 3358 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3137 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); 3359 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
@@ -4172,6 +4394,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4172 /* Devices which report 1 sector over size HPA */ 4394 /* Devices which report 1 sector over size HPA */
4173 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4395 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4174 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4396 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4397 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
4175 4398
4176 /* Devices which get the IVB wrong */ 4399 /* Devices which get the IVB wrong */
4177 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4400 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
@@ -4474,30 +4697,13 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
4474 struct ata_port *ap = qc->ap; 4697 struct ata_port *ap = qc->ap;
4475 struct scatterlist *sg = qc->sg; 4698 struct scatterlist *sg = qc->sg;
4476 int dir = qc->dma_dir; 4699 int dir = qc->dma_dir;
4477 void *pad_buf = NULL;
4478 4700
4479 WARN_ON(sg == NULL); 4701 WARN_ON(sg == NULL);
4480 4702
4481 VPRINTK("unmapping %u sg elements\n", qc->mapped_n_elem); 4703 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4482 4704
4483 /* if we padded the buffer out to 32-bit bound, and data 4705 if (qc->n_elem)
4484 * xfer direction is from-device, we must copy from the 4706 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4485 * pad buffer back into the supplied buffer
4486 */
4487 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4488 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4489
4490 if (qc->mapped_n_elem)
4491 dma_unmap_sg(ap->dev, sg, qc->mapped_n_elem, dir);
4492 /* restore last sg */
4493 if (qc->last_sg)
4494 *qc->last_sg = qc->saved_last_sg;
4495 if (pad_buf) {
4496 struct scatterlist *psg = &qc->extra_sg[1];
4497 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4498 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4499 kunmap_atomic(addr, KM_IRQ0);
4500 }
4501 4707
4502 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4708 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4503 qc->sg = NULL; 4709 qc->sg = NULL;
@@ -4640,43 +4846,6 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4640} 4846}
4641 4847
4642/** 4848/**
4643 * atapi_qc_may_overflow - Check whether data transfer may overflow
4644 * @qc: ATA command in question
4645 *
4646 * ATAPI commands which transfer variable length data to host
4647 * might overflow due to application error or hardare bug. This
4648 * function checks whether overflow should be drained and ignored
4649 * for @qc.
4650 *
4651 * LOCKING:
4652 * None.
4653 *
4654 * RETURNS:
4655 * 1 if @qc may overflow; otherwise, 0.
4656 */
4657static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
4658{
4659 if (qc->tf.protocol != ATAPI_PROT_PIO &&
4660 qc->tf.protocol != ATAPI_PROT_DMA)
4661 return 0;
4662
4663 if (qc->tf.flags & ATA_TFLAG_WRITE)
4664 return 0;
4665
4666 switch (qc->cdb[0]) {
4667 case READ_10:
4668 case READ_12:
4669 case WRITE_10:
4670 case WRITE_12:
4671 case GPCMD_READ_CD:
4672 case GPCMD_READ_CD_MSF:
4673 return 0;
4674 }
4675
4676 return 1;
4677}
4678
4679/**
4680 * ata_std_qc_defer - Check whether a qc needs to be deferred 4849 * ata_std_qc_defer - Check whether a qc needs to be deferred
4681 * @qc: ATA command in question 4850 * @qc: ATA command in question
4682 * 4851 *
@@ -4763,97 +4932,6 @@ void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4763 qc->cursg = qc->sg; 4932 qc->cursg = qc->sg;
4764} 4933}
4765 4934
4766static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
4767 unsigned int *n_elem_extra,
4768 unsigned int *nbytes_extra)
4769{
4770 struct ata_port *ap = qc->ap;
4771 unsigned int n_elem = qc->n_elem;
4772 struct scatterlist *lsg, *copy_lsg = NULL, *tsg = NULL, *esg = NULL;
4773
4774 *n_elem_extra = 0;
4775 *nbytes_extra = 0;
4776
4777 /* needs padding? */
4778 qc->pad_len = qc->nbytes & 3;
4779
4780 if (likely(!qc->pad_len))
4781 return n_elem;
4782
4783 /* locate last sg and save it */
4784 lsg = sg_last(qc->sg, n_elem);
4785 qc->last_sg = lsg;
4786 qc->saved_last_sg = *lsg;
4787
4788 sg_init_table(qc->extra_sg, ARRAY_SIZE(qc->extra_sg));
4789
4790 if (qc->pad_len) {
4791 struct scatterlist *psg = &qc->extra_sg[1];
4792 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4793 unsigned int offset;
4794
4795 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4796
4797 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4798
4799 /* psg->page/offset are used to copy to-be-written
4800 * data in this function or read data in ata_sg_clean.
4801 */
4802 offset = lsg->offset + lsg->length - qc->pad_len;
4803 sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
4804 qc->pad_len, offset_in_page(offset));
4805
4806 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4807 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
4808 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4809 kunmap_atomic(addr, KM_IRQ0);
4810 }
4811
4812 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4813 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4814
4815 /* Trim the last sg entry and chain the original and
4816 * padding sg lists.
4817 *
4818 * Because chaining consumes one sg entry, one extra
4819 * sg entry is allocated and the last sg entry is
4820 * copied to it if the length isn't zero after padded
4821 * amount is removed.
4822 *
4823 * If the last sg entry is completely replaced by
4824 * padding sg entry, the first sg entry is skipped
4825 * while chaining.
4826 */
4827 lsg->length -= qc->pad_len;
4828 if (lsg->length) {
4829 copy_lsg = &qc->extra_sg[0];
4830 tsg = &qc->extra_sg[0];
4831 } else {
4832 n_elem--;
4833 tsg = &qc->extra_sg[1];
4834 }
4835
4836 esg = &qc->extra_sg[1];
4837
4838 (*n_elem_extra)++;
4839 (*nbytes_extra) += 4 - qc->pad_len;
4840 }
4841
4842 if (copy_lsg)
4843 sg_set_page(copy_lsg, sg_page(lsg), lsg->length, lsg->offset);
4844
4845 sg_chain(lsg, 1, tsg);
4846 sg_mark_end(esg);
4847
4848 /* sglist can't start with chaining sg entry, fast forward */
4849 if (qc->sg == lsg) {
4850 qc->sg = tsg;
4851 qc->cursg = tsg;
4852 }
4853
4854 return n_elem;
4855}
4856
4857/** 4935/**
4858 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4936 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4859 * @qc: Command with scatter-gather table to be mapped. 4937 * @qc: Command with scatter-gather table to be mapped.
@@ -4870,26 +4948,17 @@ static unsigned int ata_sg_setup_extra(struct ata_queued_cmd *qc,
4870static int ata_sg_setup(struct ata_queued_cmd *qc) 4948static int ata_sg_setup(struct ata_queued_cmd *qc)
4871{ 4949{
4872 struct ata_port *ap = qc->ap; 4950 struct ata_port *ap = qc->ap;
4873 unsigned int n_elem, n_elem_extra, nbytes_extra; 4951 unsigned int n_elem;
4874 4952
4875 VPRINTK("ENTER, ata%u\n", ap->print_id); 4953 VPRINTK("ENTER, ata%u\n", ap->print_id);
4876 4954
4877 n_elem = ata_sg_setup_extra(qc, &n_elem_extra, &nbytes_extra); 4955 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4956 if (n_elem < 1)
4957 return -1;
4878 4958
4879 if (n_elem) { 4959 DPRINTK("%d sg elements mapped\n", n_elem);
4880 n_elem = dma_map_sg(ap->dev, qc->sg, n_elem, qc->dma_dir);
4881 if (n_elem < 1) {
4882 /* restore last sg */
4883 if (qc->last_sg)
4884 *qc->last_sg = qc->saved_last_sg;
4885 return -1;
4886 }
4887 DPRINTK("%d sg elements mapped\n", n_elem);
4888 }
4889 4960
4890 qc->n_elem = qc->mapped_n_elem = n_elem; 4961 qc->n_elem = n_elem;
4891 qc->n_elem += n_elem_extra;
4892 qc->nbytes += nbytes_extra;
4893 qc->flags |= ATA_QCFLAG_DMAMAP; 4962 qc->flags |= ATA_QCFLAG_DMAMAP;
4894 4963
4895 return 0; 4964 return 0;
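With the pad-buffer and extra-sg machinery removed, ata_sg_setup() and ata_sg_clean() reduce to a direct streaming-DMA pairing. The sketch below shows that generic dma_map_sg()/dma_unmap_sg() pattern in isolation, assuming kernel context and an already-built scatterlist; it is an illustration of the API pairing, not libata's actual bookkeeping.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Illustrative only: the plain streaming-DMA pairing that replaces the
 * old padding/extra-sg path. */
static int example_map_xfer(struct device *dev, struct scatterlist *sgl,
			    unsigned int nents, enum dma_data_direction dir)
{
	int mapped = dma_map_sg(dev, sgl, nents, dir);

	if (mapped == 0)		/* dma_map_sg() returns 0 on failure */
		return -ENOMEM;

	/* ... hand the 'mapped' entries to the controller ... */

	/* Per the DMA API, unmap with the nents originally passed in. */
	dma_unmap_sg(dev, sgl, nents, dir);
	return 0;
}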
@@ -5127,46 +5196,22 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5127 */ 5196 */
5128static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) 5197static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
5129{ 5198{
5130 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 5199 int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
5131 struct ata_port *ap = qc->ap; 5200 struct ata_port *ap = qc->ap;
5132 struct ata_eh_info *ehi = &qc->dev->link->eh_info; 5201 struct ata_device *dev = qc->dev;
5202 struct ata_eh_info *ehi = &dev->link->eh_info;
5133 struct scatterlist *sg; 5203 struct scatterlist *sg;
5134 struct page *page; 5204 struct page *page;
5135 unsigned char *buf; 5205 unsigned char *buf;
5136 unsigned int offset, count; 5206 unsigned int offset, count, consumed;
5137 5207
5138next_sg: 5208next_sg:
5139 sg = qc->cursg; 5209 sg = qc->cursg;
5140 if (unlikely(!sg)) { 5210 if (unlikely(!sg)) {
5141 /* 5211 ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
5142 * The end of qc->sg is reached and the device expects 5212 "buf=%u cur=%u bytes=%u",
5143 * more data to transfer. In order not to overrun qc->sg 5213 qc->nbytes, qc->curbytes, bytes);
5144 * and fulfill length specified in the byte count register, 5214 return -1;
5145 * - for read case, discard trailing data from the device
5146 * - for write case, padding zero data to the device
5147 */
5148 u16 pad_buf[1] = { 0 };
5149 unsigned int i;
5150
5151 if (bytes > qc->curbytes - qc->nbytes + ATAPI_MAX_DRAIN) {
5152 ata_ehi_push_desc(ehi, "too much trailing data "
5153 "buf=%u cur=%u bytes=%u",
5154 qc->nbytes, qc->curbytes, bytes);
5155 return -1;
5156 }
5157
5158 /* overflow is exptected for misc ATAPI commands */
5159 if (bytes && !atapi_qc_may_overflow(qc))
5160 ata_dev_printk(qc->dev, KERN_WARNING, "ATAPI %u bytes "
5161 "trailing data (cdb=%02x nbytes=%u)\n",
5162 bytes, qc->cdb[0], qc->nbytes);
5163
5164 for (i = 0; i < (bytes + 1) / 2; i++)
5165 ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
5166
5167 qc->curbytes += bytes;
5168
5169 return 0;
5170 } 5215 }
5171 5216
5172 page = sg_page(sg); 5217 page = sg_page(sg);
@@ -5192,18 +5237,16 @@ next_sg:
5192 buf = kmap_atomic(page, KM_IRQ0); 5237 buf = kmap_atomic(page, KM_IRQ0);
5193 5238
5194 /* do the actual data transfer */ 5239 /* do the actual data transfer */
5195 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write); 5240 consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
5196 5241
5197 kunmap_atomic(buf, KM_IRQ0); 5242 kunmap_atomic(buf, KM_IRQ0);
5198 local_irq_restore(flags); 5243 local_irq_restore(flags);
5199 } else { 5244 } else {
5200 buf = page_address(page); 5245 buf = page_address(page);
5201 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write); 5246 consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
5202 } 5247 }
5203 5248
5204 bytes -= count; 5249 bytes -= min(bytes, consumed);
5205 if ((count & 1) && bytes)
5206 bytes--;
5207 qc->curbytes += count; 5250 qc->curbytes += count;
5208 qc->cursg_ofs += count; 5251 qc->cursg_ofs += count;
5209 5252
@@ -5212,9 +5255,11 @@ next_sg:
5212 qc->cursg_ofs = 0; 5255 qc->cursg_ofs = 0;
5213 } 5256 }
5214 5257
5258 /* consumed can be larger than count only for the last transfer */
5259 WARN_ON(qc->cursg && count != consumed);
5260
5215 if (bytes) 5261 if (bytes)
5216 goto next_sg; 5262 goto next_sg;
5217
5218 return 0; 5263 return 0;
5219} 5264}
5220 5265
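__atapi_pio_bytes() now trusts the byte count reported back by ->data_xfer(): 'consumed' may exceed 'count' only on the final chunk, and 'bytes -= min(bytes, consumed)' keeps the running total from wrapping. Below is a tiny userspace walk-through of that accounting; the word-rounding behaviour of the fake transfer helper (whole 16-bit words, so an odd tail reports one extra byte) is assumed for illustration rather than taken from the libata helpers.

#include <stdio.h>

/* Assumed behaviour for illustration: whole 16-bit words only, so an
 * odd-length request reports one extra byte consumed. */
static unsigned int fake_data_xfer(unsigned int count)
{
	return (count + 1) & ~1U;
}

int main(void)
{
	unsigned int bytes = 7;			/* bytes the device wants to move */
	unsigned int chunks[] = { 4, 3 };	/* per-sg chunk sizes */

	for (int i = 0; i < 2 && bytes; i++) {
		unsigned int count = chunks[i] < bytes ? chunks[i] : bytes;
		unsigned int consumed = fake_data_xfer(count);

		/* consumed (4, then 4) exceeds count only on the last chunk */
		bytes -= bytes < consumed ? bytes : consumed;
		printf("count=%u consumed=%u bytes left=%u\n", count, consumed, bytes);
	}
	return 0;
}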
@@ -5232,6 +5277,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5232{ 5277{
5233 struct ata_port *ap = qc->ap; 5278 struct ata_port *ap = qc->ap;
5234 struct ata_device *dev = qc->dev; 5279 struct ata_device *dev = qc->dev;
5280 struct ata_eh_info *ehi = &dev->link->eh_info;
5235 unsigned int ireason, bc_lo, bc_hi, bytes; 5281 unsigned int ireason, bc_lo, bc_hi, bytes;
5236 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; 5282 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5237 5283
@@ -5249,26 +5295,28 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5249 5295
5250 /* shall be cleared to zero, indicating xfer of data */ 5296 /* shall be cleared to zero, indicating xfer of data */
5251 if (unlikely(ireason & (1 << 0))) 5297 if (unlikely(ireason & (1 << 0)))
5252 goto err_out; 5298 goto atapi_check;
5253 5299
5254 /* make sure transfer direction matches expected */ 5300 /* make sure transfer direction matches expected */
5255 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0; 5301 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5256 if (unlikely(do_write != i_write)) 5302 if (unlikely(do_write != i_write))
5257 goto err_out; 5303 goto atapi_check;
5258 5304
5259 if (unlikely(!bytes)) 5305 if (unlikely(!bytes))
5260 goto err_out; 5306 goto atapi_check;
5261 5307
5262 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); 5308 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
5263 5309
5264 if (__atapi_pio_bytes(qc, bytes)) 5310 if (unlikely(__atapi_pio_bytes(qc, bytes)))
5265 goto err_out; 5311 goto err_out;
5266 ata_altstatus(ap); /* flush */ 5312 ata_altstatus(ap); /* flush */
5267 5313
5268 return; 5314 return;
5269 5315
5270err_out: 5316 atapi_check:
5271 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n"); 5317 ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
5318 ireason, bytes);
5319 err_out:
5272 qc->err_mask |= AC_ERR_HSM; 5320 qc->err_mask |= AC_ERR_HSM;
5273 ap->hsm_task_state = HSM_ST_ERR; 5321 ap->hsm_task_state = HSM_ST_ERR;
5274} 5322}
@@ -5953,9 +6001,6 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
5953 */ 6001 */
5954 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)); 6002 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
5955 6003
5956 /* ata_sg_setup() may update nbytes */
5957 qc->raw_nbytes = qc->nbytes;
5958
5959 if (ata_is_dma(prot) || (ata_is_pio(prot) && 6004 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5960 (ap->flags & ATA_FLAG_PIO_DMA))) 6005 (ap->flags & ATA_FLAG_PIO_DMA)))
5961 if (ata_sg_setup(qc)) 6006 if (ata_sg_setup(qc))
@@ -6522,8 +6567,6 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6522 ata_lpm_enable(host); 6567 ata_lpm_enable(host);
6523 6568
6524 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); 6569 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
6525 if (rc == 0)
6526 host->dev->power.power_state = mesg;
6527 return rc; 6570 return rc;
6528} 6571}
6529 6572
@@ -6542,7 +6585,6 @@ void ata_host_resume(struct ata_host *host)
6542{ 6585{
6543 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, 6586 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6544 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); 6587 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6545 host->dev->power.power_state = PMSG_ON;
6546 6588
6547 /* reenable link pm */ 6589 /* reenable link pm */
6548 ata_lpm_disable(host); 6590 ata_lpm_disable(host);
@@ -6564,19 +6606,12 @@ void ata_host_resume(struct ata_host *host)
6564int ata_port_start(struct ata_port *ap) 6606int ata_port_start(struct ata_port *ap)
6565{ 6607{
6566 struct device *dev = ap->dev; 6608 struct device *dev = ap->dev;
6567 int rc;
6568 6609
6569 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, 6610 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6570 GFP_KERNEL); 6611 GFP_KERNEL);
6571 if (!ap->prd) 6612 if (!ap->prd)
6572 return -ENOMEM; 6613 return -ENOMEM;
6573 6614
6574 rc = ata_pad_alloc(ap, dev);
6575 if (rc)
6576 return rc;
6577
6578 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6579 (unsigned long long)ap->prd_dma);
6580 return 0; 6615 return 0;
6581} 6616}
6582 6617
@@ -6663,7 +6698,8 @@ void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
6663 */ 6698 */
6664int sata_link_init_spd(struct ata_link *link) 6699int sata_link_init_spd(struct ata_link *link)
6665{ 6700{
6666 u32 scontrol, spd; 6701 u32 scontrol;
6702 u8 spd;
6667 int rc; 6703 int rc;
6668 6704
6669 rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 6705 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
@@ -6674,6 +6710,8 @@ int sata_link_init_spd(struct ata_link *link)
6674 if (spd) 6710 if (spd)
6675 link->hw_sata_spd_limit &= (1 << spd) - 1; 6711 link->hw_sata_spd_limit &= (1 << spd) - 1;
6676 6712
6713 ata_force_spd_limit(link);
6714
6677 link->sata_spd_limit = link->hw_sata_spd_limit; 6715 link->sata_spd_limit = link->hw_sata_spd_limit;
6678 6716
6679 return 0; 6717 return 0;
@@ -7068,7 +7106,6 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7068 DPRINTK("probe begin\n"); 7106 DPRINTK("probe begin\n");
7069 for (i = 0; i < host->n_ports; i++) { 7107 for (i = 0; i < host->n_ports; i++) {
7070 struct ata_port *ap = host->ports[i]; 7108 struct ata_port *ap = host->ports[i];
7071 int rc;
7072 7109
7073 /* probe */ 7110 /* probe */
7074 if (ap->ops->error_handler) { 7111 if (ap->ops->error_handler) {
@@ -7335,7 +7372,7 @@ void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
7335 pci_save_state(pdev); 7372 pci_save_state(pdev);
7336 pci_disable_device(pdev); 7373 pci_disable_device(pdev);
7337 7374
7338 if (mesg.event == PM_EVENT_SUSPEND) 7375 if (mesg.event & PM_EVENT_SLEEP)
7339 pci_set_power_state(pdev, PCI_D3hot); 7376 pci_set_power_state(pdev, PCI_D3hot);
7340} 7377}
7341 7378
@@ -7385,10 +7422,187 @@ int ata_pci_device_resume(struct pci_dev *pdev)
7385 7422
7386#endif /* CONFIG_PCI */ 7423#endif /* CONFIG_PCI */
7387 7424
7425static int __init ata_parse_force_one(char **cur,
7426 struct ata_force_ent *force_ent,
7427 const char **reason)
7428{
7429 /* FIXME: Currently, there's no way to tag init const data and
7430 * using __initdata causes build failure on some versions of
7431 * gcc. Once __initdataconst is implemented, add const to the
7432 * following structure.
7433 */
7434 static struct ata_force_param force_tbl[] __initdata = {
7435 { "40c", .cbl = ATA_CBL_PATA40 },
7436 { "80c", .cbl = ATA_CBL_PATA80 },
7437 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
7438 { "unk", .cbl = ATA_CBL_PATA_UNK },
7439 { "ign", .cbl = ATA_CBL_PATA_IGN },
7440 { "sata", .cbl = ATA_CBL_SATA },
7441 { "1.5Gbps", .spd_limit = 1 },
7442 { "3.0Gbps", .spd_limit = 2 },
7443 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
7444 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
7445 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
7446 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
7447 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
7448 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
7449 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
7450 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
7451 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
7452 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
7453 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
7454 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
7455 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
7456 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
7457 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7458 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7459 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7460 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7461 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7462 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7463 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7464 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7465 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7466 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7467 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7468 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7469 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7470 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7471 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7472 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7473 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7474 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7475 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7476 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7477 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7478 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
7479 };
7480 char *start = *cur, *p = *cur;
7481 char *id, *val, *endp;
7482 const struct ata_force_param *match_fp = NULL;
7483 int nr_matches = 0, i;
7484
7485 /* find where this param ends and update *cur */
7486 while (*p != '\0' && *p != ',')
7487 p++;
7488
7489 if (*p == '\0')
7490 *cur = p;
7491 else
7492 *cur = p + 1;
7493
7494 *p = '\0';
7495
7496 /* parse */
7497 p = strchr(start, ':');
7498 if (!p) {
7499 val = strstrip(start);
7500 goto parse_val;
7501 }
7502 *p = '\0';
7503
7504 id = strstrip(start);
7505 val = strstrip(p + 1);
7506
7507 /* parse id */
7508 p = strchr(id, '.');
7509 if (p) {
7510 *p++ = '\0';
7511 force_ent->device = simple_strtoul(p, &endp, 10);
7512 if (p == endp || *endp != '\0') {
7513 *reason = "invalid device";
7514 return -EINVAL;
7515 }
7516 }
7517
7518 force_ent->port = simple_strtoul(id, &endp, 10);
 7519 if (id == endp || *endp != '\0') {
7520 *reason = "invalid port/link";
7521 return -EINVAL;
7522 }
7523
7524 parse_val:
7525 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
7526 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
7527 const struct ata_force_param *fp = &force_tbl[i];
7528
7529 if (strncasecmp(val, fp->name, strlen(val)))
7530 continue;
7531
7532 nr_matches++;
7533 match_fp = fp;
7534
7535 if (strcasecmp(val, fp->name) == 0) {
7536 nr_matches = 1;
7537 break;
7538 }
7539 }
7540
7541 if (!nr_matches) {
7542 *reason = "unknown value";
7543 return -EINVAL;
7544 }
7545 if (nr_matches > 1) {
 7546 *reason = "ambiguous value";
7547 return -EINVAL;
7548 }
7549
7550 force_ent->param = *match_fp;
7551
7552 return 0;
7553}
7554
7555static void __init ata_parse_force_param(void)
7556{
7557 int idx = 0, size = 1;
7558 int last_port = -1, last_device = -1;
7559 char *p, *cur, *next;
7560
7561 /* calculate maximum number of params and allocate force_tbl */
7562 for (p = ata_force_param_buf; *p; p++)
7563 if (*p == ',')
7564 size++;
7565
7566 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
7567 if (!ata_force_tbl) {
7568 printk(KERN_WARNING "ata: failed to extend force table, "
7569 "libata.force ignored\n");
7570 return;
7571 }
7572
7573 /* parse and populate the table */
7574 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
7575 const char *reason = "";
7576 struct ata_force_ent te = { .port = -1, .device = -1 };
7577
7578 next = cur;
7579 if (ata_parse_force_one(&next, &te, &reason)) {
7580 printk(KERN_WARNING "ata: failed to parse force "
7581 "parameter \"%s\" (%s)\n",
7582 cur, reason);
7583 continue;
7584 }
7585
7586 if (te.port == -1) {
7587 te.port = last_port;
7588 te.device = last_device;
7589 }
7590
7591 ata_force_tbl[idx++] = te;
7592
7593 last_port = te.port;
7594 last_device = te.device;
7595 }
7596
7597 ata_force_tbl_size = idx;
7598}
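For reference, each force entry takes the form [PORT[.DEVICE]:]VALUE, so a parameter such as libata.force=2:1.5Gbps,2.15:noncq limits port 2 to 1.5Gbps and marks the first device on its host link NONCQ. The fragment below repeats the id-splitting steps on one sample string in plain userspace C, with strtol/strchr standing in for the kernel helpers and no value-table lookup; it is purely an illustration of the format, not the parser itself.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace illustration of how "PORT[.DEVICE]:VALUE" splits apart
 * (simplified: no whitespace stripping, no value table lookup). */
int main(void)
{
	char spec[] = "2.15:noncq";
	long port = -1, device = -1;
	char *val, *dot, *end;

	val = strchr(spec, ':');
	if (val) {
		*val++ = '\0';			/* spec now holds the id part */
		dot = strchr(spec, '.');
		if (dot) {
			*dot++ = '\0';
			device = strtol(dot, &end, 10);
		}
		port = strtol(spec, &end, 10);
	} else {
		val = spec;			/* bare value, id inherited */
	}

	printf("port=%ld device=%ld value=%s\n", port, device, val);
	/* prints: port=2 device=15 value=noncq */
	return 0;
}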
7388 7599
7389static int __init ata_init(void) 7600static int __init ata_init(void)
7390{ 7601{
7391 ata_probe_timeout *= HZ; 7602 ata_probe_timeout *= HZ;
7603
7604 ata_parse_force_param();
7605
7392 ata_wq = create_workqueue("ata"); 7606 ata_wq = create_workqueue("ata");
7393 if (!ata_wq) 7607 if (!ata_wq)
7394 return -ENOMEM; 7608 return -ENOMEM;
@@ -7405,6 +7619,7 @@ static int __init ata_init(void)
7405 7619
7406static void __exit ata_exit(void) 7620static void __exit ata_exit(void)
7407{ 7621{
7622 kfree(ata_force_tbl);
7408 destroy_workqueue(ata_wq); 7623 destroy_workqueue(ata_wq);
7409 destroy_workqueue(ata_aux_wq); 7624 destroy_workqueue(ata_aux_wq);
7410} 7625}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 4e31071acc02..698ce2cea52c 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2393,9 +2393,11 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
2393 } 2393 }
2394 2394
2395 /* PDIAG- should have been released, ask cable type if post-reset */ 2395 /* PDIAG- should have been released, ask cable type if post-reset */
2396 if (ata_is_host_link(link) && ap->ops->cable_detect && 2396 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
2397 (ehc->i.flags & ATA_EHI_DID_RESET)) 2397 if (ap->ops->cable_detect)
2398 ap->cbl = ap->ops->cable_detect(ap); 2398 ap->cbl = ap->ops->cable_detect(ap);
2399 ata_force_cbl(ap);
2400 }
2399 2401
2400 /* Configure new devices forward such that user doesn't see 2402 /* Configure new devices forward such that user doesn't see
2401 * device detection messages backwards. 2403 * device detection messages backwards.
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index caef2bbd4a8a..d91f5090ba9d 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -35,7 +35,7 @@ static unsigned int sata_pmp_read(struct ata_link *link, int reg, u32 *r_val)
35 ata_tf_init(pmp_dev, &tf); 35 ata_tf_init(pmp_dev, &tf);
36 tf.command = ATA_CMD_PMP_READ; 36 tf.command = ATA_CMD_PMP_READ;
37 tf.protocol = ATA_PROT_NODATA; 37 tf.protocol = ATA_PROT_NODATA;
38 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 38 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
39 tf.feature = reg; 39 tf.feature = reg;
40 tf.device = link->pmp; 40 tf.device = link->pmp;
41 41
@@ -71,7 +71,7 @@ static unsigned int sata_pmp_write(struct ata_link *link, int reg, u32 val)
71 ata_tf_init(pmp_dev, &tf); 71 ata_tf_init(pmp_dev, &tf);
72 tf.command = ATA_CMD_PMP_WRITE; 72 tf.command = ATA_CMD_PMP_WRITE;
73 tf.protocol = ATA_PROT_NODATA; 73 tf.protocol = ATA_PROT_NODATA;
74 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 74 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
75 tf.feature = reg; 75 tf.feature = reg;
76 tf.device = link->pmp; 76 tf.device = link->pmp;
77 tf.nsect = val & 0xff; 77 tf.nsect = val & 0xff;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index c02c490122dc..0562b0a49f3b 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -826,30 +826,61 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
826 sdev->max_device_blocked = 1; 826 sdev->max_device_blocked = 1;
827} 827}
828 828
829static void ata_scsi_dev_config(struct scsi_device *sdev, 829/**
830 struct ata_device *dev) 830 * atapi_drain_needed - Check whether data transfer may overflow
831 * @rq: request to be checked
832 *
833 * ATAPI commands which transfer variable length data to host
 834 * might overflow due to application error or hardware bug. This
835 * function checks whether overflow should be drained and ignored
 836 * for @rq.
837 *
838 * LOCKING:
839 * None.
840 *
841 * RETURNS:
 842 * 1 if @rq needs to be drained; otherwise, 0.
843 */
844static int atapi_drain_needed(struct request *rq)
845{
846 if (likely(!blk_pc_request(rq)))
847 return 0;
848
849 if (!rq->data_len || (rq->cmd_flags & REQ_RW))
850 return 0;
851
852 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
853}
854
855static int ata_scsi_dev_config(struct scsi_device *sdev,
856 struct ata_device *dev)
831{ 857{
832 /* configure max sectors */ 858 /* configure max sectors */
833 blk_queue_max_sectors(sdev->request_queue, dev->max_sectors); 859 blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
834 860
835 /* SATA DMA transfers must be multiples of 4 byte, so
836 * we need to pad ATAPI transfers using an extra sg.
837 * Decrement max hw segments accordingly.
838 */
839 if (dev->class == ATA_DEV_ATAPI) { 861 if (dev->class == ATA_DEV_ATAPI) {
840 struct request_queue *q = sdev->request_queue; 862 struct request_queue *q = sdev->request_queue;
841 blk_queue_max_hw_segments(q, q->max_hw_segments - 1); 863 void *buf;
842 864
843 /* set the min alignment */ 865 /* set the min alignment */
844 blk_queue_update_dma_alignment(sdev->request_queue, 866 blk_queue_update_dma_alignment(sdev->request_queue,
845 ATA_DMA_PAD_SZ - 1); 867 ATA_DMA_PAD_SZ - 1);
846 } else 868
869 /* configure draining */
870 buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
871 if (!buf) {
872 ata_dev_printk(dev, KERN_ERR,
873 "drain buffer allocation failed\n");
874 return -ENOMEM;
875 }
876
877 blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
878 } else {
847 /* ATA devices must be sector aligned */ 879 /* ATA devices must be sector aligned */
848 blk_queue_update_dma_alignment(sdev->request_queue, 880 blk_queue_update_dma_alignment(sdev->request_queue,
849 ATA_SECT_SIZE - 1); 881 ATA_SECT_SIZE - 1);
850
851 if (dev->class == ATA_DEV_ATA)
852 sdev->manage_start_stop = 1; 882 sdev->manage_start_stop = 1;
883 }
853 884
854 if (dev->flags & ATA_DFLAG_AN) 885 if (dev->flags & ATA_DFLAG_AN)
855 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); 886 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
@@ -861,6 +892,8 @@ static void ata_scsi_dev_config(struct scsi_device *sdev,
861 depth = min(ATA_MAX_QUEUE - 1, depth); 892 depth = min(ATA_MAX_QUEUE - 1, depth);
862 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); 893 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
863 } 894 }
895
896 return 0;
864} 897}
865 898
866/** 899/**
@@ -879,13 +912,14 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
879{ 912{
880 struct ata_port *ap = ata_shost_to_port(sdev->host); 913 struct ata_port *ap = ata_shost_to_port(sdev->host);
881 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev); 914 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
915 int rc = 0;
882 916
883 ata_scsi_sdev_config(sdev); 917 ata_scsi_sdev_config(sdev);
884 918
885 if (dev) 919 if (dev)
886 ata_scsi_dev_config(sdev, dev); 920 rc = ata_scsi_dev_config(sdev, dev);
887 921
888 return 0; 922 return rc;
889} 923}
890 924
891/** 925/**
@@ -905,6 +939,7 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
905void ata_scsi_slave_destroy(struct scsi_device *sdev) 939void ata_scsi_slave_destroy(struct scsi_device *sdev)
906{ 940{
907 struct ata_port *ap = ata_shost_to_port(sdev->host); 941 struct ata_port *ap = ata_shost_to_port(sdev->host);
942 struct request_queue *q = sdev->request_queue;
908 unsigned long flags; 943 unsigned long flags;
909 struct ata_device *dev; 944 struct ata_device *dev;
910 945
@@ -920,6 +955,10 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
920 ata_port_schedule_eh(ap); 955 ata_port_schedule_eh(ap);
921 } 956 }
922 spin_unlock_irqrestore(ap->lock, flags); 957 spin_unlock_irqrestore(ap->lock, flags);
958
959 kfree(q->dma_drain_buffer);
960 q->dma_drain_buffer = NULL;
961 q->dma_drain_size = 0;
923} 962}
924 963
925/** 964/**
@@ -1862,7 +1901,7 @@ unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
1862 * spin_lock_irqsave(host lock) 1901 * spin_lock_irqsave(host lock)
1863 */ 1902 */
1864 1903
1865unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf, 1904static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf,
1866 unsigned int buflen) 1905 unsigned int buflen)
1867{ 1906{
1868 u8 pbuf[60]; 1907 u8 pbuf[60];
@@ -2500,7 +2539,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
2500 * want to set it properly, and for DMA where it is 2539 * want to set it properly, and for DMA where it is
2501 * effectively meaningless. 2540 * effectively meaningless.
2502 */ 2541 */
2503 nbytes = min(qc->nbytes, (unsigned int)63 * 1024); 2542 nbytes = min(scmd->request->raw_data_len, (unsigned int)63 * 1024);
2504 2543
2505 /* Most ATAPI devices which honor transfer chunk size don't 2544 /* Most ATAPI devices which honor transfer chunk size don't
2506 * behave according to the spec when odd chunk size which 2545 * behave according to the spec when odd chunk size which
@@ -2543,7 +2582,8 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
2543 qc->tf.protocol = ATAPI_PROT_DMA; 2582 qc->tf.protocol = ATAPI_PROT_DMA;
2544 qc->tf.feature |= ATAPI_PKT_DMA; 2583 qc->tf.feature |= ATAPI_PKT_DMA;
2545 2584
2546 if (atapi_dmadir && (scmd->sc_data_direction != DMA_TO_DEVICE)) 2585 if ((dev->flags & ATA_DFLAG_DMADIR) &&
2586 (scmd->sc_data_direction != DMA_TO_DEVICE))
2547 /* some SATA bridges need us to indicate data xfer direction */ 2587 /* some SATA bridges need us to indicate data xfer direction */
2548 qc->tf.feature |= ATAPI_DMADIR; 2588 qc->tf.feature |= ATAPI_DMADIR;
2549 } 2589 }
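Tying the two DMADIR hunks together: ata_dev_configure() now latches ATA_DFLAG_DMADIR when either the atapi_dmadir module parameter is set or the IDENTIFY data advertises it (atapi_id_dmadir), and atapi_xlat() keys off that per-device flag instead of the global parameter when deciding whether to set the ATAPI_DMADIR feature bit for device-to-host DMA. A stripped-down restatement of that decision, kernel context assumed, for illustration only:

/* Illustrative restatement of the DMADIR decision spread across the
 * ata_dev_configure()/atapi_xlat() hunks above. */
static void example_set_dmadir(struct ata_queued_cmd *qc,
			       struct scsi_cmnd *scmd)
{
	struct ata_device *dev = qc->dev;

	if ((dev->flags & ATA_DFLAG_DMADIR) &&
	    scmd->sc_data_direction != DMA_TO_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;	/* bridge needs the xfer direction */
}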
@@ -3555,7 +3595,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
3555 * @ap: Port to initialize 3595 * @ap: Port to initialize
3556 * 3596 *
3557 * Called just after data structures for each port are 3597 * Called just after data structures for each port are
3558 * initialized. Allocates DMA pad. 3598 * initialized.
3559 * 3599 *
3560 * May be used as the port_start() entry in ata_port_operations. 3600 * May be used as the port_start() entry in ata_port_operations.
3561 * 3601 *
@@ -3564,7 +3604,7 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
3564 */ 3604 */
3565int ata_sas_port_start(struct ata_port *ap) 3605int ata_sas_port_start(struct ata_port *ap)
3566{ 3606{
3567 return ata_pad_alloc(ap, ap->dev); 3607 return 0;
3568} 3608}
3569EXPORT_SYMBOL_GPL(ata_sas_port_start); 3609EXPORT_SYMBOL_GPL(ata_sas_port_start);
3570 3610
@@ -3572,8 +3612,6 @@ EXPORT_SYMBOL_GPL(ata_sas_port_start);
3572 * ata_port_stop - Undo ata_sas_port_start() 3612 * ata_port_stop - Undo ata_sas_port_start()
3573 * @ap: Port to shut down 3613 * @ap: Port to shut down
3574 * 3614 *
3575 * Frees the DMA pad.
3576 *
3577 * May be used as the port_stop() entry in ata_port_operations. 3615 * May be used as the port_stop() entry in ata_port_operations.
3578 * 3616 *
3579 * LOCKING: 3617 * LOCKING:
@@ -3582,7 +3620,6 @@ EXPORT_SYMBOL_GPL(ata_sas_port_start);
3582 3620
3583void ata_sas_port_stop(struct ata_port *ap) 3621void ata_sas_port_stop(struct ata_port *ap)
3584{ 3622{
3585 ata_pad_free(ap, ap->dev);
3586} 3623}
3587EXPORT_SYMBOL_GPL(ata_sas_port_stop); 3624EXPORT_SYMBOL_GPL(ata_sas_port_stop);
3588 3625
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 409ffb9af163..6036dedfe377 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -61,6 +61,7 @@ extern int atapi_passthru16;
61extern int libata_fua; 61extern int libata_fua;
62extern int libata_noacpi; 62extern int libata_noacpi;
63extern int libata_allow_tpm; 63extern int libata_allow_tpm;
64extern void ata_force_cbl(struct ata_port *ap);
64extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev); 65extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
65extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, 66extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
66 u64 block, u32 n_block, unsigned int tf_flags, 67 u64 block, u32 n_block, unsigned int tf_flags,
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 244098a80ce4..bdc3b9d7395c 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -77,8 +77,8 @@ static int pacpi_cable_detect(struct ata_port *ap)
77 77
78static void pacpi_error_handler(struct ata_port *ap) 78static void pacpi_error_handler(struct ata_port *ap)
79{ 79{
80 return ata_bmdma_drive_eh(ap, pacpi_pre_reset, ata_std_softreset, 80 ata_bmdma_drive_eh(ap, pacpi_pre_reset, ata_std_softreset, NULL,
81 NULL, ata_std_postreset); 81 ata_std_postreset);
82} 82}
83 83
84/** 84/**
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 761a66608d7b..4b8d9b592ca4 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -146,9 +146,8 @@ static int amd_pre_reset(struct ata_link *link, unsigned long deadline)
146 146
147static void amd_error_handler(struct ata_port *ap) 147static void amd_error_handler(struct ata_port *ap)
148{ 148{
149 return ata_bmdma_drive_eh(ap, amd_pre_reset, 149 ata_bmdma_drive_eh(ap, amd_pre_reset, ata_std_softreset, NULL,
150 ata_std_softreset, NULL, 150 ata_std_postreset);
151 ata_std_postreset);
152} 151}
153 152
154static int amd_cable_detect(struct ata_port *ap) 153static int amd_cable_detect(struct ata_port *ap)
@@ -506,7 +505,6 @@ static struct ata_port_operations amd133_port_ops = {
506static struct ata_port_operations nv100_port_ops = { 505static struct ata_port_operations nv100_port_ops = {
507 .set_piomode = nv100_set_piomode, 506 .set_piomode = nv100_set_piomode,
508 .set_dmamode = nv100_set_dmamode, 507 .set_dmamode = nv100_set_dmamode,
509 .mode_filter = ata_pci_default_filter,
510 .tf_load = ata_tf_load, 508 .tf_load = ata_tf_load,
511 .tf_read = ata_tf_read, 509 .tf_read = ata_tf_read,
512 .check_status = ata_check_status, 510 .check_status = ata_check_status,
@@ -541,7 +539,6 @@ static struct ata_port_operations nv100_port_ops = {
541static struct ata_port_operations nv133_port_ops = { 539static struct ata_port_operations nv133_port_ops = {
542 .set_piomode = nv133_set_piomode, 540 .set_piomode = nv133_set_piomode,
543 .set_dmamode = nv133_set_dmamode, 541 .set_dmamode = nv133_set_dmamode,
544 .mode_filter = ata_pci_default_filter,
545 .tf_load = ata_tf_load, 542 .tf_load = ata_tf_load,
546 .tf_read = ata_tf_read, 543 .tf_read = ata_tf_read,
547 .check_status = ata_check_status, 544 .check_status = ata_check_status,
@@ -772,7 +769,7 @@ static void __exit amd_exit(void)
772} 769}
773 770
774MODULE_AUTHOR("Alan Cox"); 771MODULE_AUTHOR("Alan Cox");
775MODULE_DESCRIPTION("low-level driver for AMD PATA IDE"); 772MODULE_DESCRIPTION("low-level driver for AMD and Nvidia PATA IDE");
776MODULE_LICENSE("GPL"); 773MODULE_LICENSE("GPL");
777MODULE_DEVICE_TABLE(pci, amd); 774MODULE_DEVICE_TABLE(pci, amd);
778MODULE_VERSION(DRV_VERSION); 775MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 9623f5295530..408bdc1a9776 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -227,7 +227,7 @@ static struct scsi_host_template atiixp_sht = {
227 .queuecommand = ata_scsi_queuecmd, 227 .queuecommand = ata_scsi_queuecmd,
228 .can_queue = ATA_DEF_QUEUE, 228 .can_queue = ATA_DEF_QUEUE,
229 .this_id = ATA_SHT_THIS_ID, 229 .this_id = ATA_SHT_THIS_ID,
230 .sg_tablesize = LIBATA_MAX_PRD, 230 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
231 .cmd_per_lun = ATA_SHT_CMD_PER_LUN, 231 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
232 .emulated = ATA_SHT_EMULATED, 232 .emulated = ATA_SHT_EMULATED,
233 .use_clustering = ATA_SHT_USE_CLUSTERING, 233 .use_clustering = ATA_SHT_USE_CLUSTERING,
@@ -259,7 +259,7 @@ static struct ata_port_operations atiixp_port_ops = {
259 .bmdma_stop = atiixp_bmdma_stop, 259 .bmdma_stop = atiixp_bmdma_stop,
260 .bmdma_status = ata_bmdma_status, 260 .bmdma_status = ata_bmdma_status,
261 261
262 .qc_prep = ata_qc_prep, 262 .qc_prep = ata_dumb_qc_prep,
263 .qc_issue = ata_qc_issue_prot, 263 .qc_issue = ata_qc_issue_prot,
264 264
265 .data_xfer = ata_data_xfer, 265 .data_xfer = ata_data_xfer,
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index d753e568588e..1c4ff9b52b5c 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -40,7 +40,7 @@
40#include <asm/msr.h> 40#include <asm/msr.h>
41 41
42#define DRV_NAME "pata_cs5536" 42#define DRV_NAME "pata_cs5536"
43#define DRV_VERSION "0.0.6" 43#define DRV_VERSION "0.0.7"
44 44
45enum { 45enum {
46 CFG = 0, 46 CFG = 0,
@@ -85,7 +85,7 @@ static const u8 pci_reg[4] = {
85 PCI_IDE_CFG, PCI_IDE_DTC, PCI_IDE_CAST, PCI_IDE_ETC, 85 PCI_IDE_CFG, PCI_IDE_DTC, PCI_IDE_CAST, PCI_IDE_ETC,
86}; 86};
87 87
88static inline int cs5536_read(struct pci_dev *pdev, int reg, int *val) 88static inline int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
89{ 89{
90 if (unlikely(use_msr)) { 90 if (unlikely(use_msr)) {
91 u32 dummy; 91 u32 dummy;
@@ -153,8 +153,8 @@ static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev)
153 struct ata_device *pair = ata_dev_pair(adev); 153 struct ata_device *pair = ata_dev_pair(adev);
154 int mode = adev->pio_mode - XFER_PIO_0; 154 int mode = adev->pio_mode - XFER_PIO_0;
155 int cmdmode = mode; 155 int cmdmode = mode;
156 int dshift = ap->port_no ? IDE_D1_SHIFT : IDE_D0_SHIFT; 156 int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
157 int cshift = ap->port_no ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT; 157 int cshift = adev->devno ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
158 u32 dtc, cast, etc; 158 u32 dtc, cast, etc;
159 159
160 if (pair) 160 if (pair)
@@ -201,7 +201,7 @@ static void cs5536_set_dmamode(struct ata_port *ap, struct ata_device *adev)
201 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 201 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
202 u32 dtc, etc; 202 u32 dtc, etc;
203 int mode = adev->dma_mode; 203 int mode = adev->dma_mode;
204 int dshift = ap->port_no ? IDE_D1_SHIFT : IDE_D0_SHIFT; 204 int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
205 205
206 if (mode >= XFER_UDMA_0) { 206 if (mode >= XFER_UDMA_0) {
207 cs5536_read(pdev, ETC, &etc); 207 cs5536_read(pdev, ETC, &etc);
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 5b8586dac63b..f97068be2d79 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -304,12 +304,6 @@ static int icside_dma_init(struct pata_icside_info *info)
304} 304}
305 305
306 306
307static int pata_icside_port_start(struct ata_port *ap)
308{
309 /* No PRD to alloc */
310 return ata_pad_alloc(ap, ap->dev);
311}
312
313static struct scsi_host_template pata_icside_sht = { 307static struct scsi_host_template pata_icside_sht = {
314 .module = THIS_MODULE, 308 .module = THIS_MODULE,
315 .name = DRV_NAME, 309 .name = DRV_NAME,
@@ -389,8 +383,6 @@ static struct ata_port_operations pata_icside_port_ops = {
389 .irq_clear = ata_dummy_noret, 383 .irq_clear = ata_dummy_noret,
390 .irq_on = ata_irq_on, 384 .irq_on = ata_irq_on,
391 385
392 .port_start = pata_icside_port_start,
393
394 .bmdma_stop = pata_icside_bmdma_stop, 386 .bmdma_stop = pata_icside_bmdma_stop,
395 .bmdma_status = pata_icside_bmdma_status, 387 .bmdma_status = pata_icside_bmdma_status,
396}; 388};
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 5b8174d94067..00bbbbd50e97 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -115,7 +115,8 @@ static int jmicron_pre_reset(struct ata_link *link, unsigned long deadline)
115 115
116static void jmicron_error_handler(struct ata_port *ap) 116static void jmicron_error_handler(struct ata_port *ap)
117{ 117{
118 return ata_bmdma_drive_eh(ap, jmicron_pre_reset, ata_std_softreset, NULL, ata_std_postreset); 118 ata_bmdma_drive_eh(ap, jmicron_pre_reset, ata_std_softreset, NULL,
119 ata_std_postreset);
119} 120}
120 121
121/* No PIO or DMA methods needed for this device */ 122/* No PIO or DMA methods needed for this device */
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 333dc15f8ccf..50fe08ebe23c 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -127,7 +127,7 @@ static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */
127static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */ 127static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */
128static int qdi; /* Set to probe QDI controllers */ 128static int qdi; /* Set to probe QDI controllers */
129static int winbond; /* Set to probe Winbond controllers, 129static int winbond; /* Set to probe Winbond controllers,
130 give I/O port if non stdanard */ 130 give I/O port if non standard */
131static int autospeed; /* Chip present which snoops speed changes */ 131static int autospeed; /* Chip present which snoops speed changes */
132static int pio_mask = 0x1F; /* PIO range for autospeed devices */ 132static int pio_mask = 0x1F; /* PIO range for autospeed devices */
133static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */ 133static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
@@ -774,14 +774,14 @@ static struct ata_port_operations opti82c46x_port_ops = {
774static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev) 774static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
775{ 775{
776 struct ata_timing t; 776 struct ata_timing t;
777 struct legacy_data *qdi = ap->host->private_data; 777 struct legacy_data *ld_qdi = ap->host->private_data;
778 int active, recovery; 778 int active, recovery;
779 u8 timing; 779 u8 timing;
780 780
781 /* Get the timing data in cycles */ 781 /* Get the timing data in cycles */
782 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000); 782 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
783 783
784 if (qdi->fast) { 784 if (ld_qdi->fast) {
785 active = 8 - FIT(t.active, 1, 8); 785 active = 8 - FIT(t.active, 1, 8);
786 recovery = 18 - FIT(t.recover, 3, 18); 786 recovery = 18 - FIT(t.recover, 3, 18);
787 } else { 787 } else {
@@ -790,9 +790,9 @@ static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
790 } 790 }
791 timing = (recovery << 4) | active | 0x08; 791 timing = (recovery << 4) | active | 0x08;
792 792
793 qdi->clock[adev->devno] = timing; 793 ld_qdi->clock[adev->devno] = timing;
794 794
795 outb(timing, qdi->timing); 795 outb(timing, ld_qdi->timing);
796} 796}
797 797
798/** 798/**
@@ -808,14 +808,14 @@ static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
808static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev) 808static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
809{ 809{
810 struct ata_timing t; 810 struct ata_timing t;
811 struct legacy_data *qdi = ap->host->private_data; 811 struct legacy_data *ld_qdi = ap->host->private_data;
812 int active, recovery; 812 int active, recovery;
813 u8 timing; 813 u8 timing;
814 814
815 /* Get the timing data in cycles */ 815 /* Get the timing data in cycles */
816 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000); 816 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
817 817
818 if (qdi->fast) { 818 if (ld_qdi->fast) {
819 active = 8 - FIT(t.active, 1, 8); 819 active = 8 - FIT(t.active, 1, 8);
820 recovery = 18 - FIT(t.recover, 3, 18); 820 recovery = 18 - FIT(t.recover, 3, 18);
821 } else { 821 } else {
@@ -824,12 +824,12 @@ static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
824 } 824 }
825 timing = (recovery << 4) | active | 0x08; 825 timing = (recovery << 4) | active | 0x08;
826 826
827 qdi->clock[adev->devno] = timing; 827 ld_qdi->clock[adev->devno] = timing;
828 828
829 outb(timing, qdi->timing + 2 * ap->port_no); 829 outb(timing, ld_qdi->timing + 2 * ap->port_no);
830 /* Clear the FIFO */ 830 /* Clear the FIFO */
831 if (adev->class != ATA_DEV_ATA) 831 if (adev->class != ATA_DEV_ATA)
832 outb(0x5F, qdi->timing + 3); 832 outb(0x5F, ld_qdi->timing + 3);
833} 833}
834 834
835/** 835/**
@@ -845,14 +845,14 @@ static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
845static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev) 845static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
846{ 846{
847 struct ata_timing t; 847 struct ata_timing t;
848 struct legacy_data *qdi = ap->host->private_data; 848 struct legacy_data *ld_qdi = ap->host->private_data;
849 int active, recovery; 849 int active, recovery;
850 u8 timing; 850 u8 timing;
851 851
852 /* Get the timing data in cycles */ 852 /* Get the timing data in cycles */
853 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000); 853 ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
854 854
855 if (qdi->fast) { 855 if (ld_qdi->fast) {
856 active = 8 - FIT(t.active, 1, 8); 856 active = 8 - FIT(t.active, 1, 8);
857 recovery = 18 - FIT(t.recover, 3, 18); 857 recovery = 18 - FIT(t.recover, 3, 18);
858 } else { 858 } else {
@@ -860,11 +860,11 @@ static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
860 recovery = 15 - FIT(t.recover, 0, 15); 860 recovery = 15 - FIT(t.recover, 0, 15);
861 } 861 }
862 timing = (recovery << 4) | active | 0x08; 862 timing = (recovery << 4) | active | 0x08;
863 qdi->clock[adev->devno] = timing; 863 ld_qdi->clock[adev->devno] = timing;
864 outb(timing, qdi->timing + 2 * adev->devno); 864 outb(timing, ld_qdi->timing + 2 * adev->devno);
865 /* Clear the FIFO */ 865 /* Clear the FIFO */
866 if (adev->class != ATA_DEV_ATA) 866 if (adev->class != ATA_DEV_ATA)
867 outb(0x5F, qdi->timing + 3); 867 outb(0x5F, ld_qdi->timing + 3);
868} 868}
869 869
870/** 870/**
@@ -879,12 +879,12 @@ static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
879{ 879{
880 struct ata_port *ap = qc->ap; 880 struct ata_port *ap = qc->ap;
881 struct ata_device *adev = qc->dev; 881 struct ata_device *adev = qc->dev;
882 struct legacy_data *qdi = ap->host->private_data; 882 struct legacy_data *ld_qdi = ap->host->private_data;
883 883
884 if (qdi->clock[adev->devno] != qdi->last) { 884 if (ld_qdi->clock[adev->devno] != ld_qdi->last) {
885 if (adev->pio_mode) { 885 if (adev->pio_mode) {
886 qdi->last = qdi->clock[adev->devno]; 886 ld_qdi->last = ld_qdi->clock[adev->devno];
887 outb(qdi->clock[adev->devno], qdi->timing + 887 outb(ld_qdi->clock[adev->devno], ld_qdi->timing +
888 2 * ap->port_no); 888 2 * ap->port_no);
889 } 889 }
890 } 890 }
@@ -1037,12 +1037,12 @@ static u8 winbond_readcfg(unsigned long port, u8 reg)
1037static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev) 1037static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
1038{ 1038{
1039 struct ata_timing t; 1039 struct ata_timing t;
1040 struct legacy_data *winbond = ap->host->private_data; 1040 struct legacy_data *ld_winbond = ap->host->private_data;
1041 int active, recovery; 1041 int active, recovery;
1042 u8 reg; 1042 u8 reg;
1043 int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2); 1043 int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
1044 1044
1045 reg = winbond_readcfg(winbond->timing, 0x81); 1045 reg = winbond_readcfg(ld_winbond->timing, 0x81);
1046 1046
1047 /* Get the timing data in cycles */ 1047 /* Get the timing data in cycles */
1048 if (reg & 0x40) /* Fast VLB bus, assume 50MHz */ 1048 if (reg & 0x40) /* Fast VLB bus, assume 50MHz */
@@ -1053,7 +1053,7 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
1053 active = (FIT(t.active, 3, 17) - 1) & 0x0F; 1053 active = (FIT(t.active, 3, 17) - 1) & 0x0F;
1054 recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F; 1054 recovery = (FIT(t.recover, 1, 15) + 1) & 0x0F;
1055 timing = (active << 4) | recovery; 1055 timing = (active << 4) | recovery;
1056 winbond_writecfg(winbond->timing, timing, reg); 1056 winbond_writecfg(ld_winbond->timing, timing, reg);
1057 1057
1058 /* Load the setup timing */ 1058 /* Load the setup timing */
1059 1059
@@ -1063,7 +1063,7 @@ static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
1063 if (!ata_pio_need_iordy(adev)) 1063 if (!ata_pio_need_iordy(adev))
1064 reg |= 0x02; /* IORDY off */ 1064 reg |= 0x02; /* IORDY off */
1065 reg |= (FIT(t.setup, 0, 3) << 6); 1065 reg |= (FIT(t.setup, 0, 3) << 6);
1066 winbond_writecfg(winbond->timing, timing + 1, reg); 1066 winbond_writecfg(ld_winbond->timing, timing + 1, reg);
1067} 1067}
1068 1068
1069static int winbond_port(struct platform_device *dev, 1069static int winbond_port(struct platform_device *dev,
@@ -1278,8 +1278,6 @@ static __init int legacy_init_one(struct legacy_probe *probe)
1278 } 1278 }
1279 } 1279 }
1280fail: 1280fail:
1281 if (host)
1282 ata_host_detach(host);
1283 platform_device_unregister(pdev); 1281 platform_device_unregister(pdev);
1284 return ret; 1282 return ret;
1285} 1283}
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 9afc8a32b226..a81f25d87235 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -85,8 +85,8 @@ static int marvell_cable_detect(struct ata_port *ap)
85 85
86static void marvell_error_handler(struct ata_port *ap) 86static void marvell_error_handler(struct ata_port *ap)
87{ 87{
88 return ata_bmdma_drive_eh(ap, marvell_pre_reset, ata_std_softreset, 88 ata_bmdma_drive_eh(ap, marvell_pre_reset, ata_std_softreset, NULL,
89 NULL, ata_std_postreset); 89 ata_std_postreset);
90} 90}
91 91
92/* No PIO or DMA methods needed for this device */ 92/* No PIO or DMA methods needed for this device */
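marvell_error_handler() returns void, and returning the value of another void call from it is an ISO C constraint violation that some compilers flag (gcc does so under -pedantic); the identical one-line fix is applied to vt6420_error_handler() in sata_via.c further down. A minimal sketch of the construct being removed (helper names are illustrative):

	void drive_eh(void);

	void error_handler(void)
	{
		return drive_eh();	/* -pedantic: ISO C forbids 'return' with expression in a void function */
	}

	void error_handler_fixed(void)
	{
		drive_eh();		/* plain call, nothing returned */
	}
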
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 1c1b83541d13..15dd649f89ee 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -17,6 +17,7 @@
17 * Base + 0x00 IRQ Status 17 * Base + 0x00 IRQ Status
18 * Base + 0x01 IRQ control 18 * Base + 0x01 IRQ control
19 * Base + 0x02 Chipset control 19 * Base + 0x02 Chipset control
20 * Base + 0x03 Unknown
20 * Base + 0x04 VDMA and reset control + wait bits 21 * Base + 0x04 VDMA and reset control + wait bits
21 * Base + 0x08 BMIMBA 22 * Base + 0x08 BMIMBA
22 * Base + 0x0C DMA Length 23 * Base + 0x0C DMA Length
@@ -174,8 +175,12 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
174 ata_std_ports(&ap->ioaddr); 175 ata_std_ports(&ap->ioaddr);
175 176
176 iowrite8(0x05, base + 0x01); /* Enable interrupt lines */ 177 iowrite8(0x05, base + 0x01); /* Enable interrupt lines */
177 iowrite8(0xB3, base + 0x02); /* Burst, ?? setup */ 178 iowrite8(0xBE, base + 0x02); /* Burst, ?? setup */
178 iowrite8(0x00, base + 0x04); /* WAIT0 ? */ 179 iowrite8(0x01, base + 0x03); /* Unknown */
180 iowrite8(0x20, base + 0x04); /* WAIT0 */
181 iowrite8(0x8f, base + 0x05); /* Unknown */
182 iowrite8(0xa4, base + 0x1c); /* Unknown */
183 iowrite8(0x83, base + 0x1d); /* BMDMA control: WAIT0 */
179 /* FIXME: Should we disable them at remove ? */ 184 /* FIXME: Should we disable them at remove ? */
180 return ata_host_activate(host, dev->irq, ata_interrupt, 185 return ata_host_activate(host, dev->irq, ata_interrupt,
181 IRQF_SHARED, &ninja32_sht); 186 IRQF_SHARED, &ninja32_sht);
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 55055b27524c..6c016deeaed8 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -1007,6 +1007,8 @@ static const struct ata_port_operations scc_pata_ops = {
1007 .qc_issue = ata_qc_issue_prot, 1007 .qc_issue = ata_qc_issue_prot,
1008 1008
1009 .freeze = scc_bmdma_freeze, 1009 .freeze = scc_bmdma_freeze,
1010 .thaw = ata_bmdma_thaw,
1011
1010 .error_handler = scc_error_handler, 1012 .error_handler = scc_error_handler,
1011 .post_internal_cmd = scc_bmdma_stop, 1013 .post_internal_cmd = scc_bmdma_stop,
1012 1014
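Without a .thaw handler a port frozen by error handling never gets its interrupts re-enabled, so pata_scc now wires up the generic ata_bmdma_thaw. Roughly, a BMDMA-style thaw clears stale interrupt state and turns the IRQ back on; a simplified sketch of that idea, not the verbatim libata body:

	static void example_thaw(struct ata_port *ap)
	{
		ata_chk_status(ap);		/* read Status to clear a pending device interrupt */
		ap->ops->irq_clear(ap);		/* clear host-side interrupt status */
		ap->ops->irq_on(ap);		/* re-enable interrupt delivery */
	}
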
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 39627ab684bf..d119a68c388f 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -84,6 +84,7 @@ enum {
84 VIA_BAD_ID = 0x100, /* Has wrong vendor ID (0x1107) */ 84 VIA_BAD_ID = 0x100, /* Has wrong vendor ID (0x1107) */
85 VIA_BAD_AST = 0x200, /* Don't touch Address Setup Timing */ 85 VIA_BAD_AST = 0x200, /* Don't touch Address Setup Timing */
86 VIA_NO_ENABLES = 0x400, /* Has no enablebits */ 86 VIA_NO_ENABLES = 0x400, /* Has no enablebits */
87 VIA_SATA_PATA = 0x800, /* SATA/PATA combined configuration */
87}; 88};
88 89
89/* 90/*
@@ -100,7 +101,7 @@ static const struct via_isa_bridge {
100 { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 101 { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
101 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 102 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
102 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 103 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
103 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 104 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA },
104 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES}, 105 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES},
105 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 106 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
106 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 107 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
@@ -172,6 +173,9 @@ static int via_cable_detect(struct ata_port *ap) {
172 if (via_cable_override(pdev)) 173 if (via_cable_override(pdev))
173 return ATA_CBL_PATA40_SHORT; 174 return ATA_CBL_PATA40_SHORT;
174 175
176 if ((config->flags & VIA_SATA_PATA) && ap->port_no == 0)
177 return ATA_CBL_SATA;
178
175 /* Early chips are 40 wire */ 179 /* Early chips are 40 wire */
176 if ((config->flags & VIA_UDMA) < VIA_UDMA_66) 180 if ((config->flags & VIA_UDMA) < VIA_UDMA_66)
177 return ATA_CBL_PATA40; 181 return ATA_CBL_PATA40;
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index efcb66b6ccef..07791a7a48a5 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -335,7 +335,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
335 dma_addr_t indirect_ext_segment_paddr; 335 dma_addr_t indirect_ext_segment_paddr;
336 unsigned int si; 336 unsigned int si;
337 337
338 VPRINTK("SATA FSL : cd = 0x%x, prd = 0x%x\n", cmd_desc, prd); 338 VPRINTK("SATA FSL : cd = 0x%p, prd = 0x%p\n", cmd_desc, prd);
339 339
340 indirect_ext_segment_paddr = cmd_desc_paddr + 340 indirect_ext_segment_paddr = cmd_desc_paddr +
341 SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16; 341 SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16;
@@ -459,7 +459,8 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
459 VPRINTK("CE=0x%x, DE=0x%x, CC=0x%x, CmdStat = 0x%x\n", 459 VPRINTK("CE=0x%x, DE=0x%x, CC=0x%x, CmdStat = 0x%x\n",
460 ioread32(CE + hcr_base), 460 ioread32(CE + hcr_base),
461 ioread32(DE + hcr_base), 461 ioread32(DE + hcr_base),
462 ioread32(CC + hcr_base), ioread32(COMMANDSTAT + csr_base)); 462 ioread32(CC + hcr_base),
463 ioread32(COMMANDSTAT + host_priv->csr_base));
463 464
464 return 0; 465 return 0;
465} 466}
@@ -522,7 +523,8 @@ static void sata_fsl_freeze(struct ata_port *ap)
522 ioread32(CQ + hcr_base), 523 ioread32(CQ + hcr_base),
523 ioread32(CA + hcr_base), 524 ioread32(CA + hcr_base),
524 ioread32(CE + hcr_base), ioread32(DE + hcr_base)); 525 ioread32(CE + hcr_base), ioread32(DE + hcr_base));
525 VPRINTK("CmdStat = 0x%x\n", ioread32(csr_base + COMMANDSTAT)); 526 VPRINTK("CmdStat = 0x%x\n",
527 ioread32(host_priv->csr_base + COMMANDSTAT));
526 528
527 /* disable interrupts on the controller/port */ 529 /* disable interrupts on the controller/port */
528 temp = ioread32(hcr_base + HCONTROL); 530 temp = ioread32(hcr_base + HCONTROL);
@@ -601,21 +603,9 @@ static int sata_fsl_port_start(struct ata_port *ap)
601 if (!pp) 603 if (!pp)
602 return -ENOMEM; 604 return -ENOMEM;
603 605
604 /*
605 * allocate per command dma alignment pad buffer, which is used
606 * internally by libATA to ensure that all transfers ending on
607 * unaligned boundaries are padded, to align on Dword boundaries
608 */
609 retval = ata_pad_alloc(ap, dev);
610 if (retval) {
611 kfree(pp);
612 return retval;
613 }
614
615 mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma, 606 mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
616 GFP_KERNEL); 607 GFP_KERNEL);
617 if (!mem) { 608 if (!mem) {
618 ata_pad_free(ap, dev);
619 kfree(pp); 609 kfree(pp);
620 return -ENOMEM; 610 return -ENOMEM;
621 } 611 }
@@ -694,7 +684,6 @@ static void sata_fsl_port_stop(struct ata_port *ap)
694 dma_free_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, 684 dma_free_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ,
695 pp->cmdslot, pp->cmdslot_paddr); 685 pp->cmdslot, pp->cmdslot_paddr);
696 686
697 ata_pad_free(ap, dev);
698 kfree(pp); 687 kfree(pp);
699} 688}
700 689
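Three independent fixes in sata_fsl: pointers are printed with %p instead of %x (which truncates on 64-bit and triggers printk format warnings), COMMANDSTAT is now read via host_priv->csr_base, and the per-port DMA pad buffer goes away along with the ata_pad_alloc()/ata_pad_free() calls; the same pad removal appears in sata_mv and sata_sil24 below, suggesting the pad support was dropped from the libata core in this series. A minimal sketch of the format fix:

	#include <linux/kernel.h>

	static void debug_print(void *cmd_desc)
	{
		/* %p prints the full pointer on 32- and 64-bit builds;
		 * %x expects an unsigned int and would truncate and warn. */
		printk(KERN_DEBUG "SATA FSL : cd = 0x%p\n", cmd_desc);
	}
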
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 080b8362f8d6..6ebebde8454a 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -870,7 +870,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
870 struct mv_host_priv *hpriv = ap->host->private_data; 870 struct mv_host_priv *hpriv = ap->host->private_data;
871 int hard_port = mv_hardport_from_port(ap->port_no); 871 int hard_port = mv_hardport_from_port(ap->port_no);
872 void __iomem *hc_mmio = mv_hc_base_from_port( 872 void __iomem *hc_mmio = mv_hc_base_from_port(
873 ap->host->iomap[MV_PRIMARY_BAR], hard_port); 873 mv_host_base(ap->host), hard_port);
874 u32 hc_irq_cause, ipending; 874 u32 hc_irq_cause, ipending;
875 875
876 /* clear EDMA event indicators, if any */ 876 /* clear EDMA event indicators, if any */
@@ -1158,17 +1158,13 @@ static int mv_port_start(struct ata_port *ap)
1158 struct mv_port_priv *pp; 1158 struct mv_port_priv *pp;
1159 void __iomem *port_mmio = mv_ap_base(ap); 1159 void __iomem *port_mmio = mv_ap_base(ap);
1160 unsigned long flags; 1160 unsigned long flags;
1161 int tag, rc; 1161 int tag;
1162 1162
1163 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1163 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1164 if (!pp) 1164 if (!pp)
1165 return -ENOMEM; 1165 return -ENOMEM;
1166 ap->private_data = pp; 1166 ap->private_data = pp;
1167 1167
1168 rc = ata_pad_alloc(ap, dev);
1169 if (rc)
1170 return rc;
1171
1172 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); 1168 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1173 if (!pp->crqb) 1169 if (!pp->crqb)
1174 return -ENOMEM; 1170 return -ENOMEM;
@@ -1542,7 +1538,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1542 eh_freeze_mask = EDMA_EH_FREEZE_5; 1538 eh_freeze_mask = EDMA_EH_FREEZE_5;
1543 1539
1544 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { 1540 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1545 struct mv_port_priv *pp = ap->private_data; 1541 pp = ap->private_data;
1546 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1542 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1547 ata_ehi_push_desc(ehi, "EDMA self-disable"); 1543 ata_ehi_push_desc(ehi, "EDMA self-disable");
1548 } 1544 }
@@ -1550,7 +1546,7 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1550 eh_freeze_mask = EDMA_EH_FREEZE; 1546 eh_freeze_mask = EDMA_EH_FREEZE;
1551 1547
1552 if (edma_err_cause & EDMA_ERR_SELF_DIS) { 1548 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1553 struct mv_port_priv *pp = ap->private_data; 1549 pp = ap->private_data;
1554 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 1550 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1555 ata_ehi_push_desc(ehi, "EDMA self-disable"); 1551 ata_ehi_push_desc(ehi, "EDMA self-disable");
1556 } 1552 }
@@ -1716,14 +1712,16 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1716 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", 1712 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1717 hc, relevant, hc_irq_cause); 1713 hc, relevant, hc_irq_cause);
1718 1714
1719 for (port = port0; port < port0 + last_port; port++) { 1715 for (port = port0; port < last_port; port++) {
1720 struct ata_port *ap = host->ports[port]; 1716 struct ata_port *ap = host->ports[port];
1721 struct mv_port_priv *pp = ap->private_data; 1717 struct mv_port_priv *pp;
1722 int have_err_bits, hard_port, shift; 1718 int have_err_bits, hard_port, shift;
1723 1719
1724 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED)) 1720 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1725 continue; 1721 continue;
1726 1722
1723 pp = ap->private_data;
1724
1727 shift = port << 1; /* (port * 2) */ 1725 shift = port << 1; /* (port * 2) */
1728 if (port >= MV_PORTS_PER_HC) { 1726 if (port >= MV_PORTS_PER_HC) {
1729 shift++; /* skip bit 8 in the HC Main IRQ reg */ 1727 shift++; /* skip bit 8 in the HC Main IRQ reg */
@@ -2879,6 +2877,26 @@ done:
2879 return rc; 2877 return rc;
2880} 2878}
2881 2879
2880static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2881{
2882 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2883 MV_CRQB_Q_SZ, 0);
2884 if (!hpriv->crqb_pool)
2885 return -ENOMEM;
2886
2887 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2888 MV_CRPB_Q_SZ, 0);
2889 if (!hpriv->crpb_pool)
2890 return -ENOMEM;
2891
2892 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2893 MV_SG_TBL_SZ, 0);
2894 if (!hpriv->sg_tbl_pool)
2895 return -ENOMEM;
2896
2897 return 0;
2898}
2899
2882/** 2900/**
2883 * mv_platform_probe - handle a positive probe of an soc Marvell 2901 * mv_platform_probe - handle a positive probe of an soc Marvell
2884 * host 2902 * host
@@ -2929,9 +2947,14 @@ static int mv_platform_probe(struct platform_device *pdev)
2929 hpriv->n_ports = n_ports; 2947 hpriv->n_ports = n_ports;
2930 2948
2931 host->iomap = NULL; 2949 host->iomap = NULL;
2932 hpriv->base = ioremap(res->start, res->end - res->start + 1); 2950 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2951 res->end - res->start + 1);
2933 hpriv->base -= MV_SATAHC0_REG_BASE; 2952 hpriv->base -= MV_SATAHC0_REG_BASE;
2934 2953
2954 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2955 if (rc)
2956 return rc;
2957
2935 /* initialize adapter */ 2958 /* initialize adapter */
2936 rc = mv_init_host(host, chip_soc); 2959 rc = mv_init_host(host, chip_soc);
2937 if (rc) 2960 if (rc)
@@ -2957,11 +2980,8 @@ static int __devexit mv_platform_remove(struct platform_device *pdev)
2957{ 2980{
2958 struct device *dev = &pdev->dev; 2981 struct device *dev = &pdev->dev;
2959 struct ata_host *host = dev_get_drvdata(dev); 2982 struct ata_host *host = dev_get_drvdata(dev);
2960 struct mv_host_priv *hpriv = host->private_data;
2961 void __iomem *base = hpriv->base;
2962 2983
2963 ata_host_detach(host); 2984 ata_host_detach(host);
2964 iounmap(base);
2965 return 0; 2985 return 0;
2966} 2986}
2967 2987
@@ -3068,26 +3088,6 @@ static void mv_print_info(struct ata_host *host)
3068 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); 3088 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3069} 3089}
3070 3090
3071static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3072{
3073 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3074 MV_CRQB_Q_SZ, 0);
3075 if (!hpriv->crqb_pool)
3076 return -ENOMEM;
3077
3078 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3079 MV_CRPB_Q_SZ, 0);
3080 if (!hpriv->crpb_pool)
3081 return -ENOMEM;
3082
3083 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3084 MV_SG_TBL_SZ, 0);
3085 if (!hpriv->sg_tbl_pool)
3086 return -ENOMEM;
3087
3088 return 0;
3089}
3090
3091/** 3091/**
3092 * mv_pci_init_one - handle a positive probe of a PCI Marvell host 3092 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
3093 * @pdev: PCI device found 3093 * @pdev: PCI device found
@@ -3192,6 +3192,7 @@ MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3192MODULE_LICENSE("GPL"); 3192MODULE_LICENSE("GPL");
3193MODULE_DEVICE_TABLE(pci, mv_pci_tbl); 3193MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3194MODULE_VERSION(DRV_VERSION); 3194MODULE_VERSION(DRV_VERSION);
3195MODULE_ALIAS("platform:sata_mv");
3195 3196
3196#ifdef CONFIG_PCI 3197#ifdef CONFIG_PCI
3197module_param(msi, int, 0444); 3198module_param(msi, int, 0444);
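The sata_mv changes bundle several cleanups: mv_host_intr() no longer reads ap->private_data before the NULL/ATA_FLAG_DISABLED check and its loop bound becomes last_port rather than port0 + last_port; mv_create_dma_pools() is moved ahead of mv_init_host() so the new platform probe can use it; the platform probe switches to devm_ioremap(), letting the remove path drop its explicit iounmap(); and MODULE_ALIAS("platform:sata_mv") lets the platform bus autoload the module. A sketch of the device-managed mapping pattern (error handling added here for illustration, not copied from the driver):

	static int example_probe(struct platform_device *pdev, struct resource *res)
	{
		void __iomem *base;

		base = devm_ioremap(&pdev->dev, res->start,
				    res->end - res->start + 1);
		if (!base)
			return -ENOMEM;	/* released automatically on driver unbind */

		/* ... use base; no iounmap() needed in the remove path ... */
		return 0;
	}
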
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index a07d319f6e8c..f251a5f569d5 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -543,7 +543,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
543 idx = 0; 543 idx = 0;
544 for_each_sg(qc->sg, sg, qc->n_elem, si) { 544 for_each_sg(qc->sg, sg, qc->n_elem, si) {
545 u32 addr, offset; 545 u32 addr, offset;
546 u32 sg_len, len; 546 u32 sg_len;
547 547
548 /* determine if physical DMA addr spans 64K boundary. 548 /* determine if physical DMA addr spans 64K boundary.
549 * Note h/w doesn't support 64-bit, so we unconditionally 549 * Note h/w doesn't support 64-bit, so we unconditionally
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index b4b1f91ea693..df7988df7908 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -1234,7 +1234,6 @@ static int sil24_port_start(struct ata_port *ap)
1234 union sil24_cmd_block *cb; 1234 union sil24_cmd_block *cb;
1235 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS; 1235 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
1236 dma_addr_t cb_dma; 1236 dma_addr_t cb_dma;
1237 int rc;
1238 1237
1239 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); 1238 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1240 if (!pp) 1239 if (!pp)
@@ -1247,10 +1246,6 @@ static int sil24_port_start(struct ata_port *ap)
1247 return -ENOMEM; 1246 return -ENOMEM;
1248 memset(cb, 0, cb_size); 1247 memset(cb, 0, cb_size);
1249 1248
1250 rc = ata_pad_alloc(ap, dev);
1251 if (rc)
1252 return rc;
1253
1254 pp->cmd_block = cb; 1249 pp->cmd_block = cb;
1255 pp->cmd_block_dma = cb_dma; 1250 pp->cmd_block_dma = cb_dma;
1256 1251
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 30caa0337190..0d03f44824fb 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -333,8 +333,8 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
333 333
334static void vt6420_error_handler(struct ata_port *ap) 334static void vt6420_error_handler(struct ata_port *ap)
335{ 335{
336 return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset, 336 ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset, NULL,
337 NULL, ata_std_postreset); 337 ata_std_postreset);
338} 338}
339 339
340static int vt6421_pata_cable_detect(struct ata_port *ap) 340static int vt6421_pata_cable_detect(struct ata_port *ap)
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 055989e94799..2d207ad30336 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -658,9 +658,10 @@ int bus_add_driver(struct device_driver *drv)
658 pr_debug("bus: '%s': add driver %s\n", bus->name, drv->name); 658 pr_debug("bus: '%s': add driver %s\n", bus->name, drv->name);
659 659
660 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 660 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
661 if (!priv) 661 if (!priv) {
662 return -ENOMEM; 662 error = -ENOMEM;
663 663 goto out_put_bus;
664 }
664 klist_init(&priv->klist_devices, NULL, NULL); 665 klist_init(&priv->klist_devices, NULL, NULL);
665 priv->driver = drv; 666 priv->driver = drv;
666 drv->p = priv; 667 drv->p = priv;
@@ -668,7 +669,7 @@ int bus_add_driver(struct device_driver *drv)
668 error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL, 669 error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL,
669 "%s", drv->name); 670 "%s", drv->name);
670 if (error) 671 if (error)
671 goto out_put_bus; 672 goto out_unregister;
672 673
673 if (drv->bus->p->drivers_autoprobe) { 674 if (drv->bus->p->drivers_autoprobe) {
674 error = driver_attach(drv); 675 error = driver_attach(drv);
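bus_add_driver() already holds a reference on the bus by the time this allocation runs, so a bare -ENOMEM return leaked that reference; the allocation failure now unwinds through out_put_bus, and a kobject_init_and_add() failure jumps to the later out_unregister label instead. A simplified skeleton of the unwind pattern, using the names visible in the diff (bus_get()/bus_put() are the driver core's internal refcount helpers; this is not the exact function body):

	static int example_add_driver(struct bus_type *bus)
	{
		struct driver_private *priv;
		int error;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv) {
			error = -ENOMEM;
			goto out_put_bus;	/* the bus reference is already held */
		}
		error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL,
					     "%s", "example");
		if (error)
			goto out_unregister;
		return 0;

	out_unregister:
		kobject_put(&priv->kobj);	/* its release frees priv */
	out_put_bus:
		bus_put(bus);
		return error;
	}
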
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index ba75184c653c..bf31a0170a48 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -120,6 +120,9 @@ EXPORT_SYMBOL_GPL(driver_remove_file);
120 120
121/** 121/**
122 * driver_add_kobj - add a kobject below the specified driver 122 * driver_add_kobj - add a kobject below the specified driver
123 * @drv: requesting device driver
124 * @kobj: kobject to add below this driver
125 * @fmt: format string that names the kobject
123 * 126 *
124 * You really don't want to do this, this is only here due to one looney 127 * You really don't want to do this, this is only here due to one looney
125 * iseries driver, go poke those developers if you are annoyed about 128 * iseries driver, go poke those developers if you are annoyed about
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index bdc03f7e8424..ee9d1c8db0d6 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -415,7 +415,7 @@ EXPORT_SYMBOL_GPL(device_power_down);
415 * @dev: Device. 415 * @dev: Device.
416 * @state: Power state device is entering. 416 * @state: Power state device is entering.
417 */ 417 */
418int suspend_device(struct device *dev, pm_message_t state) 418static int suspend_device(struct device *dev, pm_message_t state)
419{ 419{
420 int error = 0; 420 int error = 0;
421 421
@@ -479,7 +479,6 @@ static int dpm_suspend(pm_message_t state)
479 mutex_lock(&dpm_list_mtx); 479 mutex_lock(&dpm_list_mtx);
480 if (list_empty(&dev->power.entry)) 480 if (list_empty(&dev->power.entry))
481 list_add(&dev->power.entry, &dpm_locked); 481 list_add(&dev->power.entry, &dpm_locked);
482 mutex_unlock(&dpm_list_mtx);
483 break; 482 break;
484 } 483 }
485 mutex_lock(&dpm_list_mtx); 484 mutex_lock(&dpm_list_mtx);
@@ -523,6 +522,7 @@ static void lock_all_devices(void)
523 522
524/** 523/**
525 * device_suspend - Save state and stop all devices in system. 524 * device_suspend - Save state and stop all devices in system.
525 * @state: new power management state
526 * 526 *
527 * Prevent new devices from being registered, then lock all devices 527 * Prevent new devices from being registered, then lock all devices
528 * and suspend them. 528 * and suspend them.
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 018753c59b8e..b53fdb0a282c 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -655,6 +655,7 @@ static int __init nbd_init(void)
655 655
656 for (i = 0; i < nbds_max; i++) { 656 for (i = 0; i < nbds_max; i++) {
657 struct gendisk *disk = alloc_disk(1); 657 struct gendisk *disk = alloc_disk(1);
658 elevator_t *old_e;
658 if (!disk) 659 if (!disk)
659 goto out; 660 goto out;
660 nbd_dev[i].disk = disk; 661 nbd_dev[i].disk = disk;
@@ -668,6 +669,11 @@ static int __init nbd_init(void)
668 put_disk(disk); 669 put_disk(disk);
669 goto out; 670 goto out;
670 } 671 }
672 old_e = disk->queue->elevator;
673 if (elevator_init(disk->queue, "deadline") == 0 ||
674 elevator_init(disk->queue, "noop") == 0) {
675 elevator_exit(old_e);
676 }
671 } 677 }
672 678
673 if (register_blkdev(NBD_MAJOR, "nbd")) { 679 if (register_blkdev(NBD_MAJOR, "nbd")) {
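nbd_init() now swaps the default I/O scheduler on each nbd queue for deadline, falling back to noop; a network block device has no seek penalty, so idling-oriented elevators mostly add latency (rationale inferred, not stated in the diff). Note how the short-circuit keeps the original elevator when both switches fail:

	old_e = disk->queue->elevator;
	if (elevator_init(disk->queue, "deadline") == 0 ||
	    elevator_init(disk->queue, "noop") == 0)
		elevator_exit(old_e);	/* only freed once a replacement is installed */
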
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index b4e462f154ea..730ccea78e45 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -251,10 +251,6 @@ static int floppy_release(struct inode *inode, struct file *filp);
251static int floppy_check_change(struct gendisk *disk); 251static int floppy_check_change(struct gendisk *disk);
252static int floppy_revalidate(struct gendisk *disk); 252static int floppy_revalidate(struct gendisk *disk);
253 253
254#ifndef CONFIG_PMAC_MEDIABAY
255#define check_media_bay(which, what) 1
256#endif
257
258static void swim3_select(struct floppy_state *fs, int sel) 254static void swim3_select(struct floppy_state *fs, int sel)
259{ 255{
260 struct swim3 __iomem *sw = fs->swim3; 256 struct swim3 __iomem *sw = fs->swim3;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8afce67c0aa5..9c6f3f99208d 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -37,6 +37,7 @@
37 37
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
39#include <linux/blkdev.h> 39#include <linux/blkdev.h>
40#include <linux/hdreg.h>
40#include <linux/module.h> 41#include <linux/module.h>
41 42
42#include <xen/xenbus.h> 43#include <xen/xenbus.h>
@@ -135,6 +136,22 @@ static void blkif_restart_queue_callback(void *arg)
135 schedule_work(&info->work); 136 schedule_work(&info->work);
136} 137}
137 138
139int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
140{
141 /* We don't have real geometry info, but let's at least return
142 values consistent with the size of the device */
143 sector_t nsect = get_capacity(bd->bd_disk);
144 sector_t cylinders = nsect;
145
146 hg->heads = 0xff;
147 hg->sectors = 0x3f;
148 sector_div(cylinders, hg->heads * hg->sectors);
149 hg->cylinders = cylinders;
150 if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
151 hg->cylinders = 0xffff;
152 return 0;
153}
154
138/* 155/*
139 * blkif_queue_request 156 * blkif_queue_request
140 * 157 *
@@ -937,6 +954,7 @@ static struct block_device_operations xlvbd_block_fops =
937 .owner = THIS_MODULE, 954 .owner = THIS_MODULE,
938 .open = blkif_open, 955 .open = blkif_open,
939 .release = blkif_release, 956 .release = blkif_release,
957 .getgeo = blkif_getgeo,
940}; 958};
941 959
942 960
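blkif_getgeo() fabricates a CHS geometry of 255 heads and 63 sectors per track, clamping cylinders to 0xffff when even one extra cylinder cannot cover the capacity, so HDIO_GETGEO returns something sensible for the paravirtual disk; <linux/hdreg.h> supplies struct hd_geometry. A worked example of the arithmetic, assuming a 16 GiB disk:

	static void example_geo(void)
	{
		sector_t nsect = 33554432;		/* 16 GiB in 512-byte sectors */
		sector_t cylinders = nsect;

		sector_div(cylinders, 255 * 63);	/* divides in place: cylinders == 2088 */
		/* (2088 + 1) * 255 * 63 = 33559785 >= nsect, so the 0xffff clamp is not taken */
	}
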
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index e68821d074b0..7e31d5f1bc8a 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -208,6 +208,7 @@ static int hci_uart_close(struct hci_dev *hdev)
208 return 0; 208 return 0;
209 209
210 hci_uart_flush(hdev); 210 hci_uart_flush(hdev);
211 hdev->flush = NULL;
211 return 0; 212 return 0;
212} 213}
213 214
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 87be46406daf..d28669992147 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -41,6 +41,7 @@ static int amd_create_page_map(struct amd_page_map *page_map)
41 if (page_map->real == NULL) 41 if (page_map->real == NULL)
42 return -ENOMEM; 42 return -ENOMEM;
43 43
44#ifndef CONFIG_X86
44 SetPageReserved(virt_to_page(page_map->real)); 45 SetPageReserved(virt_to_page(page_map->real));
45 global_cache_flush(); 46 global_cache_flush();
46 page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real), 47 page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
@@ -52,6 +53,10 @@ static int amd_create_page_map(struct amd_page_map *page_map)
52 return -ENOMEM; 53 return -ENOMEM;
53 } 54 }
54 global_cache_flush(); 55 global_cache_flush();
56#else
57 set_memory_uc((unsigned long)page_map->real, 1);
58 page_map->remapped = page_map->real;
59#endif
55 60
56 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { 61 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
57 writel(agp_bridge->scratch_page, page_map->remapped+i); 62 writel(agp_bridge->scratch_page, page_map->remapped+i);
@@ -63,8 +68,12 @@ static int amd_create_page_map(struct amd_page_map *page_map)
63 68
64static void amd_free_page_map(struct amd_page_map *page_map) 69static void amd_free_page_map(struct amd_page_map *page_map)
65{ 70{
71#ifndef CONFIG_X86
66 iounmap(page_map->remapped); 72 iounmap(page_map->remapped);
67 ClearPageReserved(virt_to_page(page_map->real)); 73 ClearPageReserved(virt_to_page(page_map->real));
74#else
75 set_memory_wb((unsigned long)page_map->real, 1);
76#endif
68 free_page((unsigned long) page_map->real); 77 free_page((unsigned long) page_map->real);
69} 78}
70 79
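On x86 the AGP page-map page now stays at its linear-mapping address and is merely switched to uncached with set_memory_uc(), with set_memory_wb() restoring the attribute before free_page(); the SetPageReserved()/ioremap_nocache() path is kept for non-x86. The same conversion is applied unconditionally in ati-agp.c and sworks-agp.c, and under CONFIG_X86 to the GATT table in generic.c below. A minimal sketch of the pairing for a single page:

	static int example_uncached_page(void)
	{
		unsigned long addr = get_zeroed_page(GFP_KERNEL);

		if (!addr)
			return -ENOMEM;
		set_memory_uc(addr, 1);		/* second argument is a page count */
		/* ... hardware accesses the page via its physical address ... */
		set_memory_wb(addr, 1);		/* restore write-back before freeing */
		free_page(addr);
		return 0;
	}
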
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 2d46b713c8f2..55c97f623242 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -60,18 +60,9 @@ static int ati_create_page_map(struct ati_page_map *page_map)
60 if (page_map->real == NULL) 60 if (page_map->real == NULL)
61 return -ENOMEM; 61 return -ENOMEM;
62 62
63 SetPageReserved(virt_to_page(page_map->real)); 63 set_memory_uc((unsigned long)page_map->real, 1);
64 err = map_page_into_agp(virt_to_page(page_map->real)); 64 err = map_page_into_agp(virt_to_page(page_map->real));
65 page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real), 65 page_map->remapped = page_map->real;
66 PAGE_SIZE);
67 if (page_map->remapped == NULL || err) {
68 ClearPageReserved(virt_to_page(page_map->real));
69 free_page((unsigned long) page_map->real);
70 page_map->real = NULL;
71 return -ENOMEM;
72 }
73 /*CACHE_FLUSH();*/
74 global_cache_flush();
75 66
76 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) { 67 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
77 writel(agp_bridge->scratch_page, page_map->remapped+i); 68 writel(agp_bridge->scratch_page, page_map->remapped+i);
@@ -85,8 +76,7 @@ static int ati_create_page_map(struct ati_page_map *page_map)
85static void ati_free_page_map(struct ati_page_map *page_map) 76static void ati_free_page_map(struct ati_page_map *page_map)
86{ 77{
87 unmap_page_from_agp(virt_to_page(page_map->real)); 78 unmap_page_from_agp(virt_to_page(page_map->real));
88 iounmap(page_map->remapped); 79 set_memory_wb((unsigned long)page_map->real, 1);
89 ClearPageReserved(virt_to_page(page_map->real));
90 free_page((unsigned long) page_map->real); 80 free_page((unsigned long) page_map->real);
91} 81}
92 82
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 7484bc759c4c..7fc0c99a3a58 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -932,9 +932,14 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
932 agp_gatt_table = (void *)table; 932 agp_gatt_table = (void *)table;
933 933
934 bridge->driver->cache_flush(); 934 bridge->driver->cache_flush();
935#ifdef CONFIG_X86
936 set_memory_uc((unsigned long)table, 1 << page_order);
937 bridge->gatt_table = (void *)table;
938#else
935 bridge->gatt_table = ioremap_nocache(virt_to_gart(table), 939 bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
936 (PAGE_SIZE * (1 << page_order))); 940 (PAGE_SIZE * (1 << page_order)));
937 bridge->driver->cache_flush(); 941 bridge->driver->cache_flush();
942#endif
938 943
939 if (bridge->gatt_table == NULL) { 944 if (bridge->gatt_table == NULL) {
940 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++) 945 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
@@ -991,7 +996,11 @@ int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
991 * called, then all agp memory is deallocated and removed 996 * called, then all agp memory is deallocated and removed
992 * from the table. */ 997 * from the table. */
993 998
999#ifdef CONFIG_X86
1000 set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
1001#else
994 iounmap(bridge->gatt_table); 1002 iounmap(bridge->gatt_table);
1003#endif
995 table = (char *) bridge->gatt_table_real; 1004 table = (char *) bridge->gatt_table_real;
996 table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1); 1005 table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
997 1006
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index eb1a1c738190..b6791846809f 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -14,6 +14,9 @@
14#define SIS_TLBCNTRL 0x97 14#define SIS_TLBCNTRL 0x97
15#define SIS_TLBFLUSH 0x98 15#define SIS_TLBFLUSH 0x98
16 16
17#define PCI_DEVICE_ID_SI_662 0x0662
18#define PCI_DEVICE_ID_SI_671 0x0671
19
17static int __devinitdata agp_sis_force_delay = 0; 20static int __devinitdata agp_sis_force_delay = 0;
18static int __devinitdata agp_sis_agp_spec = -1; 21static int __devinitdata agp_sis_agp_spec = -1;
19 22
@@ -27,8 +30,8 @@ static int sis_fetch_size(void)
27 values = A_SIZE_8(agp_bridge->driver->aperture_sizes); 30 values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
28 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) { 31 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
29 if ((temp_size == values[i].size_value) || 32 if ((temp_size == values[i].size_value) ||
30 ((temp_size & ~(0x03)) == 33 ((temp_size & ~(0x07)) ==
31 (values[i].size_value & ~(0x03)))) { 34 (values[i].size_value & ~(0x07)))) {
32 agp_bridge->previous_size = 35 agp_bridge->previous_size =
33 agp_bridge->current_size = (void *) (values + i); 36 agp_bridge->current_size = (void *) (values + i);
34 37
@@ -214,6 +217,26 @@ static void __devexit agp_sis_remove(struct pci_dev *pdev)
214 agp_put_bridge(bridge); 217 agp_put_bridge(bridge);
215} 218}
216 219
220#ifdef CONFIG_PM
221
222static int agp_sis_suspend(struct pci_dev *pdev, pm_message_t state)
223{
224 pci_save_state(pdev);
225 pci_set_power_state(pdev, pci_choose_state(pdev, state));
226
227 return 0;
228}
229
230static int agp_sis_resume(struct pci_dev *pdev)
231{
232 pci_set_power_state(pdev, PCI_D0);
233 pci_restore_state(pdev);
234
235 return sis_driver.configure();
236}
237
238#endif /* CONFIG_PM */
239
217static struct pci_device_id agp_sis_pci_table[] = { 240static struct pci_device_id agp_sis_pci_table[] = {
218 { 241 {
219 .class = (PCI_CLASS_BRIDGE_HOST << 8), 242 .class = (PCI_CLASS_BRIDGE_HOST << 8),
@@ -331,6 +354,22 @@ static struct pci_device_id agp_sis_pci_table[] = {
331 .class = (PCI_CLASS_BRIDGE_HOST << 8), 354 .class = (PCI_CLASS_BRIDGE_HOST << 8),
332 .class_mask = ~0, 355 .class_mask = ~0,
333 .vendor = PCI_VENDOR_ID_SI, 356 .vendor = PCI_VENDOR_ID_SI,
357 .device = PCI_DEVICE_ID_SI_662,
358 .subvendor = PCI_ANY_ID,
359 .subdevice = PCI_ANY_ID,
360 },
361 {
362 .class = (PCI_CLASS_BRIDGE_HOST << 8),
363 .class_mask = ~0,
364 .vendor = PCI_VENDOR_ID_SI,
365 .device = PCI_DEVICE_ID_SI_671,
366 .subvendor = PCI_ANY_ID,
367 .subdevice = PCI_ANY_ID,
368 },
369 {
370 .class = (PCI_CLASS_BRIDGE_HOST << 8),
371 .class_mask = ~0,
372 .vendor = PCI_VENDOR_ID_SI,
334 .device = PCI_DEVICE_ID_SI_730, 373 .device = PCI_DEVICE_ID_SI_730,
335 .subvendor = PCI_ANY_ID, 374 .subvendor = PCI_ANY_ID,
336 .subdevice = PCI_ANY_ID, 375 .subdevice = PCI_ANY_ID,
@@ -393,6 +432,10 @@ static struct pci_driver agp_sis_pci_driver = {
393 .id_table = agp_sis_pci_table, 432 .id_table = agp_sis_pci_table,
394 .probe = agp_sis_probe, 433 .probe = agp_sis_probe,
395 .remove = agp_sis_remove, 434 .remove = agp_sis_remove,
435#ifdef CONFIG_PM
436 .suspend = agp_sis_suspend,
437 .resume = agp_sis_resume,
438#endif
396}; 439};
397 440
398static int __init agp_sis_init(void) 441static int __init agp_sis_init(void)
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 551ef25063ef..e08934e58f32 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -52,28 +52,20 @@ static int serverworks_create_page_map(struct serverworks_page_map *page_map)
52 if (page_map->real == NULL) { 52 if (page_map->real == NULL) {
53 return -ENOMEM; 53 return -ENOMEM;
54 } 54 }
55 SetPageReserved(virt_to_page(page_map->real)); 55
56 global_cache_flush(); 56 set_memory_uc((unsigned long)page_map->real, 1);
57 page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real), 57 page_map->remapped = page_map->real;
58 PAGE_SIZE);
59 if (page_map->remapped == NULL) {
60 ClearPageReserved(virt_to_page(page_map->real));
61 free_page((unsigned long) page_map->real);
62 page_map->real = NULL;
63 return -ENOMEM;
64 }
65 global_cache_flush();
66 58
67 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) 59 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
68 writel(agp_bridge->scratch_page, page_map->remapped+i); 60 writel(agp_bridge->scratch_page, page_map->remapped+i);
61 /* Red Pen: Everyone else does pci posting flush here */
69 62
70 return 0; 63 return 0;
71} 64}
72 65
73static void serverworks_free_page_map(struct serverworks_page_map *page_map) 66static void serverworks_free_page_map(struct serverworks_page_map *page_map)
74{ 67{
75 iounmap(page_map->remapped); 68 set_memory_wb((unsigned long)page_map->real, 1);
76 ClearPageReserved(virt_to_page(page_map->real));
77 free_page((unsigned long) page_map->real); 69 free_page((unsigned long) page_map->real);
78} 70}
79 71
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 19d3be5c4b2d..a6789f25009b 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -568,7 +568,7 @@ struct drm_driver {
568 void (*postclose) (struct drm_device *, struct drm_file *); 568 void (*postclose) (struct drm_device *, struct drm_file *);
569 void (*lastclose) (struct drm_device *); 569 void (*lastclose) (struct drm_device *);
570 int (*unload) (struct drm_device *); 570 int (*unload) (struct drm_device *);
571 int (*suspend) (struct drm_device *); 571 int (*suspend) (struct drm_device *, pm_message_t state);
572 int (*resume) (struct drm_device *); 572 int (*resume) (struct drm_device *);
573 int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); 573 int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
574 void (*dma_ready) (struct drm_device *); 574 void (*dma_ready) (struct drm_device *);
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index f52468843678..715b361f0c2b 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -83,6 +83,7 @@
83 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 83 {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
84 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 84 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
85 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 85 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
86 {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
86 {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 87 {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
87 {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 88 {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
88 {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 89 {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
@@ -236,6 +237,7 @@
236 {0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \ 237 {0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
237 {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \ 238 {0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
238 {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 239 {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
240 {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
239 {0, 0, 0} 241 {0, 0, 0}
240 242
241#define r128_PCI_IDS \ 243#define r128_PCI_IDS \
@@ -313,6 +315,7 @@
313 {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 315 {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
314 {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 316 {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
315 {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ 317 {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
318 {0x1039, 0x6351, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
316 {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ 319 {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
317 {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ 320 {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
318 {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ 321 {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/char/drm/drm_sysfs.c
index fa36153619e8..05ed5043254f 100644
--- a/drivers/char/drm/drm_sysfs.c
+++ b/drivers/char/drm/drm_sysfs.c
@@ -36,7 +36,7 @@ static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
36 printk(KERN_ERR "%s\n", __FUNCTION__); 36 printk(KERN_ERR "%s\n", __FUNCTION__);
37 37
38 if (drm_dev->driver->suspend) 38 if (drm_dev->driver->suspend)
39 return drm_dev->driver->suspend(drm_dev); 39 return drm_dev->driver->suspend(drm_dev, state);
40 40
41 return 0; 41 return 0;
42} 42}
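struct drm_driver::suspend gains a pm_message_t parameter and drm_sysfs_suspend() forwards the state it receives; i915 uses it further down to skip its register save on PM_EVENT_PRETHAW. The minimal shape of a hook under the new signature (illustrative sketch only):

	static int example_drm_suspend(struct drm_device *dev, pm_message_t state)
	{
		if (state.event == PM_EVENT_PRETHAW)
			return 0;	/* quiesce only; a hibernation image is about to be restored */
		/* ... save registers, power the device down ... */
		return 0;
	}
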
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index cea4105374b2..3d65c4dcd0c6 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -66,7 +66,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
66} 66}
67 67
68/** 68/**
69 * \c nopage method for AGP virtual memory. 69 * \c fault method for AGP virtual memory.
70 * 70 *
71 * \param vma virtual memory area. 71 * \param vma virtual memory area.
72 * \param address access address. 72 * \param address access address.
@@ -76,8 +76,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
76 * map, get the page, increment the use count and return it. 76 * map, get the page, increment the use count and return it.
77 */ 77 */
78#if __OS_HAS_AGP 78#if __OS_HAS_AGP
79static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, 79static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
80 unsigned long address)
81{ 80{
82 struct drm_file *priv = vma->vm_file->private_data; 81 struct drm_file *priv = vma->vm_file->private_data;
83 struct drm_device *dev = priv->head->dev; 82 struct drm_device *dev = priv->head->dev;
@@ -89,19 +88,24 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
89 * Find the right map 88 * Find the right map
90 */ 89 */
91 if (!drm_core_has_AGP(dev)) 90 if (!drm_core_has_AGP(dev))
92 goto vm_nopage_error; 91 goto vm_fault_error;
93 92
94 if (!dev->agp || !dev->agp->cant_use_aperture) 93 if (!dev->agp || !dev->agp->cant_use_aperture)
95 goto vm_nopage_error; 94 goto vm_fault_error;
96 95
97 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) 96 if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
98 goto vm_nopage_error; 97 goto vm_fault_error;
99 98
100 r_list = drm_hash_entry(hash, struct drm_map_list, hash); 99 r_list = drm_hash_entry(hash, struct drm_map_list, hash);
101 map = r_list->map; 100 map = r_list->map;
102 101
103 if (map && map->type == _DRM_AGP) { 102 if (map && map->type == _DRM_AGP) {
104 unsigned long offset = address - vma->vm_start; 103 /*
104 * Using vm_pgoff as a selector forces us to use this unusual
105 * addressing scheme.
106 */
107 unsigned long offset = (unsigned long)vmf->virtual_address -
108 vma->vm_start;
105 unsigned long baddr = map->offset + offset; 109 unsigned long baddr = map->offset + offset;
106 struct drm_agp_mem *agpmem; 110 struct drm_agp_mem *agpmem;
107 struct page *page; 111 struct page *page;
@@ -123,7 +127,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
123 } 127 }
124 128
125 if (!agpmem) 129 if (!agpmem)
126 goto vm_nopage_error; 130 goto vm_fault_error;
127 131
128 /* 132 /*
129 * Get the page, inc the use count, and return it 133 * Get the page, inc the use count, and return it
@@ -131,22 +135,21 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
131 offset = (baddr - agpmem->bound) >> PAGE_SHIFT; 135 offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
132 page = virt_to_page(__va(agpmem->memory->memory[offset])); 136 page = virt_to_page(__va(agpmem->memory->memory[offset]));
133 get_page(page); 137 get_page(page);
138 vmf->page = page;
134 139
135 DRM_DEBUG 140 DRM_DEBUG
136 ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n", 141 ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
137 baddr, __va(agpmem->memory->memory[offset]), offset, 142 baddr, __va(agpmem->memory->memory[offset]), offset,
138 page_count(page)); 143 page_count(page));
139 144 return 0;
140 return page;
141 } 145 }
142 vm_nopage_error: 146vm_fault_error:
143 return NOPAGE_SIGBUS; /* Disallow mremap */ 147 return VM_FAULT_SIGBUS; /* Disallow mremap */
144} 148}
145#else /* __OS_HAS_AGP */ 149#else /* __OS_HAS_AGP */
146static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, 150static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
147 unsigned long address)
148{ 151{
149 return NOPAGE_SIGBUS; 152 return VM_FAULT_SIGBUS;
150} 153}
151#endif /* __OS_HAS_AGP */ 154#endif /* __OS_HAS_AGP */
152 155
@@ -160,28 +163,26 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
160 * Get the mapping, find the real physical page to map, get the page, and 163 * Get the mapping, find the real physical page to map, get the page, and
161 * return it. 164 * return it.
162 */ 165 */
163static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, 166static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
164 unsigned long address)
165{ 167{
166 struct drm_map *map = (struct drm_map *) vma->vm_private_data; 168 struct drm_map *map = (struct drm_map *) vma->vm_private_data;
167 unsigned long offset; 169 unsigned long offset;
168 unsigned long i; 170 unsigned long i;
169 struct page *page; 171 struct page *page;
170 172
171 if (address > vma->vm_end)
172 return NOPAGE_SIGBUS; /* Disallow mremap */
173 if (!map) 173 if (!map)
174 return NOPAGE_SIGBUS; /* Nothing allocated */ 174 return VM_FAULT_SIGBUS; /* Nothing allocated */
175 175
176 offset = address - vma->vm_start; 176 offset = (unsigned long)vmf->virtual_address - vma->vm_start;
177 i = (unsigned long)map->handle + offset; 177 i = (unsigned long)map->handle + offset;
178 page = vmalloc_to_page((void *)i); 178 page = vmalloc_to_page((void *)i);
179 if (!page) 179 if (!page)
180 return NOPAGE_SIGBUS; 180 return VM_FAULT_SIGBUS;
181 get_page(page); 181 get_page(page);
182 vmf->page = page;
182 183
183 DRM_DEBUG("0x%lx\n", address); 184 DRM_DEBUG("shm_fault 0x%lx\n", offset);
184 return page; 185 return 0;
185} 186}
186 187
187/** 188/**
@@ -263,7 +264,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
263} 264}
264 265
265/** 266/**
266 * \c nopage method for DMA virtual memory. 267 * \c fault method for DMA virtual memory.
267 * 268 *
268 * \param vma virtual memory area. 269 * \param vma virtual memory area.
269 * \param address access address. 270 * \param address access address.
@@ -271,8 +272,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
271 * 272 *
272 * Determine the page number from the page offset and get it from drm_device_dma::pagelist. 273 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
273 */ 274 */
274static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, 275static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
275 unsigned long address)
276{ 276{
277 struct drm_file *priv = vma->vm_file->private_data; 277 struct drm_file *priv = vma->vm_file->private_data;
278 struct drm_device *dev = priv->head->dev; 278 struct drm_device *dev = priv->head->dev;
@@ -282,24 +282,23 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
282 struct page *page; 282 struct page *page;
283 283
284 if (!dma) 284 if (!dma)
285 return NOPAGE_SIGBUS; /* Error */ 285 return VM_FAULT_SIGBUS; /* Error */
286 if (address > vma->vm_end)
287 return NOPAGE_SIGBUS; /* Disallow mremap */
288 if (!dma->pagelist) 286 if (!dma->pagelist)
289 return NOPAGE_SIGBUS; /* Nothing allocated */ 287 return VM_FAULT_SIGBUS; /* Nothing allocated */
290 288
291 offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ 289 offset = (unsigned long)vmf->virtual_address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
292 page_nr = offset >> PAGE_SHIFT; 290 page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
293 page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK)))); 291 page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
294 292
295 get_page(page); 293 get_page(page);
294 vmf->page = page;
296 295
297 DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr); 296 DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
298 return page; 297 return 0;
299} 298}
300 299
301/** 300/**
302 * \c nopage method for scatter-gather virtual memory. 301 * \c fault method for scatter-gather virtual memory.
303 * 302 *
304 * \param vma virtual memory area. 303 * \param vma virtual memory area.
305 * \param address access address. 304 * \param address access address.
@@ -307,8 +306,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
307 * 306 *
308 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist. 307 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
309 */ 308 */
310static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, 309static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
311 unsigned long address)
312{ 310{
313 struct drm_map *map = (struct drm_map *) vma->vm_private_data; 311 struct drm_map *map = (struct drm_map *) vma->vm_private_data;
314 struct drm_file *priv = vma->vm_file->private_data; 312 struct drm_file *priv = vma->vm_file->private_data;
@@ -320,77 +318,64 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
320 struct page *page; 318 struct page *page;
321 319
322 if (!entry) 320 if (!entry)
323 return NOPAGE_SIGBUS; /* Error */ 321 return VM_FAULT_SIGBUS; /* Error */
324 if (address > vma->vm_end)
325 return NOPAGE_SIGBUS; /* Disallow mremap */
326 if (!entry->pagelist) 322 if (!entry->pagelist)
327 return NOPAGE_SIGBUS; /* Nothing allocated */ 323 return VM_FAULT_SIGBUS; /* Nothing allocated */
328 324
329 offset = address - vma->vm_start; 325 offset = (unsigned long)vmf->virtual_address - vma->vm_start;
330 map_offset = map->offset - (unsigned long)dev->sg->virtual; 326 map_offset = map->offset - (unsigned long)dev->sg->virtual;
331 page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); 327 page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
332 page = entry->pagelist[page_offset]; 328 page = entry->pagelist[page_offset];
333 get_page(page); 329 get_page(page);
330 vmf->page = page;
334 331
335 return page; 332 return 0;
336} 333}
337 334
338static struct page *drm_vm_nopage(struct vm_area_struct *vma, 335static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
339 unsigned long address, int *type)
340{ 336{
341 if (type) 337 return drm_do_vm_fault(vma, vmf);
342 *type = VM_FAULT_MINOR;
343 return drm_do_vm_nopage(vma, address);
344} 338}
345 339
346static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma, 340static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
347 unsigned long address, int *type)
348{ 341{
349 if (type) 342 return drm_do_vm_shm_fault(vma, vmf);
350 *type = VM_FAULT_MINOR;
351 return drm_do_vm_shm_nopage(vma, address);
352} 343}
353 344
354static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma, 345static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
355 unsigned long address, int *type)
356{ 346{
357 if (type) 347 return drm_do_vm_dma_fault(vma, vmf);
358 *type = VM_FAULT_MINOR;
359 return drm_do_vm_dma_nopage(vma, address);
360} 348}
361 349
362static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma, 350static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
363 unsigned long address, int *type)
364{ 351{
365 if (type) 352 return drm_do_vm_sg_fault(vma, vmf);
366 *type = VM_FAULT_MINOR;
367 return drm_do_vm_sg_nopage(vma, address);
368} 353}
369 354
370/** AGP virtual memory operations */ 355/** AGP virtual memory operations */
371static struct vm_operations_struct drm_vm_ops = { 356static struct vm_operations_struct drm_vm_ops = {
372 .nopage = drm_vm_nopage, 357 .fault = drm_vm_fault,
373 .open = drm_vm_open, 358 .open = drm_vm_open,
374 .close = drm_vm_close, 359 .close = drm_vm_close,
375}; 360};
376 361
377/** Shared virtual memory operations */ 362/** Shared virtual memory operations */
378static struct vm_operations_struct drm_vm_shm_ops = { 363static struct vm_operations_struct drm_vm_shm_ops = {
379 .nopage = drm_vm_shm_nopage, 364 .fault = drm_vm_shm_fault,
380 .open = drm_vm_open, 365 .open = drm_vm_open,
381 .close = drm_vm_shm_close, 366 .close = drm_vm_shm_close,
382}; 367};
383 368
384/** DMA virtual memory operations */ 369/** DMA virtual memory operations */
385static struct vm_operations_struct drm_vm_dma_ops = { 370static struct vm_operations_struct drm_vm_dma_ops = {
386 .nopage = drm_vm_dma_nopage, 371 .fault = drm_vm_dma_fault,
387 .open = drm_vm_open, 372 .open = drm_vm_open,
388 .close = drm_vm_close, 373 .close = drm_vm_close,
389}; 374};
390 375
391/** Scatter-gather virtual memory operations */ 376/** Scatter-gather virtual memory operations */
392static struct vm_operations_struct drm_vm_sg_ops = { 377static struct vm_operations_struct drm_vm_sg_ops = {
393 .nopage = drm_vm_sg_nopage, 378 .fault = drm_vm_sg_fault,
394 .open = drm_vm_open, 379 .open = drm_vm_open,
395 .close = drm_vm_close, 380 .close = drm_vm_close,
396}; 381};
@@ -604,7 +589,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
604 /* 589 /*
605 * On some platforms we can't talk to bus dma address from the CPU, so for 590 * On some platforms we can't talk to bus dma address from the CPU, so for
606 * memory of type DRM_AGP, we'll deal with sorting out the real physical 591 * memory of type DRM_AGP, we'll deal with sorting out the real physical
607 * pages and mappings in nopage() 592 * pages and mappings in fault()
608 */ 593 */
609#if defined(__powerpc__) 594#if defined(__powerpc__)
610 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; 595 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
@@ -634,7 +619,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
634 break; 619 break;
635 case _DRM_CONSISTENT: 620 case _DRM_CONSISTENT:
636 /* Consistent memory is really like shared memory. But 621 /* Consistent memory is really like shared memory. But
637 * it's allocated in a different way, so avoid nopage */ 622 * it's allocated in a different way, so avoid fault */
638 if (remap_pfn_range(vma, vma->vm_start, 623 if (remap_pfn_range(vma, vma->vm_start,
639 page_to_pfn(virt_to_page(map->handle)), 624 page_to_pfn(virt_to_page(map->handle)),
640 vma->vm_end - vma->vm_start, vma->vm_page_prot)) 625 vma->vm_end - vma->vm_start, vma->vm_page_prot))
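The DRM mmap paths are converted from the removed ->nopage callback to ->fault: the handler now returns a VM_FAULT_* code, takes the faulting address from vmf->virtual_address rather than an explicit argument, and hands the resolved page back through vmf->page; the old address > vma->vm_end checks and VM_FAULT_MINOR bookkeeping are no longer needed with the fault API. A minimal sketch of the new shape (resolve_page() is a hypothetical helper, not a DRM function):

	struct page *resolve_page(void *priv, unsigned long offset);	/* hypothetical */

	static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		unsigned long offset = (unsigned long)vmf->virtual_address - vma->vm_start;
		struct page *page = resolve_page(vma->vm_private_data, offset);

		if (!page)
			return VM_FAULT_SIGBUS;
		get_page(page);		/* the core drops this reference when the mapping goes away */
		vmf->page = page;
		return 0;
	}
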
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index 379cbdad4921..9df08105f4f3 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -36,7 +36,7 @@
36#include "i830_drm.h" 36#include "i830_drm.h"
37#include "i830_drv.h" 37#include "i830_drv.h"
38#include <linux/interrupt.h> /* For task queue support */ 38#include <linux/interrupt.h> /* For task queue support */
39#include <linux/pagemap.h> /* For FASTCALL on unlock_page() */ 39#include <linux/pagemap.h>
40#include <linux/delay.h> 40#include <linux/delay.h>
41#include <asm/uaccess.h> 41#include <asm/uaccess.h>
42 42
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index 43986d81ae34..e9d6663bec73 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -171,7 +171,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
171 dev_priv->allow_batchbuffer = 1; 171 dev_priv->allow_batchbuffer = 1;
172 172
173 /* Program Hardware Status Page */ 173 /* Program Hardware Status Page */
174 if (!IS_G33(dev)) { 174 if (!I915_NEED_GFX_HWS(dev)) {
175 dev_priv->status_page_dmah = 175 dev_priv->status_page_dmah =
176 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); 176 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
177 177
@@ -720,6 +720,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
720 drm_i915_private_t *dev_priv = dev->dev_private; 720 drm_i915_private_t *dev_priv = dev->dev_private;
721 drm_i915_hws_addr_t *hws = data; 721 drm_i915_hws_addr_t *hws = data;
722 722
723 if (!I915_NEED_GFX_HWS(dev))
724 return -EINVAL;
725
723 if (!dev_priv) { 726 if (!dev_priv) {
724 DRM_ERROR("called with no initialization\n"); 727 DRM_ERROR("called with no initialization\n");
725 return -EINVAL; 728 return -EINVAL;
diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c
index 52e51033d32c..b2b451dc4460 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/char/drm/i915_drv.c
@@ -160,6 +160,7 @@ static void i915_save_vga(struct drm_device *dev)
160 dev_priv->saveAR[i] = i915_read_ar(st01, i, 0); 160 dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
161 inb(st01); 161 inb(st01);
162 outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX); 162 outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
163 inb(st01);
163 164
164 /* Graphics controller registers */ 165 /* Graphics controller registers */
165 for (i = 0; i < 9; i++) 166 for (i = 0; i < 9; i++)
@@ -221,10 +222,12 @@ static void i915_restore_vga(struct drm_device *dev)
221 dev_priv->saveGR[0x18]); 222 dev_priv->saveGR[0x18]);
222 223
223 /* Attribute controller registers */ 224 /* Attribute controller registers */
225 inb(st01);
224 for (i = 0; i < 20; i++) 226 for (i = 0; i < 20; i++)
225 i915_write_ar(st01, i, dev_priv->saveAR[i], 0); 227 i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
226 inb(st01); /* switch back to index mode */ 228 inb(st01); /* switch back to index mode */
227 outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX); 229 outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
230 inb(st01);
228 231
229 /* VGA color palette registers */ 232 /* VGA color palette registers */
230 outb(dev_priv->saveDACMASK, VGA_DACMASK); 233 outb(dev_priv->saveDACMASK, VGA_DACMASK);
@@ -236,7 +239,7 @@ static void i915_restore_vga(struct drm_device *dev)
236 239
237} 240}
238 241
239static int i915_suspend(struct drm_device *dev) 242static int i915_suspend(struct drm_device *dev, pm_message_t state)
240{ 243{
241 struct drm_i915_private *dev_priv = dev->dev_private; 244 struct drm_i915_private *dev_priv = dev->dev_private;
242 int i; 245 int i;
@@ -247,6 +250,9 @@ static int i915_suspend(struct drm_device *dev)
247 return -ENODEV; 250 return -ENODEV;
248 } 251 }
249 252
253 if (state.event == PM_EVENT_PRETHAW)
254 return 0;
255
250 pci_save_state(dev->pdev); 256 pci_save_state(dev->pdev);
251 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); 257 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
252 258
@@ -276,6 +282,7 @@ static int i915_suspend(struct drm_device *dev)
276 dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); 282 dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
277 } 283 }
278 i915_save_palette(dev, PIPE_A); 284 i915_save_palette(dev, PIPE_A);
285 dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
279 286
280 /* Pipe & plane B info */ 287 /* Pipe & plane B info */
281 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); 288 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
@@ -303,6 +310,7 @@ static int i915_suspend(struct drm_device *dev)
303 dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); 310 dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
304 } 311 }
305 i915_save_palette(dev, PIPE_B); 312 i915_save_palette(dev, PIPE_B);
313 dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
306 314
307 /* CRT state */ 315 /* CRT state */
308 dev_priv->saveADPA = I915_READ(ADPA); 316 dev_priv->saveADPA = I915_READ(ADPA);
@@ -329,12 +337,26 @@ static int i915_suspend(struct drm_device *dev)
329 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); 337 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
330 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); 338 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
331 339
340 /* Interrupt state */
341 dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
342 dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
343 dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
344
332 /* VGA state */ 345 /* VGA state */
333 dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0); 346 dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
334 dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1); 347 dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
335 dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV); 348 dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
336 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); 349 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
337 350
351 /* Clock gating state */
352 dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
353
354 /* Cache mode state */
355 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
356
357 /* Memory Arbitration state */
358 dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
359
338 /* Scratch space */ 360 /* Scratch space */
339 for (i = 0; i < 16; i++) { 361 for (i = 0; i < 16; i++) {
340 dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2)); 362 dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
@@ -345,9 +367,11 @@ static int i915_suspend(struct drm_device *dev)
345 367
346 i915_save_vga(dev); 368 i915_save_vga(dev);
347 369
348 /* Shut down the device */ 370 if (state.event == PM_EVENT_SUSPEND) {
349 pci_disable_device(dev->pdev); 371 /* Shut down the device */
350 pci_set_power_state(dev->pdev, PCI_D3hot); 372 pci_disable_device(dev->pdev);
373 pci_set_power_state(dev->pdev, PCI_D3hot);
374 }
351 375
352 return 0; 376 return 0;
353} 377}
@@ -400,9 +424,7 @@ static int i915_resume(struct drm_device *dev)
400 I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); 424 I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
401 } 425 }
402 426
403 if ((dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) && 427 I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
404 (dev_priv->saveDPLL_A & DPLL_VGA_MODE_DIS))
405 I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
406 428
407 i915_restore_palette(dev, PIPE_A); 429 i915_restore_palette(dev, PIPE_A);
408 /* Enable the plane */ 430 /* Enable the plane */
@@ -444,10 +466,9 @@ static int i915_resume(struct drm_device *dev)
444 I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); 466 I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
445 } 467 }
446 468
447 if ((dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) && 469 I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
448 (dev_priv->saveDPLL_B & DPLL_VGA_MODE_DIS)) 470
449 I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); 471 i915_restore_palette(dev, PIPE_B);
450 i915_restore_palette(dev, PIPE_A);
451 /* Enable the plane */ 472 /* Enable the plane */
452 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); 473 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
453 I915_WRITE(DSPBBASE, I915_READ(DSPBBASE)); 474 I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
@@ -485,6 +506,15 @@ static int i915_resume(struct drm_device *dev)
485 I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV); 506 I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
486 udelay(150); 507 udelay(150);
487 508
509 /* Clock gating state */
510 I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
511
512 /* Cache mode state */
513 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
514
515 /* Memory arbitration state */
516 I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
517
488 for (i = 0; i < 16; i++) { 518 for (i = 0; i < 16; i++) {
489 I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]); 519 I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
490 I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]); 520 I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
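
The suspend changes above hinge on the pm_message_t argument: return early on PM_EVENT_PRETHAW so the register save is skipped before a hibernation image is restored, and power the device down only for a real PM_EVENT_SUSPEND. A generic sketch of that pattern (not the i915 code itself):

#include <linux/pci.h>
#include <linux/pm.h>

static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	if (state.event == PM_EVENT_PRETHAW)
		return 0;	/* no register save before restoring an image */

	pci_save_state(pdev);
	/* ... device-specific register save would go here ... */

	if (state.event == PM_EVENT_SUSPEND) {
		/* only a real suspend shuts the device down */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}
	return 0;
}
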
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index f8308bfb2613..c10d128e34db 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -134,6 +134,7 @@ typedef struct drm_i915_private {
134 u32 saveVBLANK_A; 134 u32 saveVBLANK_A;
135 u32 saveVSYNC_A; 135 u32 saveVSYNC_A;
136 u32 saveBCLRPAT_A; 136 u32 saveBCLRPAT_A;
137 u32 savePIPEASTAT;
137 u32 saveDSPASTRIDE; 138 u32 saveDSPASTRIDE;
138 u32 saveDSPASIZE; 139 u32 saveDSPASIZE;
139 u32 saveDSPAPOS; 140 u32 saveDSPAPOS;
@@ -154,6 +155,7 @@ typedef struct drm_i915_private {
154 u32 saveVBLANK_B; 155 u32 saveVBLANK_B;
155 u32 saveVSYNC_B; 156 u32 saveVSYNC_B;
156 u32 saveBCLRPAT_B; 157 u32 saveBCLRPAT_B;
158 u32 savePIPEBSTAT;
157 u32 saveDSPBSTRIDE; 159 u32 saveDSPBSTRIDE;
158 u32 saveDSPBSIZE; 160 u32 saveDSPBSIZE;
159 u32 saveDSPBPOS; 161 u32 saveDSPBPOS;
@@ -182,6 +184,12 @@ typedef struct drm_i915_private {
182 u32 saveFBC_LL_BASE; 184 u32 saveFBC_LL_BASE;
183 u32 saveFBC_CONTROL; 185 u32 saveFBC_CONTROL;
184 u32 saveFBC_CONTROL2; 186 u32 saveFBC_CONTROL2;
187 u32 saveIER;
188 u32 saveIIR;
189 u32 saveIMR;
190 u32 saveCACHE_MODE_0;
191 u32 saveDSPCLK_GATE_D;
192 u32 saveMI_ARB_STATE;
185 u32 saveSWF0[16]; 193 u32 saveSWF0[16];
186 u32 saveSWF1[16]; 194 u32 saveSWF1[16];
187 u32 saveSWF2[3]; 195 u32 saveSWF2[3];
@@ -450,6 +458,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
450 */ 458 */
451#define DMA_FADD_S 0x20d4 459#define DMA_FADD_S 0x20d4
452 460
461/* Memory Interface Arbitration State
462 */
463#define MI_ARB_STATE 0x20e4
464
453/* Cache mode 0 reg. 465/* Cache mode 0 reg.
454 * - Manipulating render cache behaviour is central 466 * - Manipulating render cache behaviour is central
455 * to the concept of zone rendering, tuning this reg can help avoid 467 * to the concept of zone rendering, tuning this reg can help avoid
@@ -460,6 +472,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
460 * bit of interest either set or cleared. EG: (BIT<<16) | BIT to set. 472 * bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
461 */ 473 */
462#define Cache_Mode_0 0x2120 474#define Cache_Mode_0 0x2120
475#define CACHE_MODE_0 0x2120
463#define CM0_MASK_SHIFT 16 476#define CM0_MASK_SHIFT 16
464#define CM0_IZ_OPT_DISABLE (1<<6) 477#define CM0_IZ_OPT_DISABLE (1<<6)
465#define CM0_ZR_OPT_DISABLE (1<<5) 478#define CM0_ZR_OPT_DISABLE (1<<5)
@@ -655,6 +668,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
655/** P1 value is 2 greater than this field */ 668/** P1 value is 2 greater than this field */
656# define VGA0_PD_P1_MASK (0x1f << 0) 669# define VGA0_PD_P1_MASK (0x1f << 0)
657 670
671#define DSPCLK_GATE_D 0x6200
672
658/* I830 CRTC registers */ 673/* I830 CRTC registers */
659#define HTOTAL_A 0x60000 674#define HTOTAL_A 0x60000
660#define HBLANK_A 0x60004 675#define HBLANK_A 0x60004
@@ -1101,6 +1116,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
1101#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ 1116#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
1102 IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev)) 1117 IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev))
1103 1118
1119#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_IGD_GM(dev))
1120
1104#define PRIMARY_RINGBUFFER_SIZE (128*1024) 1121#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1105 1122
1106#endif 1123#endif
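
CACHE_MODE_0 and MI_ARB_STATE follow the masked-write convention described in the comment above: the upper 16 bits select which of the lower 16 bits the write actually changes, which is why i915_resume() ORs the saved values with 0xffff0000 to restore every bit at once. Small helper macros that express the same idea (illustrative only, not part of the patch):

/* write-enable mask in the high half, payload in the low half */
#define MASKED_BIT_SET(bit)	(((bit) << 16) | (bit))
#define MASKED_BIT_CLEAR(bit)	((bit) << 16)

/* e.g. I915_WRITE(CACHE_MODE_0, MASKED_BIT_SET(CM0_IZ_OPT_DISABLE)); */
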
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 5dc799ab86b8..833abc7e55fb 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -825,11 +825,19 @@ static u32 RADEON_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
825 return ret; 825 return ret;
826} 826}
827 827
828static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
829{
830 RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK));
831 return RADEON_READ(RS690_MC_DATA);
832}
833
828u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv) 834u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
829{ 835{
830 836
831 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) 837 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
832 return RADEON_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION); 838 return RADEON_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
839 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
840 return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
833 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) 841 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
834 return RADEON_READ_MCIND(dev_priv, R520_MC_FB_LOCATION); 842 return RADEON_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
835 else 843 else
@@ -840,6 +848,8 @@ static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
840{ 848{
841 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) 849 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
842 RADEON_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc); 850 RADEON_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
851 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
852 RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
843 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) 853 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
844 RADEON_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc); 854 RADEON_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
845 else 855 else
@@ -850,6 +860,8 @@ static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_lo
850{ 860{
851 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) 861 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
852 RADEON_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc); 862 RADEON_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
863 else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
864 RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
853 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) 865 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
854 RADEON_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc); 866 RADEON_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
855 else 867 else
@@ -1362,6 +1374,70 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
1362 } 1374 }
1363} 1375}
1364 1376
1377/* Enable or disable RS690 GART on the chip */
1378static void radeon_set_rs690gart(drm_radeon_private_t *dev_priv, int on)
1379{
1380 u32 temp;
1381
1382 if (on) {
1383 DRM_DEBUG("programming rs690 gart %08X %08lX %08X\n",
1384 dev_priv->gart_vm_start,
1385 (long)dev_priv->gart_info.bus_addr,
1386 dev_priv->gart_size);
1387
1388 temp = RS690_READ_MCIND(dev_priv, RS690_MC_MISC_CNTL);
1389 RS690_WRITE_MCIND(RS690_MC_MISC_CNTL, 0x5000);
1390
1391 RS690_WRITE_MCIND(RS690_MC_AGP_SIZE,
1392 RS690_MC_GART_EN | RS690_MC_AGP_SIZE_32MB);
1393
1394 temp = RS690_READ_MCIND(dev_priv, RS690_MC_GART_FEATURE_ID);
1395 RS690_WRITE_MCIND(RS690_MC_GART_FEATURE_ID, 0x42040800);
1396
1397 RS690_WRITE_MCIND(RS690_MC_GART_BASE,
1398 dev_priv->gart_info.bus_addr);
1399
1400 temp = RS690_READ_MCIND(dev_priv, RS690_MC_AGP_MODE_CONTROL);
1401 RS690_WRITE_MCIND(RS690_MC_AGP_MODE_CONTROL, 0x01400000);
1402
1403 RS690_WRITE_MCIND(RS690_MC_AGP_BASE,
1404 (unsigned int)dev_priv->gart_vm_start);
1405
1406 dev_priv->gart_size = 32*1024*1024;
1407 temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &
1408 0xffff0000) | (dev_priv->gart_vm_start >> 16));
1409
1410 RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, temp);
1411
1412 temp = RS690_READ_MCIND(dev_priv, RS690_MC_AGP_SIZE);
1413 RS690_WRITE_MCIND(RS690_MC_AGP_SIZE,
1414 RS690_MC_GART_EN | RS690_MC_AGP_SIZE_32MB);
1415
1416 do {
1417 temp = RS690_READ_MCIND(dev_priv, RS690_MC_GART_CACHE_CNTL);
1418 if ((temp & RS690_MC_GART_CLEAR_STATUS) ==
1419 RS690_MC_GART_CLEAR_DONE)
1420 break;
1421 DRM_UDELAY(1);
1422 } while (1);
1423
1424 RS690_WRITE_MCIND(RS690_MC_GART_CACHE_CNTL,
1425 RS690_MC_GART_CC_CLEAR);
1426 do {
1427 temp = RS690_READ_MCIND(dev_priv, RS690_MC_GART_CACHE_CNTL);
1428 if ((temp & RS690_MC_GART_CLEAR_STATUS) ==
1429 RS690_MC_GART_CLEAR_DONE)
1430 break;
1431 DRM_UDELAY(1);
1432 } while (1);
1433
1434 RS690_WRITE_MCIND(RS690_MC_GART_CACHE_CNTL,
1435 RS690_MC_GART_CC_NO_CHANGE);
1436 } else {
1437 RS690_WRITE_MCIND(RS690_MC_AGP_SIZE, RS690_MC_GART_DIS);
1438 }
1439}
1440
1365static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on) 1441static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
1366{ 1442{
1367 u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL); 1443 u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
@@ -1396,6 +1472,11 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
1396{ 1472{
1397 u32 tmp; 1473 u32 tmp;
1398 1474
1475 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
1476 radeon_set_rs690gart(dev_priv, on);
1477 return;
1478 }
1479
1399 if (dev_priv->flags & RADEON_IS_IGPGART) { 1480 if (dev_priv->flags & RADEON_IS_IGPGART) {
1400 radeon_set_igpgart(dev_priv, on); 1481 radeon_set_igpgart(dev_priv, on);
1401 return; 1482 return;
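
radeon_set_rs690gart() above spins in an unbounded do/while until the GART cache-clear status reads back as done. For illustration, the same wait written as a bounded poll; the 10000-iteration limit and the helper name are assumptions, not something the patch uses:

static int rs690_wait_gart_clear(drm_radeon_private_t *dev_priv)
{
	int i;

	for (i = 0; i < 10000; i++) {
		u32 temp = RS690_READ_MCIND(dev_priv,
					    RS690_MC_GART_CACHE_CNTL);

		if ((temp & RS690_MC_GART_CLEAR_STATUS) ==
		    RS690_MC_GART_CLEAR_DONE)
			return 0;
		DRM_UDELAY(1);
	}
	return -EBUSY;	/* chip never acknowledged the clear */
}
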
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 4434332c79bc..173ae620223a 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -123,6 +123,7 @@ enum radeon_family {
123 CHIP_R420, 123 CHIP_R420,
124 CHIP_RV410, 124 CHIP_RV410,
125 CHIP_RS400, 125 CHIP_RS400,
126 CHIP_RS690,
126 CHIP_RV515, 127 CHIP_RV515,
127 CHIP_R520, 128 CHIP_R520,
128 CHIP_RV530, 129 CHIP_RV530,
@@ -467,6 +468,36 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
467#define RADEON_IGPGART_ENABLE 0x38 468#define RADEON_IGPGART_ENABLE 0x38
468#define RADEON_IGPGART_UNK_39 0x39 469#define RADEON_IGPGART_UNK_39 0x39
469 470
471#define RS690_MC_INDEX 0x78
472# define RS690_MC_INDEX_MASK 0x1ff
473# define RS690_MC_INDEX_WR_EN (1 << 9)
474# define RS690_MC_INDEX_WR_ACK 0x7f
475#define RS690_MC_DATA 0x7c
476
477#define RS690_MC_MISC_CNTL 0x18
478#define RS690_MC_GART_FEATURE_ID 0x2b
479#define RS690_MC_GART_BASE 0x2c
480#define RS690_MC_GART_CACHE_CNTL 0x2e
481# define RS690_MC_GART_CC_NO_CHANGE 0x0
482# define RS690_MC_GART_CC_CLEAR 0x1
483# define RS690_MC_GART_CLEAR_STATUS (1 << 1)
484# define RS690_MC_GART_CLEAR_DONE (0 << 1)
485# define RS690_MC_GART_CLEAR_PENDING (1 << 1)
486#define RS690_MC_AGP_SIZE 0x38
487# define RS690_MC_GART_DIS 0x0
488# define RS690_MC_GART_EN 0x1
489# define RS690_MC_AGP_SIZE_32MB (0 << 1)
490# define RS690_MC_AGP_SIZE_64MB (1 << 1)
491# define RS690_MC_AGP_SIZE_128MB (2 << 1)
492# define RS690_MC_AGP_SIZE_256MB (3 << 1)
493# define RS690_MC_AGP_SIZE_512MB (4 << 1)
494# define RS690_MC_AGP_SIZE_1GB (5 << 1)
495# define RS690_MC_AGP_SIZE_2GB (6 << 1)
496#define RS690_MC_AGP_MODE_CONTROL 0x39
497#define RS690_MC_FB_LOCATION 0x100
498#define RS690_MC_AGP_LOCATION 0x101
499#define RS690_MC_AGP_BASE 0x102
500
470#define R520_MC_IND_INDEX 0x70 501#define R520_MC_IND_INDEX 0x70
471#define R520_MC_IND_WR_EN (1<<24) 502#define R520_MC_IND_WR_EN (1<<24)
472#define R520_MC_IND_DATA 0x74 503#define R520_MC_IND_DATA 0x74
@@ -1076,6 +1107,13 @@ do { \
1076 RADEON_WRITE(R520_MC_IND_INDEX, 0); \ 1107 RADEON_WRITE(R520_MC_IND_INDEX, 0); \
1077 } while (0) 1108 } while (0)
1078 1109
1110#define RS690_WRITE_MCIND( addr, val ) \
1111do { \
1112 RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK)); \
1113 RADEON_WRITE(RS690_MC_DATA, val); \
1114 RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); \
1115} while (0)
1116
1079#define CP_PACKET0( reg, n ) \ 1117#define CP_PACKET0( reg, n ) \
1080 (RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2)) 1118 (RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
1081#define CP_PACKET0_TABLE( reg, n ) \ 1119#define CP_PACKET0_TABLE( reg, n ) \
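
The RS690 MC block defined above is reached through an index/data window rather than direct MMIO: the register number goes into RS690_MC_INDEX (with RS690_MC_INDEX_WR_EN set for writes), the payload moves through RS690_MC_DATA, and RS690_WRITE_MCIND() parks the index on RS690_MC_INDEX_WR_ACK afterwards. A read-modify-write of one MC register then looks like this sketch (it assumes the RS690_READ_MCIND() helper added in radeon_cp.c is in scope; the function name is hypothetical):

static void rs690_mc_set_bits(drm_radeon_private_t *dev_priv,
			      int addr, u32 bits)
{
	u32 val = RS690_READ_MCIND(dev_priv, addr);

	RS690_WRITE_MCIND(addr, val | bits);	/* macro uses dev_priv from scope */
}
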
diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
index bb09413d5a21..88590d040046 100644
--- a/drivers/char/hvc_rtas.c
+++ b/drivers/char/hvc_rtas.c
@@ -76,7 +76,7 @@ static struct hv_ops hvc_rtas_get_put_ops = {
76 .put_chars = hvc_rtas_write_console, 76 .put_chars = hvc_rtas_write_console,
77}; 77};
78 78
79static int hvc_rtas_init(void) 79static int __init hvc_rtas_init(void)
80{ 80{
81 struct hvc_struct *hp; 81 struct hvc_struct *hp;
82 82
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index 00b8a84b0319..ffa0efce0aed 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -45,7 +45,7 @@ config CARDMAN_4040
45 45
46config IPWIRELESS 46config IPWIRELESS
47 tristate "IPWireless 3G UMTS PCMCIA card support" 47 tristate "IPWireless 3G UMTS PCMCIA card support"
48 depends on PCMCIA 48 depends on PCMCIA && NETDEVICES
49 select PPP 49 select PPP
50 help 50 help
51 This is a driver for 3G UMTS PCMCIA card from IPWireless company. In 51 This is a driver for 3G UMTS PCMCIA card from IPWireless company. In
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 64926aa990db..89a29cd93783 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1006,14 +1006,6 @@ static int __cpufreq_remove_dev (struct sys_device * sys_dev)
1006 } 1006 }
1007#endif 1007#endif
1008 1008
1009
1010 if (!kobject_get(&data->kobj)) {
1011 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1012 cpufreq_debug_enable_ratelimit();
1013 unlock_policy_rwsem_write(cpu);
1014 return -EFAULT;
1015 }
1016
1017#ifdef CONFIG_SMP 1009#ifdef CONFIG_SMP
1018 1010
1019#ifdef CONFIG_HOTPLUG_CPU 1011#ifdef CONFIG_HOTPLUG_CPU
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 60f71e6345e3..d73663a52324 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -219,7 +219,8 @@ static void poll_idle_init(struct cpuidle_device *dev)
219 219
220 cpuidle_set_statedata(state, NULL); 220 cpuidle_set_statedata(state, NULL);
221 221
222 snprintf(state->name, CPUIDLE_NAME_LEN, "C0 (poll idle)"); 222 snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
223 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
223 state->exit_latency = 0; 224 state->exit_latency = 0;
224 state->target_residency = 0; 225 state->target_residency = 0;
225 state->power_usage = -1; 226 state->power_usage = -1;
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 088ea74edd34..69102ca05685 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -218,16 +218,23 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
218 return sprintf(buf, "%u\n", state->_name);\ 218 return sprintf(buf, "%u\n", state->_name);\
219} 219}
220 220
221static ssize_t show_state_name(struct cpuidle_state *state, char *buf) 221#define define_show_state_str_function(_name) \
222{ 222static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
223 return sprintf(buf, "%s\n", state->name); 223{ \
224 if (state->_name[0] == '\0')\
225 return sprintf(buf, "<null>\n");\
226 return sprintf(buf, "%s\n", state->_name);\
224} 227}
225 228
226define_show_state_function(exit_latency) 229define_show_state_function(exit_latency)
227define_show_state_function(power_usage) 230define_show_state_function(power_usage)
228define_show_state_function(usage) 231define_show_state_function(usage)
229define_show_state_function(time) 232define_show_state_function(time)
233define_show_state_str_function(name)
234define_show_state_str_function(desc)
235
230define_one_state_ro(name, show_state_name); 236define_one_state_ro(name, show_state_name);
237define_one_state_ro(desc, show_state_desc);
231define_one_state_ro(latency, show_state_exit_latency); 238define_one_state_ro(latency, show_state_exit_latency);
232define_one_state_ro(power, show_state_power_usage); 239define_one_state_ro(power, show_state_power_usage);
233define_one_state_ro(usage, show_state_usage); 240define_one_state_ro(usage, show_state_usage);
@@ -235,6 +242,7 @@ define_one_state_ro(time, show_state_time);
235 242
236static struct attribute *cpuidle_state_default_attrs[] = { 243static struct attribute *cpuidle_state_default_attrs[] = {
237 &attr_name.attr, 244 &attr_name.attr,
245 &attr_desc.attr,
238 &attr_latency.attr, 246 &attr_latency.attr,
239 &attr_power.attr, 247 &attr_power.attr,
240 &attr_usage.attr, 248 &attr_usage.attr,
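
For reference, this is what define_show_state_str_function(name) in the hunk above expands to once the preprocessor has run; the only behavioural change from the old open-coded show_state_name() is the "<null>" fallback for states whose name was never filled in:

static ssize_t show_state_name(struct cpuidle_state *state, char *buf)
{
	if (state->name[0] == '\0')
		return sprintf(buf, "<null>\n");
	return sprintf(buf, "%s\n", state->name);
}
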
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index dfbf24c4033c..3110bf7014f7 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -463,7 +463,7 @@ struct hifn_device
463 463
464 unsigned int pk_clk_freq; 464 unsigned int pk_clk_freq;
465 465
466#ifdef CRYPTO_DEV_HIFN_795X_RNG 466#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
467 unsigned int rng_wait_time; 467 unsigned int rng_wait_time;
468 ktime_t rngtime; 468 ktime_t rngtime;
469 struct hwrng rng; 469 struct hwrng rng;
@@ -795,7 +795,7 @@ static struct pci2id {
795 } 795 }
796}; 796};
797 797
798#ifdef CRYPTO_DEV_HIFN_795X_RNG 798#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
799static int hifn_rng_data_present(struct hwrng *rng, int wait) 799static int hifn_rng_data_present(struct hwrng *rng, int wait)
800{ 800{
801 struct hifn_device *dev = (struct hifn_device *)rng->priv; 801 struct hifn_device *dev = (struct hifn_device *)rng->priv;
@@ -880,7 +880,7 @@ static int hifn_init_pubrng(struct hifn_device *dev)
880 dprintk("Chip %s: RNG engine has been successfully initialised.\n", 880 dprintk("Chip %s: RNG engine has been successfully initialised.\n",
881 dev->name); 881 dev->name);
882 882
883#ifdef CRYPTO_DEV_HIFN_795X_RNG 883#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
884 /* First value must be discarded */ 884 /* First value must be discarded */
885 hifn_read_1(dev, HIFN_1_RNG_DATA); 885 hifn_read_1(dev, HIFN_1_RNG_DATA);
886 dev->rngtime = ktime_get(); 886 dev->rngtime = ktime_get();
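
The three hunks above fix the same class of bug: a Kconfig symbol is only visible to C code with the CONFIG_ prefix, so #ifdef CRYPTO_DEV_HIFN_795X_RNG never matched and the RNG support was silently compiled out. The usual way to keep the rest of a driver free of such #ifdefs is to pair the guarded code with an inline stub, sketched here with a hypothetical helper name (not the driver's actual function):

#ifdef CONFIG_CRYPTO_DEV_HIFN_795X_RNG
static int hifn_example_rng_setup(struct hifn_device *dev)
{
	dev->rngtime = ktime_get();	/* these fields exist only with the option on */
	return 0;
}
#else
static inline int hifn_example_rng_setup(struct hifn_device *dev)
{
	return 0;	/* RNG support compiled out */
}
#endif
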
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 653265a40b7f..4072449ad1cd 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -10,10 +10,9 @@
10 10
11static char dmi_empty_string[] = " "; 11static char dmi_empty_string[] = " ";
12 12
13static char * __init dmi_string(const struct dmi_header *dm, u8 s) 13static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
14{ 14{
15 const u8 *bp = ((u8 *) dm) + dm->length; 15 const u8 *bp = ((u8 *) dm) + dm->length;
16 char *str = "";
17 16
18 if (s) { 17 if (s) {
19 s--; 18 s--;
@@ -28,14 +27,29 @@ static char * __init dmi_string(const struct dmi_header *dm, u8 s)
28 27
29 if (!memcmp(bp, dmi_empty_string, cmp_len)) 28 if (!memcmp(bp, dmi_empty_string, cmp_len))
30 return dmi_empty_string; 29 return dmi_empty_string;
31 str = dmi_alloc(len); 30 return bp;
32 if (str != NULL)
33 strcpy(str, bp);
34 else
35 printk(KERN_ERR "dmi_string: cannot allocate %Zu bytes.\n", len);
36 } 31 }
37 } 32 }
38 33
34 return "";
35}
36
37static char * __init dmi_string(const struct dmi_header *dm, u8 s)
38{
39 const char *bp = dmi_string_nosave(dm, s);
40 char *str;
41 size_t len;
42
43 if (bp == dmi_empty_string)
44 return dmi_empty_string;
45
46 len = strlen(bp) + 1;
47 str = dmi_alloc(len);
48 if (str != NULL)
49 strcpy(str, bp);
50 else
51 printk(KERN_ERR "dmi_string: cannot allocate %Zu bytes.\n", len);
52
39 return str; 53 return str;
40} 54}
41 55
@@ -167,10 +181,30 @@ static void __init dmi_save_type(const struct dmi_header *dm, int slot, int inde
167 dmi_ident[slot] = s; 181 dmi_ident[slot] = s;
168} 182}
169 183
184static void __init dmi_save_one_device(int type, const char *name)
185{
186 struct dmi_device *dev;
187
188 /* No duplicate device */
189 if (dmi_find_device(type, name, NULL))
190 return;
191
192 dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
193 if (!dev) {
194 printk(KERN_ERR "dmi_save_one_device: out of memory.\n");
195 return;
196 }
197
198 dev->type = type;
199 strcpy((char *)(dev + 1), name);
200 dev->name = (char *)(dev + 1);
201 dev->device_data = NULL;
202 list_add(&dev->list, &dmi_devices);
203}
204
170static void __init dmi_save_devices(const struct dmi_header *dm) 205static void __init dmi_save_devices(const struct dmi_header *dm)
171{ 206{
172 int i, count = (dm->length - sizeof(struct dmi_header)) / 2; 207 int i, count = (dm->length - sizeof(struct dmi_header)) / 2;
173 struct dmi_device *dev;
174 208
175 for (i = 0; i < count; i++) { 209 for (i = 0; i < count; i++) {
176 const char *d = (char *)(dm + 1) + (i * 2); 210 const char *d = (char *)(dm + 1) + (i * 2);
@@ -179,23 +213,10 @@ static void __init dmi_save_devices(const struct dmi_header *dm)
179 if ((*d & 0x80) == 0) 213 if ((*d & 0x80) == 0)
180 continue; 214 continue;
181 215
182 dev = dmi_alloc(sizeof(*dev)); 216 dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d + 1)));
183 if (!dev) {
184 printk(KERN_ERR "dmi_save_devices: out of memory.\n");
185 break;
186 }
187
188 dev->type = *d++ & 0x7f;
189 dev->name = dmi_string(dm, *d);
190 dev->device_data = NULL;
191 list_add(&dev->list, &dmi_devices);
192 } 217 }
193} 218}
194 219
195static struct dmi_device empty_oem_string_dev = {
196 .name = dmi_empty_string,
197};
198
199static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) 220static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
200{ 221{
201 int i, count = *(u8 *)(dm + 1); 222 int i, count = *(u8 *)(dm + 1);
@@ -204,10 +225,8 @@ static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
204 for (i = 1; i <= count; i++) { 225 for (i = 1; i <= count; i++) {
205 char *devname = dmi_string(dm, i); 226 char *devname = dmi_string(dm, i);
206 227
207 if (!strcmp(devname, dmi_empty_string)) { 228 if (devname == dmi_empty_string)
208 list_add(&empty_oem_string_dev.list, &dmi_devices);
209 continue; 229 continue;
210 }
211 230
212 dev = dmi_alloc(sizeof(*dev)); 231 dev = dmi_alloc(sizeof(*dev));
213 if (!dev) { 232 if (!dev) {
@@ -253,23 +272,12 @@ static void __init dmi_save_ipmi_device(const struct dmi_header *dm)
253static void __init dmi_save_extended_devices(const struct dmi_header *dm) 272static void __init dmi_save_extended_devices(const struct dmi_header *dm)
254{ 273{
255 const u8 *d = (u8*) dm + 5; 274 const u8 *d = (u8*) dm + 5;
256 struct dmi_device *dev;
257 275
258 /* Skip disabled device */ 276 /* Skip disabled device */
259 if ((*d & 0x80) == 0) 277 if ((*d & 0x80) == 0)
260 return; 278 return;
261 279
262 dev = dmi_alloc(sizeof(*dev)); 280 dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d - 1)));
263 if (!dev) {
264 printk(KERN_ERR "dmi_save_extended_devices: out of memory.\n");
265 return;
266 }
267
268 dev->type = *d-- & 0x7f;
269 dev->name = dmi_string(dm, *d);
270 dev->device_data = NULL;
271
272 list_add(&dev->list, &dmi_devices);
273} 281}
274 282
275/* 283/*
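
dmi_save_one_device() above makes a single dmi_alloc() of sizeof(*dev) plus the string, points dev->name at the tail of that block, and refuses to add a second entry with the same type and name. A consumer can then walk the resulting list with the existing dmi_find_device() iterator; a usage sketch (the function name and printk are illustrative only):

#include <linux/dmi.h>
#include <linux/kernel.h>

static void example_print_onboard_devices(int type)
{
	const struct dmi_device *dev = NULL;

	/* NULL name matches any device of the given type */
	while ((dev = dmi_find_device(type, NULL, dev)) != NULL)
		printk(KERN_INFO "DMI device type %d: %s\n", type, dev->name);
}
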
diff --git a/drivers/hid/hid-input-quirks.c b/drivers/hid/hid-input-quirks.c
index a870ba58faa3..dceadd0c1419 100644
--- a/drivers/hid/hid-input-quirks.c
+++ b/drivers/hid/hid-input-quirks.c
@@ -352,7 +352,7 @@ int hidinput_mapping_quirks(struct hid_usage *usage,
352 return 0; 352 return 0;
353} 353}
354 354
355void hidinput_event_quirks(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value) 355int hidinput_event_quirks(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value)
356{ 356{
357 struct input_dev *input; 357 struct input_dev *input;
358 358
@@ -362,34 +362,34 @@ void hidinput_event_quirks(struct hid_device *hid, struct hid_field *field, stru
362 || ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_7) && (usage->hid == 0x00090007))) { 362 || ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_7) && (usage->hid == 0x00090007))) {
363 if (value) hid->quirks |= HID_QUIRK_2WHEEL_MOUSE_HACK_ON; 363 if (value) hid->quirks |= HID_QUIRK_2WHEEL_MOUSE_HACK_ON;
364 else hid->quirks &= ~HID_QUIRK_2WHEEL_MOUSE_HACK_ON; 364 else hid->quirks &= ~HID_QUIRK_2WHEEL_MOUSE_HACK_ON;
365 return; 365 return 1;
366 } 366 }
367 367
368 if ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_B8) && 368 if ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_B8) &&
369 (usage->type == EV_REL) && 369 (usage->type == EV_REL) &&
370 (usage->code == REL_WHEEL)) { 370 (usage->code == REL_WHEEL)) {
371 hid->delayed_value = value; 371 hid->delayed_value = value;
372 return; 372 return 1;
373 } 373 }
374 374
375 if ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_B8) && 375 if ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_B8) &&
376 (usage->hid == 0x000100b8)) { 376 (usage->hid == 0x000100b8)) {
377 input_event(input, EV_REL, value ? REL_HWHEEL : REL_WHEEL, hid->delayed_value); 377 input_event(input, EV_REL, value ? REL_HWHEEL : REL_WHEEL, hid->delayed_value);
378 return; 378 return 1;
379 } 379 }
380 380
381 if ((hid->quirks & HID_QUIRK_INVERT_HWHEEL) && (usage->code == REL_HWHEEL)) { 381 if ((hid->quirks & HID_QUIRK_INVERT_HWHEEL) && (usage->code == REL_HWHEEL)) {
382 input_event(input, usage->type, usage->code, -value); 382 input_event(input, usage->type, usage->code, -value);
383 return; 383 return 1;
384 } 384 }
385 385
386 if ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_ON) && (usage->code == REL_WHEEL)) { 386 if ((hid->quirks & HID_QUIRK_2WHEEL_MOUSE_HACK_ON) && (usage->code == REL_WHEEL)) {
387 input_event(input, usage->type, REL_HWHEEL, value); 387 input_event(input, usage->type, REL_HWHEEL, value);
388 return; 388 return 1;
389 } 389 }
390 390
391 if ((hid->quirks & HID_QUIRK_APPLE_HAS_FN) && hidinput_apple_event(hid, input, usage, value)) 391 if ((hid->quirks & HID_QUIRK_APPLE_HAS_FN) && hidinput_apple_event(hid, input, usage, value))
392 return; 392 return 1;
393 393
394 /* Handling MS keyboards special buttons */ 394 /* Handling MS keyboards special buttons */
395 if (hid->quirks & HID_QUIRK_MICROSOFT_KEYS && 395 if (hid->quirks & HID_QUIRK_MICROSOFT_KEYS &&
@@ -416,8 +416,9 @@ void hidinput_event_quirks(struct hid_device *hid, struct hid_field *field, stru
416 if (hid->quirks & HID_QUIRK_HWHEEL_WHEEL_INVERT && 416 if (hid->quirks & HID_QUIRK_HWHEEL_WHEEL_INVERT &&
417 usage->type == EV_REL && usage->code == REL_HWHEEL) { 417 usage->type == EV_REL && usage->code == REL_HWHEEL) {
418 input_event(input, usage->type, REL_WHEEL, -value); 418 input_event(input, usage->type, REL_WHEEL, -value);
419 return; 419 return 1;
420 } 420 }
421 return 0;
421} 422}
422 423
423 424
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 5325d98b4328..5a38fb27d69f 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -97,6 +97,7 @@ struct hidinput_key_translation {
97#define APPLE_FLAG_FKEY 0x01 97#define APPLE_FLAG_FKEY 0x01
98 98
99static struct hidinput_key_translation apple_fn_keys[] = { 99static struct hidinput_key_translation apple_fn_keys[] = {
100 { KEY_BACKSPACE, KEY_DELETE },
100 { KEY_F1, KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY }, 101 { KEY_F1, KEY_BRIGHTNESSDOWN, APPLE_FLAG_FKEY },
101 { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY }, 102 { KEY_F2, KEY_BRIGHTNESSUP, APPLE_FLAG_FKEY },
102 { KEY_F3, KEY_CYCLEWINDOWS, APPLE_FLAG_FKEY }, /* Exposé */ 103 { KEY_F3, KEY_CYCLEWINDOWS, APPLE_FLAG_FKEY }, /* Exposé */
@@ -109,6 +110,10 @@ static struct hidinput_key_translation apple_fn_keys[] = {
109 { KEY_F10, KEY_MUTE, APPLE_FLAG_FKEY }, 110 { KEY_F10, KEY_MUTE, APPLE_FLAG_FKEY },
110 { KEY_F11, KEY_VOLUMEDOWN, APPLE_FLAG_FKEY }, 111 { KEY_F11, KEY_VOLUMEDOWN, APPLE_FLAG_FKEY },
111 { KEY_F12, KEY_VOLUMEUP, APPLE_FLAG_FKEY }, 112 { KEY_F12, KEY_VOLUMEUP, APPLE_FLAG_FKEY },
113 { KEY_UP, KEY_PAGEUP },
114 { KEY_DOWN, KEY_PAGEDOWN },
115 { KEY_LEFT, KEY_HOME },
116 { KEY_RIGHT, KEY_END },
112 { } 117 { }
113}; 118};
114 119
@@ -854,7 +859,8 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
854 return; 859 return;
855 860
856 /* handle input events for quirky devices */ 861 /* handle input events for quirky devices */
857 hidinput_event_quirks(hid, field, usage, value); 862 if (hidinput_event_quirks(hid, field, usage, value))
863 return;
858 864
859 if (usage->hat_min < usage->hat_max || usage->hat_dir) { 865 if (usage->hat_min < usage->hat_max || usage->hat_dir) {
860 int hat_dir = usage->hat_dir; 866 int hat_dir = usage->hat_dir;
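
apple_fn_keys[] above is a zero-terminated from/to table; the new backspace and arrow-key entries only take effect while the Fn key is held, at which point the driver substitutes the mapped usage. A hypothetical lookup helper in the same spirit (field names follow struct hidinput_key_translation earlier in this file; this is not the driver's exact code):

static u16 example_fn_translate(const struct hidinput_key_translation *table,
				u16 keycode)
{
	const struct hidinput_key_translation *t;

	for (t = table; t->from; t++)
		if (t->from == keycode)
			return t->to;
	return keycode;		/* no translation entry: pass through */
}
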
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index b77b61e0cd7b..e6d05f6b1c1c 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -66,6 +66,12 @@
66#define USB_DEVICE_ID_APPLE_ALU_ANSI 0x0220 66#define USB_DEVICE_ID_APPLE_ALU_ANSI 0x0220
67#define USB_DEVICE_ID_APPLE_ALU_ISO 0x0221 67#define USB_DEVICE_ID_APPLE_ALU_ISO 0x0221
68#define USB_DEVICE_ID_APPLE_ALU_JIS 0x0222 68#define USB_DEVICE_ID_APPLE_ALU_JIS 0x0222
69#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI 0x0229
70#define USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO 0x022a
71#define USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS 0x022b
72#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI 0x022c
73#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO 0x022d
74#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS 0x022e
69#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a 75#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
70#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b 76#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
71#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 77#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
@@ -193,6 +199,17 @@
193#define USB_DEVICE_ID_GTCO_502 0x0502 199#define USB_DEVICE_ID_GTCO_502 0x0502
194#define USB_DEVICE_ID_GTCO_503 0x0503 200#define USB_DEVICE_ID_GTCO_503 0x0503
195#define USB_DEVICE_ID_GTCO_504 0x0504 201#define USB_DEVICE_ID_GTCO_504 0x0504
202#define USB_DEVICE_ID_GTCO_600 0x0600
203#define USB_DEVICE_ID_GTCO_601 0x0601
204#define USB_DEVICE_ID_GTCO_602 0x0602
205#define USB_DEVICE_ID_GTCO_603 0x0603
206#define USB_DEVICE_ID_GTCO_604 0x0604
207#define USB_DEVICE_ID_GTCO_605 0x0605
208#define USB_DEVICE_ID_GTCO_606 0x0606
209#define USB_DEVICE_ID_GTCO_607 0x0607
210#define USB_DEVICE_ID_GTCO_608 0x0608
211#define USB_DEVICE_ID_GTCO_609 0x0609
212#define USB_DEVICE_ID_GTCO_609 0x0609
196#define USB_DEVICE_ID_GTCO_1000 0x1000 213#define USB_DEVICE_ID_GTCO_1000 0x1000
197#define USB_DEVICE_ID_GTCO_1001 0x1001 214#define USB_DEVICE_ID_GTCO_1001 0x1001
198#define USB_DEVICE_ID_GTCO_1002 0x1002 215#define USB_DEVICE_ID_GTCO_1002 0x1002
@@ -200,7 +217,7 @@
200#define USB_DEVICE_ID_GTCO_1004 0x1004 217#define USB_DEVICE_ID_GTCO_1004 0x1004
201#define USB_DEVICE_ID_GTCO_1005 0x1005 218#define USB_DEVICE_ID_GTCO_1005 0x1005
202#define USB_DEVICE_ID_GTCO_1006 0x1006 219#define USB_DEVICE_ID_GTCO_1006 0x1006
203 220#define USB_DEVICE_ID_GTCO_1007 0x1007
204#define USB_VENDOR_ID_HAPP 0x078b 221#define USB_VENDOR_ID_HAPP 0x078b
205#define USB_DEVICE_ID_UGCI_DRIVING 0x0010 222#define USB_DEVICE_ID_UGCI_DRIVING 0x0010
206#define USB_DEVICE_ID_UGCI_FLYING 0x0020 223#define USB_DEVICE_ID_UGCI_FLYING 0x0020
@@ -368,6 +385,7 @@
368#define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002 385#define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002
369#define USB_DEVICE_ID_VERNIER_SKIP 0x0003 386#define USB_DEVICE_ID_VERNIER_SKIP 0x0003
370#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004 387#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004
388#define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006
371 389
372#define USB_VENDOR_ID_WACOM 0x056a 390#define USB_VENDOR_ID_WACOM 0x056a
373 391
@@ -496,6 +514,16 @@ static const struct hid_blacklist {
496 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502, HID_QUIRK_IGNORE }, 514 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502, HID_QUIRK_IGNORE },
497 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503, HID_QUIRK_IGNORE }, 515 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503, HID_QUIRK_IGNORE },
498 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504, HID_QUIRK_IGNORE }, 516 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504, HID_QUIRK_IGNORE },
517 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_600, HID_QUIRK_IGNORE },
518 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_601, HID_QUIRK_IGNORE },
519 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_602, HID_QUIRK_IGNORE },
520 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_603, HID_QUIRK_IGNORE },
521 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_604, HID_QUIRK_IGNORE },
522 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_605, HID_QUIRK_IGNORE },
523 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_606, HID_QUIRK_IGNORE },
524 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_607, HID_QUIRK_IGNORE },
525 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_608, HID_QUIRK_IGNORE },
526 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_609, HID_QUIRK_IGNORE },
499 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000, HID_QUIRK_IGNORE }, 527 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000, HID_QUIRK_IGNORE },
500 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001, HID_QUIRK_IGNORE }, 528 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001, HID_QUIRK_IGNORE },
501 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002, HID_QUIRK_IGNORE }, 529 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002, HID_QUIRK_IGNORE },
@@ -503,6 +531,7 @@ static const struct hid_blacklist {
503 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004, HID_QUIRK_IGNORE }, 531 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004, HID_QUIRK_IGNORE },
504 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005, HID_QUIRK_IGNORE }, 532 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005, HID_QUIRK_IGNORE },
505 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006, HID_QUIRK_IGNORE }, 533 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006, HID_QUIRK_IGNORE },
534 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007, HID_QUIRK_IGNORE },
506 { USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA, HID_QUIRK_IGNORE }, 535 { USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA, HID_QUIRK_IGNORE },
507 { USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO, HID_QUIRK_IGNORE }, 536 { USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO, HID_QUIRK_IGNORE },
508 { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY, HID_QUIRK_IGNORE }, 537 { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY, HID_QUIRK_IGNORE },
@@ -541,6 +570,7 @@ static const struct hid_blacklist {
541 { USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP, HID_QUIRK_IGNORE }, 570 { USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP, HID_QUIRK_IGNORE },
542 { USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP, HID_QUIRK_IGNORE }, 571 { USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP, HID_QUIRK_IGNORE },
543 { USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS, HID_QUIRK_IGNORE }, 572 { USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS, HID_QUIRK_IGNORE },
573 { USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LCSPEC, HID_QUIRK_IGNORE },
544 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_4_PHIDGETSERVO_20, HID_QUIRK_IGNORE }, 574 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_4_PHIDGETSERVO_20, HID_QUIRK_IGNORE },
545 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20, HID_QUIRK_IGNORE }, 575 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20, HID_QUIRK_IGNORE },
546 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT, HID_QUIRK_IGNORE }, 576 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT, HID_QUIRK_IGNORE },
@@ -593,6 +623,12 @@ static const struct hid_blacklist {
593 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI, HID_QUIRK_APPLE_HAS_FN }, 623 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI, HID_QUIRK_APPLE_HAS_FN },
594 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD }, 624 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
595 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS, HID_QUIRK_APPLE_HAS_FN }, 625 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS, HID_QUIRK_APPLE_HAS_FN },
626 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
627 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE | HID_QUIRK_APPLE_ISO_KEYBOARD },
628 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
629 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_HAS_FN },
630 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
631 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_HAS_FN },
596 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, 632 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
597 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, 633 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
598 634
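
The new GTCO, Vernier and Apple rows above all land in the same hid_blacklist[] table, which the usbhid core scans by vendor/product at connect time, applying the matching quirk flags to the device. A sketch of that lookup (field names assumed from the table's initializers; not the core's exact code):

static u32 example_lookup_quirks(u16 idVendor, u16 idProduct)
{
	int n;

	for (n = 0; hid_blacklist[n].idVendor; n++)
		if (hid_blacklist[n].idVendor == idVendor &&
		    hid_blacklist[n].idProduct == idProduct)
			return hid_blacklist[n].quirks;
	return 0;	/* no quirks for this device */
}
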
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 410ffe4e9d80..368879ff5d8c 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -143,6 +143,16 @@ config SENSORS_ADT7470
143 This driver can also be built as a module. If so, the module 143 This driver can also be built as a module. If so, the module
144 will be called adt7470. 144 will be called adt7470.
145 145
146config SENSORS_ADT7473
147 tristate "Analog Devices ADT7473"
148 depends on I2C && EXPERIMENTAL
149 help
150 If you say yes here you get support for the Analog Devices
151 ADT7473 temperature monitoring chips.
152
153 This driver can also be built as a module. If so, the module
154 will be called adt7473.
155
146config SENSORS_K8TEMP 156config SENSORS_K8TEMP
147 tristate "AMD Athlon64/FX or Opteron temperature sensor" 157 tristate "AMD Athlon64/FX or Opteron temperature sensor"
148 depends on X86 && PCI && EXPERIMENTAL 158 depends on X86 && PCI && EXPERIMENTAL
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 824161337f1c..3bdb05a5cbd7 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o
24obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o 24obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o
25obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o 25obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
26obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o 26obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o
27obj-$(CONFIG_SENSORS_ADT7473) += adt7473.o
27obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o 28obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
28obj-$(CONFIG_SENSORS_AMS) += ams/ 29obj-$(CONFIG_SENSORS_AMS) += ams/
29obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o 30obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
diff --git a/drivers/hwmon/ad7418.c b/drivers/hwmon/ad7418.c
index fcd7fe78f3f9..466b9ee92797 100644
--- a/drivers/hwmon/ad7418.c
+++ b/drivers/hwmon/ad7418.c
@@ -26,7 +26,7 @@
26#define DRV_VERSION "0.3" 26#define DRV_VERSION "0.3"
27 27
28/* Addresses to scan */ 28/* Addresses to scan */
29static unsigned short normal_i2c[] = { 0x28, I2C_CLIENT_END }; 29static const unsigned short normal_i2c[] = { 0x28, I2C_CLIENT_END };
30/* Insmod parameters */ 30/* Insmod parameters */
31I2C_CLIENT_INSMOD_3(ad7416, ad7417, ad7418); 31I2C_CLIENT_INSMOD_3(ad7416, ad7417, ad7418);
32 32
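
This and the following hwmon hunks constify the probe-address arrays; the shape is unchanged: a list of candidate I2C addresses terminated by the I2C_CLIENT_END sentinel, which is what lets the i2c core walk the list without a separate length. An illustrative walk (not the core's implementation; requires linux/i2c.h and linux/kernel.h):

static void example_walk_addresses(const unsigned short *addrs)
{
	int i;

	for (i = 0; addrs[i] != I2C_CLIENT_END; i++)
		printk(KERN_DEBUG "candidate address 0x%02x\n", addrs[i]);
}
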
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index b96be772e498..ecbf69484bf5 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -31,10 +31,8 @@
31 31
32 32
33/* Addresses to scan */ 33/* Addresses to scan */
34static unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 34static const unsigned short normal_i2c[] = {
35 0x29, 0x2a, 0x2b, 35 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
36 0x4c, 0x4d, 0x4e,
37 I2C_CLIENT_END };
38 36
39/* Insmod parameters */ 37/* Insmod parameters */
40I2C_CLIENT_INSMOD_8(adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm, 38I2C_CLIENT_INSMOD_8(adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm,
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index e96c3725203d..1d76de7d75c7 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -62,7 +62,7 @@
62 * NE1619 has two possible addresses: 0x2c and 0x2d. 62 * NE1619 has two possible addresses: 0x2c and 0x2d.
63 */ 63 */
64 64
65static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 65static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
66 66
67/* 67/*
68 * Insmod parameters 68 * Insmod parameters
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 8002f68240c4..904c6ce9d83f 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -35,7 +35,7 @@
35#include <linux/mutex.h> 35#include <linux/mutex.h>
36 36
37/* Addresses to scan */ 37/* Addresses to scan */
38static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 38static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
39 39
40/* Insmod parameters */ 40/* Insmod parameters */
41I2C_CLIENT_INSMOD_1(adm1026); 41I2C_CLIENT_INSMOD_1(adm1026);
@@ -1624,6 +1624,7 @@ static struct attribute *adm1026_attributes_temp3[] = {
1624 &dev_attr_temp3_crit_enable.attr, 1624 &dev_attr_temp3_crit_enable.attr,
1625 &dev_attr_temp3_auto_point1_pwm.attr, 1625 &dev_attr_temp3_auto_point1_pwm.attr,
1626 &dev_attr_temp3_auto_point2_pwm.attr, 1626 &dev_attr_temp3_auto_point2_pwm.attr,
1627 NULL
1627}; 1628};
1628 1629
1629static const struct attribute_group adm1026_group_temp3 = { 1630static const struct attribute_group adm1026_group_temp3 = {
@@ -1639,6 +1640,7 @@ static struct attribute *adm1026_attributes_in8_9[] = {
1639 &sensor_dev_attr_in9_max.dev_attr.attr, 1640 &sensor_dev_attr_in9_max.dev_attr.attr,
1640 &sensor_dev_attr_in9_min.dev_attr.attr, 1641 &sensor_dev_attr_in9_min.dev_attr.attr,
1641 &sensor_dev_attr_in9_alarm.dev_attr.attr, 1642 &sensor_dev_attr_in9_alarm.dev_attr.attr,
1643 NULL
1642}; 1644};
1643 1645
1644static const struct attribute_group adm1026_group_in8_9 = { 1646static const struct attribute_group adm1026_group_in8_9 = {
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 0bc897dffa27..2c6608d453c2 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -39,10 +39,8 @@
39 * Addresses to scan 39 * Addresses to scan
40 */ 40 */
41 41
42static unsigned short normal_i2c[] = { 42static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
43 0x28, 0x29, 0x2a, 43 0x2e, 0x2f, I2C_CLIENT_END
44 0x2b, 0x2c, 0x2d,
45 0x2e, 0x2f, I2C_CLIENT_END
46}; 44};
47 45
48/* 46/*
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 5aaad3636c98..2bffcab7dc9f 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -61,7 +61,7 @@
61#define ADM1031_CONF2_TEMP_ENABLE(chan) (0x10 << (chan)) 61#define ADM1031_CONF2_TEMP_ENABLE(chan) (0x10 << (chan))
62 62
63/* Addresses to scan */ 63/* Addresses to scan */
64static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 64static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
65 65
66/* Insmod parameters */ 66/* Insmod parameters */
67I2C_CLIENT_INSMOD_2(adm1030, adm1031); 67I2C_CLIENT_INSMOD_2(adm1030, adm1031);
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 7671d2bf7800..149ef25252e7 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -52,7 +52,7 @@
52#include <linux/mutex.h> 52#include <linux/mutex.h>
53 53
54/* Addresses to scan */ 54/* Addresses to scan */
55static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, 55static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
56 I2C_CLIENT_END }; 56 I2C_CLIENT_END };
57 57
58/* Insmod parameters */ 58/* Insmod parameters */
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 6b8a73ef404c..ed71a8bc70dc 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -44,7 +44,7 @@
44#define ADS7828_INT_VREF_MV 2500 /* Internal vref is 2.5V, 2500mV */ 44#define ADS7828_INT_VREF_MV 2500 /* Internal vref is 2.5V, 2500mV */
45 45
46/* Addresses to scan */ 46/* Addresses to scan */
47static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 47static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
48 I2C_CLIENT_END }; 48 I2C_CLIENT_END };
49 49
50/* Insmod parameters */ 50/* Insmod parameters */
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 747693ab2ff1..6b5325f33a2c 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -30,7 +30,7 @@
30#include <linux/log2.h> 30#include <linux/log2.h>
31 31
32/* Addresses to scan */ 32/* Addresses to scan */
33static unsigned short normal_i2c[] = { 0x2C, 0x2E, 0x2F, I2C_CLIENT_END }; 33static const unsigned short normal_i2c[] = { 0x2C, 0x2E, 0x2F, I2C_CLIENT_END };
34 34
35/* Insmod parameters */ 35/* Insmod parameters */
36I2C_CLIENT_INSMOD_1(adt7470); 36I2C_CLIENT_INSMOD_1(adt7470);
diff --git a/drivers/hwmon/adt7473.c b/drivers/hwmon/adt7473.c
new file mode 100644
index 000000000000..9587869bdba0
--- /dev/null
+++ b/drivers/hwmon/adt7473.c
@@ -0,0 +1,1157 @@
1/*
2 * A hwmon driver for the Analog Devices ADT7473
3 * Copyright (C) 2007 IBM
4 *
5 * Author: Darrick J. Wong <djwong@us.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/module.h>
23#include <linux/jiffies.h>
24#include <linux/i2c.h>
25#include <linux/hwmon.h>
26#include <linux/hwmon-sysfs.h>
27#include <linux/err.h>
28#include <linux/mutex.h>
29#include <linux/delay.h>
30#include <linux/log2.h>
31
32/* Addresses to scan */
33static const unsigned short normal_i2c[] = { 0x2C, 0x2D, 0x2E, I2C_CLIENT_END };
34
35/* Insmod parameters */
36I2C_CLIENT_INSMOD_1(adt7473);
37
38/* ADT7473 registers */
39#define ADT7473_REG_BASE_ADDR 0x20
40
41#define ADT7473_REG_VOLT_BASE_ADDR 0x21
42#define ADT7473_REG_VOLT_MAX_ADDR 0x22
43#define ADT7473_REG_VOLT_MIN_BASE_ADDR 0x46
44#define ADT7473_REG_VOLT_MIN_MAX_ADDR 0x49
45
46#define ADT7473_REG_TEMP_BASE_ADDR 0x25
47#define ADT7473_REG_TEMP_MAX_ADDR 0x27
48#define ADT7473_REG_TEMP_LIMITS_BASE_ADDR 0x4E
49#define ADT7473_REG_TEMP_LIMITS_MAX_ADDR 0x53
50#define ADT7473_REG_TEMP_TMIN_BASE_ADDR 0x67
51#define ADT7473_REG_TEMP_TMIN_MAX_ADDR 0x69
52#define ADT7473_REG_TEMP_TMAX_BASE_ADDR 0x6A
53#define ADT7473_REG_TEMP_TMAX_MAX_ADDR 0x6C
54
55#define ADT7473_REG_FAN_BASE_ADDR 0x28
56#define ADT7473_REG_FAN_MAX_ADDR 0x2F
57#define ADT7473_REG_FAN_MIN_BASE_ADDR 0x54
58#define ADT7473_REG_FAN_MIN_MAX_ADDR 0x5B
59
60#define ADT7473_REG_PWM_BASE_ADDR 0x30
61#define ADT7473_REG_PWM_MAX_ADDR 0x32
62#define ADT7473_REG_PWM_MIN_BASE_ADDR 0x64
63#define ADT7473_REG_PWM_MIN_MAX_ADDR 0x66
64#define ADT7473_REG_PWM_MAX_BASE_ADDR 0x38
65#define ADT7473_REG_PWM_MAX_MAX_ADDR 0x3A
66#define ADT7473_REG_PWM_BHVR_BASE_ADDR 0x5C
67#define ADT7473_REG_PWM_BHVR_MAX_ADDR 0x5E
68#define ADT7473_PWM_BHVR_MASK 0xE0
69#define ADT7473_PWM_BHVR_SHIFT 5
70
71#define ADT7473_REG_CFG1 0x40
72#define ADT7473_CFG1_START 0x01
73#define ADT7473_CFG1_READY 0x04
74#define ADT7473_REG_CFG2 0x73
75#define ADT7473_REG_CFG3 0x78
76#define ADT7473_REG_CFG4 0x7D
77#define ADT7473_CFG4_MAX_DUTY_AT_OVT 0x08
78#define ADT7473_REG_CFG5 0x7C
79#define ADT7473_CFG5_TEMP_TWOS 0x01
80#define ADT7473_CFG5_TEMP_OFFSET 0x02
81
82#define ADT7473_REG_DEVICE 0x3D
83#define ADT7473_VENDOR 0x41
84#define ADT7473_REG_VENDOR 0x3E
85#define ADT7473_DEVICE 0x73
86#define ADT7473_REG_REVISION 0x3F
87#define ADT7473_REV_68 0x68
88#define ADT7473_REV_69 0x69
89
90#define ADT7473_REG_ALARM1 0x41
91#define ADT7473_VCCP_ALARM 0x02
92#define ADT7473_VCC_ALARM 0x04
93#define ADT7473_R1T_ALARM 0x10
94#define ADT7473_LT_ALARM 0x20
95#define ADT7473_R2T_ALARM 0x40
96#define ADT7473_OOL 0x80
97#define ADT7473_REG_ALARM2 0x42
98#define ADT7473_OVT_ALARM 0x02
99#define ADT7473_FAN1_ALARM 0x04
100#define ADT7473_FAN2_ALARM 0x08
101#define ADT7473_FAN3_ALARM 0x10
102#define ADT7473_FAN4_ALARM 0x20
103#define ADT7473_R1T_SHORT 0x40
104#define ADT7473_R2T_SHORT 0x80
105#define ADT7473_REG_MAX_ADDR 0x80
106
107#define ALARM2(x) ((x) << 8)
108
109#define ADT7473_VOLT_COUNT 2
110#define ADT7473_REG_VOLT(x) (ADT7473_REG_VOLT_BASE_ADDR + (x))
111#define ADT7473_REG_VOLT_MIN(x) (ADT7473_REG_VOLT_MIN_BASE_ADDR + ((x) * 2))
112#define ADT7473_REG_VOLT_MAX(x) (ADT7473_REG_VOLT_MIN_BASE_ADDR + \
113 ((x) * 2) + 1)
114
115#define ADT7473_TEMP_COUNT 3
116#define ADT7473_REG_TEMP(x) (ADT7473_REG_TEMP_BASE_ADDR + (x))
117#define ADT7473_REG_TEMP_MIN(x) (ADT7473_REG_TEMP_LIMITS_BASE_ADDR + ((x) * 2))
118#define ADT7473_REG_TEMP_MAX(x) (ADT7473_REG_TEMP_LIMITS_BASE_ADDR + \
119 ((x) * 2) + 1)
120#define ADT7473_REG_TEMP_TMIN(x) (ADT7473_REG_TEMP_TMIN_BASE_ADDR + (x))
121#define ADT7473_REG_TEMP_TMAX(x) (ADT7473_REG_TEMP_TMAX_BASE_ADDR + (x))
122
123#define ADT7473_FAN_COUNT 4
124#define ADT7473_REG_FAN(x) (ADT7473_REG_FAN_BASE_ADDR + ((x) * 2))
125#define ADT7473_REG_FAN_MIN(x) (ADT7473_REG_FAN_MIN_BASE_ADDR + ((x) * 2))
126
127#define ADT7473_PWM_COUNT 3
128#define ADT7473_REG_PWM(x) (ADT7473_REG_PWM_BASE_ADDR + (x))
129#define ADT7473_REG_PWM_MAX(x) (ADT7473_REG_PWM_MAX_BASE_ADDR + (x))
130#define ADT7473_REG_PWM_MIN(x) (ADT7473_REG_PWM_MIN_BASE_ADDR + (x))
131#define ADT7473_REG_PWM_BHVR(x) (ADT7473_REG_PWM_BHVR_BASE_ADDR + (x))
132
 133/* How often do we reread sensor values? (In jiffies) */
134#define SENSOR_REFRESH_INTERVAL (2 * HZ)
135
136/* How often do we reread sensor limit values? (In jiffies) */
137#define LIMIT_REFRESH_INTERVAL (60 * HZ)
138
139/* datasheet says to divide this number by the fan reading to get fan rpm */
140#define FAN_PERIOD_TO_RPM(x) ((90000 * 60) / (x))
141#define FAN_RPM_TO_PERIOD FAN_PERIOD_TO_RPM
142#define FAN_PERIOD_INVALID 65535
143#define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID)
144
145struct adt7473_data {
146 struct i2c_client client;
147 struct device *hwmon_dev;
148 struct attribute_group attrs;
149 struct mutex lock;
150 char sensors_valid;
151 char limits_valid;
152 unsigned long sensors_last_updated; /* In jiffies */
153 unsigned long limits_last_updated; /* In jiffies */
154
155 u8 volt[ADT7473_VOLT_COUNT];
156 s8 volt_min[ADT7473_VOLT_COUNT];
157 s8 volt_max[ADT7473_VOLT_COUNT];
158
159 s8 temp[ADT7473_TEMP_COUNT];
160 s8 temp_min[ADT7473_TEMP_COUNT];
161 s8 temp_max[ADT7473_TEMP_COUNT];
162 s8 temp_tmin[ADT7473_TEMP_COUNT];
163 /* This is called the !THERM limit in the datasheet */
164 s8 temp_tmax[ADT7473_TEMP_COUNT];
165
166 u16 fan[ADT7473_FAN_COUNT];
167 u16 fan_min[ADT7473_FAN_COUNT];
168
169 u8 pwm[ADT7473_PWM_COUNT];
170 u8 pwm_max[ADT7473_PWM_COUNT];
171 u8 pwm_min[ADT7473_PWM_COUNT];
172 u8 pwm_behavior[ADT7473_PWM_COUNT];
173
174 u8 temp_twos_complement;
175 u8 temp_offset;
176
177 u16 alarm;
178 u8 max_duty_at_overheat;
179};
180
181static int adt7473_attach_adapter(struct i2c_adapter *adapter);
182static int adt7473_detect(struct i2c_adapter *adapter, int address, int kind);
183static int adt7473_detach_client(struct i2c_client *client);
184
185static struct i2c_driver adt7473_driver = {
186 .driver = {
187 .name = "adt7473",
188 },
189 .attach_adapter = adt7473_attach_adapter,
190 .detach_client = adt7473_detach_client,
191};
192
193/*
194 * 16-bit registers on the ADT7473 are low-byte first. The data sheet says
195 * that the low byte must be read before the high byte.
196 */
197static inline int adt7473_read_word_data(struct i2c_client *client, u8 reg)
198{
199 u16 foo;
200 foo = i2c_smbus_read_byte_data(client, reg);
201 foo |= ((u16)i2c_smbus_read_byte_data(client, reg + 1) << 8);
202 return foo;
203}
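/*
 * Illustrative values: if the low byte reads 0x94 and the following high
 * byte reads 0x11, the assembled word is 0x1194 = 4500, i.e. 1200 RPM
 * once run through FAN_PERIOD_TO_RPM() above.
 */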
204
205static inline int adt7473_write_word_data(struct i2c_client *client, u8 reg,
206 u16 value)
207{
	/* i2c_smbus_write_byte_data() returns 0 on success, so chain the
	 * two writes with ||, not &&, or a successful low-byte write
	 * would short-circuit and the high byte would never be written. */
208	return i2c_smbus_write_byte_data(client, reg, value & 0xFF)
209	 || i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
210}
211
212static void adt7473_init_client(struct i2c_client *client)
213{
214 int reg = i2c_smbus_read_byte_data(client, ADT7473_REG_CFG1);
215
216 if (!(reg & ADT7473_CFG1_READY)) {
217 dev_err(&client->dev, "Chip not ready.\n");
218 } else {
219 /* start monitoring */
220 i2c_smbus_write_byte_data(client, ADT7473_REG_CFG1,
221 reg | ADT7473_CFG1_START);
222 }
223}
224
225static struct adt7473_data *adt7473_update_device(struct device *dev)
226{
227 struct i2c_client *client = to_i2c_client(dev);
228 struct adt7473_data *data = i2c_get_clientdata(client);
229 unsigned long local_jiffies = jiffies;
230 u8 cfg;
231 int i;
232
233 mutex_lock(&data->lock);
234 if (time_before(local_jiffies, data->sensors_last_updated +
235 SENSOR_REFRESH_INTERVAL)
236 && data->sensors_valid)
237 goto no_sensor_update;
238
239 for (i = 0; i < ADT7473_VOLT_COUNT; i++)
240 data->volt[i] = i2c_smbus_read_byte_data(client,
241 ADT7473_REG_VOLT(i));
242
243 /* Determine temperature encoding */
244 cfg = i2c_smbus_read_byte_data(client, ADT7473_REG_CFG5);
245 data->temp_twos_complement = (cfg & ADT7473_CFG5_TEMP_TWOS);
246
247	/*
248	 * The meaning of this bit is not entirely clear: it implies a
249	 * variable temperature sensor offset, but the datasheet says nothing
250	 * explicit about it, while other parts of the datasheet imply that
251	 * "offset64" mode simply shifts temperature values by -64 when set.
252	 */
253 data->temp_offset = (cfg & ADT7473_CFG5_TEMP_OFFSET);
254
255 for (i = 0; i < ADT7473_TEMP_COUNT; i++)
256 data->temp[i] = i2c_smbus_read_byte_data(client,
257 ADT7473_REG_TEMP(i));
258
259 for (i = 0; i < ADT7473_FAN_COUNT; i++)
260 data->fan[i] = adt7473_read_word_data(client,
261 ADT7473_REG_FAN(i));
262
263 for (i = 0; i < ADT7473_PWM_COUNT; i++)
264 data->pwm[i] = i2c_smbus_read_byte_data(client,
265 ADT7473_REG_PWM(i));
266
267 data->alarm = i2c_smbus_read_byte_data(client, ADT7473_REG_ALARM1);
268 if (data->alarm & ADT7473_OOL)
269 data->alarm |= ALARM2(i2c_smbus_read_byte_data(client,
270 ADT7473_REG_ALARM2));
271
272 data->sensors_last_updated = local_jiffies;
273 data->sensors_valid = 1;
274
275no_sensor_update:
276 if (time_before(local_jiffies, data->limits_last_updated +
277 LIMIT_REFRESH_INTERVAL)
278 && data->limits_valid)
279 goto out;
280
281 for (i = 0; i < ADT7473_VOLT_COUNT; i++) {
282 data->volt_min[i] = i2c_smbus_read_byte_data(client,
283 ADT7473_REG_VOLT_MIN(i));
284 data->volt_max[i] = i2c_smbus_read_byte_data(client,
285 ADT7473_REG_VOLT_MAX(i));
286 }
287
288 for (i = 0; i < ADT7473_TEMP_COUNT; i++) {
289 data->temp_min[i] = i2c_smbus_read_byte_data(client,
290 ADT7473_REG_TEMP_MIN(i));
291 data->temp_max[i] = i2c_smbus_read_byte_data(client,
292 ADT7473_REG_TEMP_MAX(i));
293 data->temp_tmin[i] = i2c_smbus_read_byte_data(client,
294 ADT7473_REG_TEMP_TMIN(i));
295 data->temp_tmax[i] = i2c_smbus_read_byte_data(client,
296 ADT7473_REG_TEMP_TMAX(i));
297 }
298
299 for (i = 0; i < ADT7473_FAN_COUNT; i++)
300 data->fan_min[i] = adt7473_read_word_data(client,
301 ADT7473_REG_FAN_MIN(i));
302
303 for (i = 0; i < ADT7473_PWM_COUNT; i++) {
304 data->pwm_max[i] = i2c_smbus_read_byte_data(client,
305 ADT7473_REG_PWM_MAX(i));
306 data->pwm_min[i] = i2c_smbus_read_byte_data(client,
307 ADT7473_REG_PWM_MIN(i));
308 data->pwm_behavior[i] = i2c_smbus_read_byte_data(client,
309 ADT7473_REG_PWM_BHVR(i));
310 }
311
312 data->limits_last_updated = local_jiffies;
313 data->limits_valid = 1;
314
315out:
316 mutex_unlock(&data->lock);
317 return data;
318}
319
320/*
321 * On this chip, voltages are given as a count of steps between a minimum
322 * and maximum voltage, not a direct voltage.
323 */
324static const int volt_convert_table[][2] = {
325 {2997, 3},
326 {4395, 4},
327};
328
329static int decode_volt(int volt_index, u8 raw)
330{
331 int cmax = volt_convert_table[volt_index][0];
332 int cmin = volt_convert_table[volt_index][1];
333 return ((raw * (cmax - cmin)) / 255) + cmin;
334}
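/*
 * Worked example with the table above: for in1 (index 0) a raw reading of
 * 0x80 (128) decodes to ((128 * (2997 - 3)) / 255) + 3 = 1505, i.e. about
 * 1.5 V, since the in* sysfs files are expressed in millivolts.
 */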
335
336static u8 encode_volt(int volt_index, int cooked)
337{
338 int cmax = volt_convert_table[volt_index][0];
339 int cmin = volt_convert_table[volt_index][1];
340 u8 x;
341
342 if (cooked > cmax)
343 cooked = cmax;
344 else if (cooked < cmin)
345 cooked = cmin;
346
347 x = ((cooked - cmin) * 255) / (cmax - cmin);
348
349 return x;
350}
351
352static ssize_t show_volt_min(struct device *dev,
353 struct device_attribute *devattr,
354 char *buf)
355{
356 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
357 struct adt7473_data *data = adt7473_update_device(dev);
358 return sprintf(buf, "%d\n",
359 decode_volt(attr->index, data->volt_min[attr->index]));
360}
361
362static ssize_t set_volt_min(struct device *dev,
363 struct device_attribute *devattr,
364 const char *buf,
365 size_t count)
366{
367 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
368 struct i2c_client *client = to_i2c_client(dev);
369 struct adt7473_data *data = i2c_get_clientdata(client);
370 int volt = encode_volt(attr->index, simple_strtol(buf, NULL, 10));
371
372 mutex_lock(&data->lock);
373 data->volt_min[attr->index] = volt;
374 i2c_smbus_write_byte_data(client, ADT7473_REG_VOLT_MIN(attr->index),
375 volt);
376 mutex_unlock(&data->lock);
377
378 return count;
379}
380
381static ssize_t show_volt_max(struct device *dev,
382 struct device_attribute *devattr,
383 char *buf)
384{
385 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
386 struct adt7473_data *data = adt7473_update_device(dev);
387 return sprintf(buf, "%d\n",
388 decode_volt(attr->index, data->volt_max[attr->index]));
389}
390
391static ssize_t set_volt_max(struct device *dev,
392 struct device_attribute *devattr,
393 const char *buf,
394 size_t count)
395{
396 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
397 struct i2c_client *client = to_i2c_client(dev);
398 struct adt7473_data *data = i2c_get_clientdata(client);
399 int volt = encode_volt(attr->index, simple_strtol(buf, NULL, 10));
400
401 mutex_lock(&data->lock);
402 data->volt_max[attr->index] = volt;
403 i2c_smbus_write_byte_data(client, ADT7473_REG_VOLT_MAX(attr->index),
404 volt);
405 mutex_unlock(&data->lock);
406
407 return count;
408}
409
410static ssize_t show_volt(struct device *dev, struct device_attribute *devattr,
411 char *buf)
412{
413 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
414 struct adt7473_data *data = adt7473_update_device(dev);
415
416 return sprintf(buf, "%d\n",
417 decode_volt(attr->index, data->volt[attr->index]));
418}
419
420/*
421 * This chip can report temperature data either as a two's complement
422 * number in the range -128 to 127, or as an unsigned number that must
423 * be offset by 64.
424 */
425static int decode_temp(struct adt7473_data *data, u8 raw)
426{
427 if (data->temp_twos_complement)
428 return (s8)raw;
429 return raw - 64;
430}
431
432static u8 encode_temp(struct adt7473_data *data, int cooked)
433{
434 if (data->temp_twos_complement)
435 return (cooked & 0xFF);
436 return cooked + 64;
437}
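/*
 * Example: a raw byte of 0x54 (84) decodes to 84 - 64 = 20 degrees C in
 * offset-64 mode, but to +84 degrees C when the chip reports two's
 * complement values; the sysfs callbacks below then scale the result to
 * millidegrees.
 */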
438
439static ssize_t show_temp_min(struct device *dev,
440 struct device_attribute *devattr,
441 char *buf)
442{
443 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
444 struct adt7473_data *data = adt7473_update_device(dev);
445 return sprintf(buf, "%d\n",
446 1000 * decode_temp(data, data->temp_min[attr->index]));
447}
448
449static ssize_t set_temp_min(struct device *dev,
450 struct device_attribute *devattr,
451 const char *buf,
452 size_t count)
453{
454 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
455 struct i2c_client *client = to_i2c_client(dev);
456 struct adt7473_data *data = i2c_get_clientdata(client);
457 int temp = simple_strtol(buf, NULL, 10) / 1000;
458 temp = encode_temp(data, temp);
459
460 mutex_lock(&data->lock);
461 data->temp_min[attr->index] = temp;
462 i2c_smbus_write_byte_data(client, ADT7473_REG_TEMP_MIN(attr->index),
463 temp);
464 mutex_unlock(&data->lock);
465
466 return count;
467}
468
469static ssize_t show_temp_max(struct device *dev,
470 struct device_attribute *devattr,
471 char *buf)
472{
473 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
474 struct adt7473_data *data = adt7473_update_device(dev);
475 return sprintf(buf, "%d\n",
476 1000 * decode_temp(data, data->temp_max[attr->index]));
477}
478
479static ssize_t set_temp_max(struct device *dev,
480 struct device_attribute *devattr,
481 const char *buf,
482 size_t count)
483{
484 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
485 struct i2c_client *client = to_i2c_client(dev);
486 struct adt7473_data *data = i2c_get_clientdata(client);
487 int temp = simple_strtol(buf, NULL, 10) / 1000;
488 temp = encode_temp(data, temp);
489
490 mutex_lock(&data->lock);
491 data->temp_max[attr->index] = temp;
492 i2c_smbus_write_byte_data(client, ADT7473_REG_TEMP_MAX(attr->index),
493 temp);
494 mutex_unlock(&data->lock);
495
496 return count;
497}
498
499static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
500 char *buf)
501{
502 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
503 struct adt7473_data *data = adt7473_update_device(dev);
504 return sprintf(buf, "%d\n",
505 1000 * decode_temp(data, data->temp[attr->index]));
506}
507
508static ssize_t show_fan_min(struct device *dev,
509 struct device_attribute *devattr,
510 char *buf)
511{
512 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
513 struct adt7473_data *data = adt7473_update_device(dev);
514
515 if (FAN_DATA_VALID(data->fan_min[attr->index]))
516 return sprintf(buf, "%d\n",
517 FAN_PERIOD_TO_RPM(data->fan_min[attr->index]));
518 else
519 return sprintf(buf, "0\n");
520}
521
522static ssize_t set_fan_min(struct device *dev,
523 struct device_attribute *devattr,
524 const char *buf, size_t count)
525{
526 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
527 struct i2c_client *client = to_i2c_client(dev);
528 struct adt7473_data *data = i2c_get_clientdata(client);
529 int temp = simple_strtol(buf, NULL, 10);
530
531 if (!temp)
532 return -EINVAL;
533 temp = FAN_RPM_TO_PERIOD(temp);
534
535 mutex_lock(&data->lock);
536 data->fan_min[attr->index] = temp;
537 adt7473_write_word_data(client, ADT7473_REG_FAN_MIN(attr->index), temp);
538 mutex_unlock(&data->lock);
539
540 return count;
541}
542
543static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
544 char *buf)
545{
546 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
547 struct adt7473_data *data = adt7473_update_device(dev);
548
549 if (FAN_DATA_VALID(data->fan[attr->index]))
550 return sprintf(buf, "%d\n",
551 FAN_PERIOD_TO_RPM(data->fan[attr->index]));
552 else
553 return sprintf(buf, "0\n");
554}
555
556static ssize_t show_max_duty_at_crit(struct device *dev,
557 struct device_attribute *devattr,
558 char *buf)
559{
560 struct adt7473_data *data = adt7473_update_device(dev);
561 return sprintf(buf, "%d\n", data->max_duty_at_overheat);
562}
563
564static ssize_t set_max_duty_at_crit(struct device *dev,
565 struct device_attribute *devattr,
566 const char *buf,
567 size_t count)
568{
569 u8 reg;
570 struct i2c_client *client = to_i2c_client(dev);
571 struct adt7473_data *data = i2c_get_clientdata(client);
572 int temp = simple_strtol(buf, NULL, 10);
573	temp = !!temp;	/* any non-zero value enables max duty at overheat */
574
575 mutex_lock(&data->lock);
576 data->max_duty_at_overheat = temp;
577 reg = i2c_smbus_read_byte_data(client, ADT7473_REG_CFG4);
578 if (temp)
579 reg |= ADT7473_CFG4_MAX_DUTY_AT_OVT;
580 else
581 reg &= ~ADT7473_CFG4_MAX_DUTY_AT_OVT;
582 i2c_smbus_write_byte_data(client, ADT7473_REG_CFG4, reg);
583 mutex_unlock(&data->lock);
584
585 return count;
586}
587
588static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
589 char *buf)
590{
591 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
592 struct adt7473_data *data = adt7473_update_device(dev);
593 return sprintf(buf, "%d\n", data->pwm[attr->index]);
594}
595
596static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
597 const char *buf, size_t count)
598{
599 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
600 struct i2c_client *client = to_i2c_client(dev);
601 struct adt7473_data *data = i2c_get_clientdata(client);
602 int temp = simple_strtol(buf, NULL, 10);
603
604 mutex_lock(&data->lock);
605 data->pwm[attr->index] = temp;
606 i2c_smbus_write_byte_data(client, ADT7473_REG_PWM(attr->index), temp);
607 mutex_unlock(&data->lock);
608
609 return count;
610}
611
612static ssize_t show_pwm_max(struct device *dev,
613 struct device_attribute *devattr,
614 char *buf)
615{
616 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
617 struct adt7473_data *data = adt7473_update_device(dev);
618 return sprintf(buf, "%d\n", data->pwm_max[attr->index]);
619}
620
621static ssize_t set_pwm_max(struct device *dev,
622 struct device_attribute *devattr,
623 const char *buf,
624 size_t count)
625{
626 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
627 struct i2c_client *client = to_i2c_client(dev);
628 struct adt7473_data *data = i2c_get_clientdata(client);
629 int temp = simple_strtol(buf, NULL, 10);
630
631 mutex_lock(&data->lock);
632 data->pwm_max[attr->index] = temp;
633 i2c_smbus_write_byte_data(client, ADT7473_REG_PWM_MAX(attr->index),
634 temp);
635 mutex_unlock(&data->lock);
636
637 return count;
638}
639
640static ssize_t show_pwm_min(struct device *dev,
641 struct device_attribute *devattr,
642 char *buf)
643{
644 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
645 struct adt7473_data *data = adt7473_update_device(dev);
646 return sprintf(buf, "%d\n", data->pwm_min[attr->index]);
647}
648
649static ssize_t set_pwm_min(struct device *dev,
650 struct device_attribute *devattr,
651 const char *buf,
652 size_t count)
653{
654 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
655 struct i2c_client *client = to_i2c_client(dev);
656 struct adt7473_data *data = i2c_get_clientdata(client);
657 int temp = simple_strtol(buf, NULL, 10);
658
659 mutex_lock(&data->lock);
660 data->pwm_min[attr->index] = temp;
661 i2c_smbus_write_byte_data(client, ADT7473_REG_PWM_MIN(attr->index),
662 temp);
663 mutex_unlock(&data->lock);
664
665 return count;
666}
667
668static ssize_t show_temp_tmax(struct device *dev,
669 struct device_attribute *devattr,
670 char *buf)
671{
672 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
673 struct adt7473_data *data = adt7473_update_device(dev);
674 return sprintf(buf, "%d\n",
675 1000 * decode_temp(data, data->temp_tmax[attr->index]));
676}
677
678static ssize_t set_temp_tmax(struct device *dev,
679 struct device_attribute *devattr,
680 const char *buf,
681 size_t count)
682{
683 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
684 struct i2c_client *client = to_i2c_client(dev);
685 struct adt7473_data *data = i2c_get_clientdata(client);
686 int temp = simple_strtol(buf, NULL, 10) / 1000;
687 temp = encode_temp(data, temp);
688
689 mutex_lock(&data->lock);
690 data->temp_tmax[attr->index] = temp;
691 i2c_smbus_write_byte_data(client, ADT7473_REG_TEMP_TMAX(attr->index),
692 temp);
693 mutex_unlock(&data->lock);
694
695 return count;
696}
697
698static ssize_t show_temp_tmin(struct device *dev,
699 struct device_attribute *devattr,
700 char *buf)
701{
702 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
703 struct adt7473_data *data = adt7473_update_device(dev);
704 return sprintf(buf, "%d\n",
705 1000 * decode_temp(data, data->temp_tmin[attr->index]));
706}
707
708static ssize_t set_temp_tmin(struct device *dev,
709 struct device_attribute *devattr,
710 const char *buf,
711 size_t count)
712{
713 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
714 struct i2c_client *client = to_i2c_client(dev);
715 struct adt7473_data *data = i2c_get_clientdata(client);
716 int temp = simple_strtol(buf, NULL, 10) / 1000;
717 temp = encode_temp(data, temp);
718
719 mutex_lock(&data->lock);
720 data->temp_tmin[attr->index] = temp;
721 i2c_smbus_write_byte_data(client, ADT7473_REG_TEMP_TMIN(attr->index),
722 temp);
723 mutex_unlock(&data->lock);
724
725 return count;
726}
727
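/*
 * pwm[1-3]_enable follows the usual hwmon convention (0 = no control /
 * full speed, 1 = manual duty cycle, 2 = automatic): behavior code 0x3
 * is reported as 0, 0x7 as 1, and all other codes as 2.
 */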
728static ssize_t show_pwm_enable(struct device *dev,
729 struct device_attribute *devattr,
730 char *buf)
731{
732 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
733 struct adt7473_data *data = adt7473_update_device(dev);
734
735 switch (data->pwm_behavior[attr->index] >> ADT7473_PWM_BHVR_SHIFT) {
736 case 3:
737 return sprintf(buf, "0\n");
738 case 7:
739 return sprintf(buf, "1\n");
740 default:
741 return sprintf(buf, "2\n");
742 }
743}
744
745static ssize_t set_pwm_enable(struct device *dev,
746 struct device_attribute *devattr,
747 const char *buf,
748 size_t count)
749{
750 u8 reg;
751 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
752 struct i2c_client *client = to_i2c_client(dev);
753 struct adt7473_data *data = i2c_get_clientdata(client);
754 int temp = simple_strtol(buf, NULL, 10);
755
756 switch (temp) {
757 case 0:
758 temp = 3;
759 break;
760 case 1:
761 temp = 7;
762 break;
763 case 2:
764 /* Enter automatic mode with fans off */
765 temp = 4;
766 break;
767 default:
768 return -EINVAL;
769 }
770
771 mutex_lock(&data->lock);
772 reg = i2c_smbus_read_byte_data(client,
773 ADT7473_REG_PWM_BHVR(attr->index));
774 reg = (temp << ADT7473_PWM_BHVR_SHIFT) |
775 (reg & ~ADT7473_PWM_BHVR_MASK);
776 i2c_smbus_write_byte_data(client, ADT7473_REG_PWM_BHVR(attr->index),
777 reg);
778 data->pwm_behavior[attr->index] = reg;
779 mutex_unlock(&data->lock);
780
781 return count;
782}
783
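/*
 * pwm[1-3]_auto_channels_temp reports, as a bitmask per the hwmon
 * convention, which temperature channels drive automatic mode: behavior
 * codes 0, 1, 5 and 6 map to 1, 2, 6 and 7, code 2 maps to 4 (temp3
 * only), and the non-automatic codes (3, 4 and 7) report 0.
 */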
784static ssize_t show_pwm_auto_temp(struct device *dev,
785 struct device_attribute *devattr,
786 char *buf)
787{
788 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
789 struct adt7473_data *data = adt7473_update_device(dev);
790 int bhvr = data->pwm_behavior[attr->index] >> ADT7473_PWM_BHVR_SHIFT;
791
792 switch (bhvr) {
793 case 3:
794 case 4:
795 case 7:
796 return sprintf(buf, "0\n");
797 case 0:
798 case 1:
799 case 5:
800 case 6:
801 return sprintf(buf, "%d\n", bhvr + 1);
802 case 2:
803 return sprintf(buf, "4\n");
804 }
805 /* shouldn't ever get here */
806 BUG();
807}
808
809static ssize_t set_pwm_auto_temp(struct device *dev,
810 struct device_attribute *devattr,
811 const char *buf,
812 size_t count)
813{
814 u8 reg;
815 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
816 struct i2c_client *client = to_i2c_client(dev);
817 struct adt7473_data *data = i2c_get_clientdata(client);
818 int temp = simple_strtol(buf, NULL, 10);
819
820 switch (temp) {
821 case 1:
822 case 2:
823 case 6:
824 case 7:
825 temp--;
826 break;
827 case 0:
828 temp = 4;
829 break;
830 default:
831 return -EINVAL;
832 }
833
834 mutex_lock(&data->lock);
835 reg = i2c_smbus_read_byte_data(client,
836 ADT7473_REG_PWM_BHVR(attr->index));
837 reg = (temp << ADT7473_PWM_BHVR_SHIFT) |
838 (reg & ~ADT7473_PWM_BHVR_MASK);
839 i2c_smbus_write_byte_data(client, ADT7473_REG_PWM_BHVR(attr->index),
840 reg);
841 data->pwm_behavior[attr->index] = reg;
842 mutex_unlock(&data->lock);
843
844 return count;
845}
846
847static ssize_t show_alarm(struct device *dev,
848 struct device_attribute *devattr,
849 char *buf)
850{
851 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
852 struct adt7473_data *data = adt7473_update_device(dev);
853
854 if (data->alarm & attr->index)
855 return sprintf(buf, "1\n");
856 else
857 return sprintf(buf, "0\n");
858}
859
860
861static SENSOR_DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO, show_volt_max,
862 set_volt_max, 0);
863static SENSOR_DEVICE_ATTR(in2_max, S_IWUSR | S_IRUGO, show_volt_max,
864 set_volt_max, 1);
865
866static SENSOR_DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO, show_volt_min,
867 set_volt_min, 0);
868static SENSOR_DEVICE_ATTR(in2_min, S_IWUSR | S_IRUGO, show_volt_min,
869 set_volt_min, 1);
870
871static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_volt, NULL, 0);
872static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_volt, NULL, 1);
873
874static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL,
875 ADT7473_VCCP_ALARM);
876static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL,
877 ADT7473_VCC_ALARM);
878
879static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
880 set_temp_max, 0);
881static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
882 set_temp_max, 1);
883static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_temp_max,
884 set_temp_max, 2);
885
886static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp_min,
887 set_temp_min, 0);
888static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_min,
889 set_temp_min, 1);
890static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_temp_min,
891 set_temp_min, 2);
892
893static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
894static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
895static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
896
897static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL,
898 ADT7473_R1T_ALARM | ALARM2(ADT7473_R1T_SHORT));
899static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL,
900 ADT7473_LT_ALARM);
901static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL,
902 ADT7473_R2T_ALARM | ALARM2(ADT7473_R2T_SHORT));
903
904static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
905 set_fan_min, 0);
906static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
907 set_fan_min, 1);
908static SENSOR_DEVICE_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min,
909 set_fan_min, 2);
910static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min,
911 set_fan_min, 3);
912
913static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
914static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
915static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2);
916static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3);
917
918static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL,
919 ALARM2(ADT7473_FAN1_ALARM));
920static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL,
921 ALARM2(ADT7473_FAN2_ALARM));
922static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL,
923 ALARM2(ADT7473_FAN3_ALARM));
924static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL,
925 ALARM2(ADT7473_FAN4_ALARM));
926
927static SENSOR_DEVICE_ATTR(pwm_use_point2_pwm_at_crit, S_IWUSR | S_IRUGO,
928 show_max_duty_at_crit, set_max_duty_at_crit, 0);
929
930static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0);
931static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
932static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 2);
933
934static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IWUSR | S_IRUGO,
935 show_pwm_min, set_pwm_min, 0);
936static SENSOR_DEVICE_ATTR(pwm2_auto_point1_pwm, S_IWUSR | S_IRUGO,
937 show_pwm_min, set_pwm_min, 1);
938static SENSOR_DEVICE_ATTR(pwm3_auto_point1_pwm, S_IWUSR | S_IRUGO,
939 show_pwm_min, set_pwm_min, 2);
940
941static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IWUSR | S_IRUGO,
942 show_pwm_max, set_pwm_max, 0);
943static SENSOR_DEVICE_ATTR(pwm2_auto_point2_pwm, S_IWUSR | S_IRUGO,
944 show_pwm_max, set_pwm_max, 1);
945static SENSOR_DEVICE_ATTR(pwm3_auto_point2_pwm, S_IWUSR | S_IRUGO,
946 show_pwm_max, set_pwm_max, 2);
947
948static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, S_IWUSR | S_IRUGO,
949 show_temp_tmin, set_temp_tmin, 0);
950static SENSOR_DEVICE_ATTR(temp2_auto_point1_temp, S_IWUSR | S_IRUGO,
951 show_temp_tmin, set_temp_tmin, 1);
952static SENSOR_DEVICE_ATTR(temp3_auto_point1_temp, S_IWUSR | S_IRUGO,
953 show_temp_tmin, set_temp_tmin, 2);
954
955static SENSOR_DEVICE_ATTR(temp1_auto_point2_temp, S_IWUSR | S_IRUGO,
956 show_temp_tmax, set_temp_tmax, 0);
957static SENSOR_DEVICE_ATTR(temp2_auto_point2_temp, S_IWUSR | S_IRUGO,
958 show_temp_tmax, set_temp_tmax, 1);
959static SENSOR_DEVICE_ATTR(temp3_auto_point2_temp, S_IWUSR | S_IRUGO,
960 show_temp_tmax, set_temp_tmax, 2);
961
962static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
963 set_pwm_enable, 0);
964static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
965 set_pwm_enable, 1);
966static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
967 set_pwm_enable, 2);
968
969static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IWUSR | S_IRUGO,
970 show_pwm_auto_temp, set_pwm_auto_temp, 0);
971static SENSOR_DEVICE_ATTR(pwm2_auto_channels_temp, S_IWUSR | S_IRUGO,
972 show_pwm_auto_temp, set_pwm_auto_temp, 1);
973static SENSOR_DEVICE_ATTR(pwm3_auto_channels_temp, S_IWUSR | S_IRUGO,
974 show_pwm_auto_temp, set_pwm_auto_temp, 2);
975
976static struct attribute *adt7473_attr[] =
977{
978 &sensor_dev_attr_in1_max.dev_attr.attr,
979 &sensor_dev_attr_in2_max.dev_attr.attr,
980 &sensor_dev_attr_in1_min.dev_attr.attr,
981 &sensor_dev_attr_in2_min.dev_attr.attr,
982 &sensor_dev_attr_in1_input.dev_attr.attr,
983 &sensor_dev_attr_in2_input.dev_attr.attr,
984 &sensor_dev_attr_in1_alarm.dev_attr.attr,
985 &sensor_dev_attr_in2_alarm.dev_attr.attr,
986
987 &sensor_dev_attr_temp1_max.dev_attr.attr,
988 &sensor_dev_attr_temp2_max.dev_attr.attr,
989 &sensor_dev_attr_temp3_max.dev_attr.attr,
990 &sensor_dev_attr_temp1_min.dev_attr.attr,
991 &sensor_dev_attr_temp2_min.dev_attr.attr,
992 &sensor_dev_attr_temp3_min.dev_attr.attr,
993 &sensor_dev_attr_temp1_input.dev_attr.attr,
994 &sensor_dev_attr_temp2_input.dev_attr.attr,
995 &sensor_dev_attr_temp3_input.dev_attr.attr,
996 &sensor_dev_attr_temp1_alarm.dev_attr.attr,
997 &sensor_dev_attr_temp2_alarm.dev_attr.attr,
998 &sensor_dev_attr_temp3_alarm.dev_attr.attr,
999 &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
1000 &sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr,
1001 &sensor_dev_attr_temp3_auto_point1_temp.dev_attr.attr,
1002 &sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr,
1003 &sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr,
1004 &sensor_dev_attr_temp3_auto_point2_temp.dev_attr.attr,
1005
1006 &sensor_dev_attr_fan1_min.dev_attr.attr,
1007 &sensor_dev_attr_fan2_min.dev_attr.attr,
1008 &sensor_dev_attr_fan3_min.dev_attr.attr,
1009 &sensor_dev_attr_fan4_min.dev_attr.attr,
1010 &sensor_dev_attr_fan1_input.dev_attr.attr,
1011 &sensor_dev_attr_fan2_input.dev_attr.attr,
1012 &sensor_dev_attr_fan3_input.dev_attr.attr,
1013 &sensor_dev_attr_fan4_input.dev_attr.attr,
1014 &sensor_dev_attr_fan1_alarm.dev_attr.attr,
1015 &sensor_dev_attr_fan2_alarm.dev_attr.attr,
1016 &sensor_dev_attr_fan3_alarm.dev_attr.attr,
1017 &sensor_dev_attr_fan4_alarm.dev_attr.attr,
1018
1019 &sensor_dev_attr_pwm_use_point2_pwm_at_crit.dev_attr.attr,
1020
1021 &sensor_dev_attr_pwm1.dev_attr.attr,
1022 &sensor_dev_attr_pwm2.dev_attr.attr,
1023 &sensor_dev_attr_pwm3.dev_attr.attr,
1024 &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
1025 &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
1026 &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
1027 &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
1028 &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
1029 &sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
1030
1031 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
1032 &sensor_dev_attr_pwm2_enable.dev_attr.attr,
1033 &sensor_dev_attr_pwm3_enable.dev_attr.attr,
1034 &sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
1035 &sensor_dev_attr_pwm2_auto_channels_temp.dev_attr.attr,
1036 &sensor_dev_attr_pwm3_auto_channels_temp.dev_attr.attr,
1037
1038 NULL
1039};
1040
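/*
 * Legacy i2c binding: the core calls adt7473_attach_adapter() for every
 * adapter, and i2c_probe() walks the candidate addresses (addr_data is
 * presumably generated by the I2C_CLIENT_INSMOD macro near the top of the
 * file, outside this excerpt), calling adt7473_detect() for each address
 * that responds.
 */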
1041static int adt7473_attach_adapter(struct i2c_adapter *adapter)
1042{
1043 if (!(adapter->class & I2C_CLASS_HWMON))
1044 return 0;
1045 return i2c_probe(adapter, &addr_data, adt7473_detect);
1046}
1047
1048static int adt7473_detect(struct i2c_adapter *adapter, int address, int kind)
1049{
1050 struct i2c_client *client;
1051 struct adt7473_data *data;
1052 int err = 0;
1053
1054 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
1055 goto exit;
1056
1057 data = kzalloc(sizeof(struct adt7473_data), GFP_KERNEL);
1058 if (!data) {
1059 err = -ENOMEM;
1060 goto exit;
1061 }
1062
1063 client = &data->client;
1064 client->addr = address;
1065 client->adapter = adapter;
1066 client->driver = &adt7473_driver;
1067
1068 i2c_set_clientdata(client, data);
1069
1070 mutex_init(&data->lock);
1071
1072 if (kind <= 0) {
1073 int vendor, device, revision;
1074
1075 vendor = i2c_smbus_read_byte_data(client, ADT7473_REG_VENDOR);
1076 if (vendor != ADT7473_VENDOR) {
1077 err = -ENODEV;
1078 goto exit_free;
1079 }
1080
1081 device = i2c_smbus_read_byte_data(client, ADT7473_REG_DEVICE);
1082 if (device != ADT7473_DEVICE) {
1083 err = -ENODEV;
1084 goto exit_free;
1085 }
1086
1087 revision = i2c_smbus_read_byte_data(client,
1088 ADT7473_REG_REVISION);
1089 if (revision != ADT7473_REV_68 && revision != ADT7473_REV_69) {
1090 err = -ENODEV;
1091 goto exit_free;
1092 }
1093 } else
1094 dev_dbg(&adapter->dev, "detection forced\n");
1095
1096 strlcpy(client->name, "adt7473", I2C_NAME_SIZE);
1097
1098 err = i2c_attach_client(client);
1099 if (err)
1100 goto exit_free;
1101
1102 dev_info(&client->dev, "%s chip found\n", client->name);
1103
1104 /* Initialize the ADT7473 chip */
1105 adt7473_init_client(client);
1106
1107 /* Register sysfs hooks */
1108 data->attrs.attrs = adt7473_attr;
1109 err = sysfs_create_group(&client->dev.kobj, &data->attrs);
1110 if (err)
1111 goto exit_detach;
1112
1113 data->hwmon_dev = hwmon_device_register(&client->dev);
1114 if (IS_ERR(data->hwmon_dev)) {
1115 err = PTR_ERR(data->hwmon_dev);
1116 goto exit_remove;
1117 }
1118
1119 return 0;
1120
1121exit_remove:
1122 sysfs_remove_group(&client->dev.kobj, &data->attrs);
1123exit_detach:
1124 i2c_detach_client(client);
1125exit_free:
1126 kfree(data);
1127exit:
1128 return err;
1129}
1130
1131static int adt7473_detach_client(struct i2c_client *client)
1132{
1133 struct adt7473_data *data = i2c_get_clientdata(client);
1134
1135 hwmon_device_unregister(data->hwmon_dev);
1136 sysfs_remove_group(&client->dev.kobj, &data->attrs);
1137 i2c_detach_client(client);
1138 kfree(data);
1139 return 0;
1140}
1141
1142static int __init adt7473_init(void)
1143{
1144 return i2c_add_driver(&adt7473_driver);
1145}
1146
1147static void __exit adt7473_exit(void)
1148{
1149 i2c_del_driver(&adt7473_driver);
1150}
1151
1152MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
1153MODULE_DESCRIPTION("ADT7473 driver");
1154MODULE_LICENSE("GPL");
1155
1156module_init(adt7473_init);
1157module_exit(adt7473_exit);
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 0c94770b7f83..aacc0c4b809c 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -84,12 +84,15 @@ static const char* temperature_sensors_sets[][36] = {
84/* Set 0: Macbook Pro */ 84/* Set 0: Macbook Pro */
85 { "TA0P", "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "Th0H", 85 { "TA0P", "TB0T", "TC0D", "TC0P", "TG0H", "TG0P", "TG0T", "Th0H",
86 "Th1H", "Tm0P", "Ts0P", "Ts1P", NULL }, 86 "Th1H", "Tm0P", "Ts0P", "Ts1P", NULL },
87/* Set 1: Macbook set */ 87/* Set 1: Macbook2 set */
88 { "TB0T", "TC0D", "TC0P", "TM0P", "TN0P", "TN1P", "TTF0", "Th0H",
89 "Th0S", "Th1H", NULL },
90/* Set 2: Macbook set */
88 { "TB0T", "TC0D", "TC0P", "TM0P", "TN0P", "TN1P", "Th0H", "Th0S", 91 { "TB0T", "TC0D", "TC0P", "TM0P", "TN0P", "TN1P", "Th0H", "Th0S",
89 "Th1H", "Ts0P", NULL }, 92 "Th1H", "Ts0P", NULL },
90/* Set 2: Macmini set */ 93/* Set 3: Macmini set */
91 { "TC0D", "TC0P", NULL }, 94 { "TC0D", "TC0P", NULL },
92/* Set 3: Mac Pro (2 x Quad-Core) */ 95/* Set 4: Mac Pro (2 x Quad-Core) */
93 { "TA0P", "TCAG", "TCAH", "TCBG", "TCBH", "TC0C", "TC0D", "TC0P", 96 { "TA0P", "TCAG", "TCAH", "TCBG", "TCBH", "TC0C", "TC0D", "TC0P",
94 "TC1C", "TC1D", "TC2C", "TC2D", "TC3C", "TC3D", "THTG", "TH0P", 97 "TC1C", "TC1D", "TC2C", "TC2D", "TC3C", "TC3D", "THTG", "TH0P",
95 "TH1P", "TH2P", "TH3P", "TMAP", "TMAS", "TMBS", "TM0P", "TM0S", 98 "TH1P", "TH2P", "TH3P", "TMAP", "TMAS", "TMBS", "TM0P", "TM0S",
@@ -1212,12 +1215,14 @@ static void applesmc_release_accelerometer(void)
1212static __initdata struct dmi_match_data applesmc_dmi_data[] = { 1215static __initdata struct dmi_match_data applesmc_dmi_data[] = {
1213/* MacBook Pro: accelerometer, backlight and temperature set 0 */ 1216/* MacBook Pro: accelerometer, backlight and temperature set 0 */
1214 { .accelerometer = 1, .light = 1, .temperature_set = 0 }, 1217 { .accelerometer = 1, .light = 1, .temperature_set = 0 },
1215/* MacBook: accelerometer and temperature set 1 */ 1218/* MacBook2: accelerometer and temperature set 1 */
1216 { .accelerometer = 1, .light = 0, .temperature_set = 1 }, 1219 { .accelerometer = 1, .light = 0, .temperature_set = 1 },
1217/* MacMini: temperature set 2 */ 1220/* MacBook: accelerometer and temperature set 2 */
1218 { .accelerometer = 0, .light = 0, .temperature_set = 2 }, 1221 { .accelerometer = 1, .light = 0, .temperature_set = 2 },
1219/* MacPro: temperature set 3 */ 1222/* MacMini: temperature set 3 */
1220 { .accelerometer = 0, .light = 0, .temperature_set = 3 }, 1223 { .accelerometer = 0, .light = 0, .temperature_set = 3 },
1224/* MacPro: temperature set 4 */
1225 { .accelerometer = 0, .light = 0, .temperature_set = 4 },
1221}; 1226};
1222 1227
1223/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". 1228/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1229,16 +1234,20 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
1229 (void*)&applesmc_dmi_data[0]}, 1234 (void*)&applesmc_dmi_data[0]},
1230 { applesmc_dmi_match, "Apple MacBook", { 1235 { applesmc_dmi_match, "Apple MacBook", {
1231 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1236 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1232 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook") }, 1237 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook2") },
1233 (void*)&applesmc_dmi_data[1]}, 1238 (void*)&applesmc_dmi_data[1]},
1239 { applesmc_dmi_match, "Apple MacBook", {
1240 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1241 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook") },
1242 (void*)&applesmc_dmi_data[2]},
1234 { applesmc_dmi_match, "Apple Macmini", { 1243 { applesmc_dmi_match, "Apple Macmini", {
1235 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1244 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1236 DMI_MATCH(DMI_PRODUCT_NAME,"Macmini") }, 1245 DMI_MATCH(DMI_PRODUCT_NAME,"Macmini") },
1237 (void*)&applesmc_dmi_data[2]}, 1246 (void*)&applesmc_dmi_data[3]},
1238 { applesmc_dmi_match, "Apple MacPro2", { 1247 { applesmc_dmi_match, "Apple MacPro2", {
1239 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1248 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1240 DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") }, 1249 DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") },
1241 (void*)&applesmc_dmi_data[3]}, 1250 (void*)&applesmc_dmi_data[4]},
1242 { .ident = NULL } 1251 { .ident = NULL }
1243}; 1252};
1244 1253
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index 950cea8d1d65..84712a22acea 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -49,7 +49,7 @@
49#include "lm75.h" 49#include "lm75.h"
50 50
51/* I2C addresses to scan */ 51/* I2C addresses to scan */
52static unsigned short normal_i2c[] = { 0x2d, I2C_CLIENT_END }; 52static const unsigned short normal_i2c[] = { 0x2d, I2C_CLIENT_END };
53 53
54/* Insmod parameters */ 54/* Insmod parameters */
55I2C_CLIENT_INSMOD_1(asb100); 55I2C_CLIENT_INSMOD_1(asb100);
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index cce3350e539e..01c17e387f03 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -42,7 +42,7 @@ MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
42#define ATXP1_VIDMASK 0x1f 42#define ATXP1_VIDMASK 0x1f
43#define ATXP1_GPIO1MASK 0x0f 43#define ATXP1_GPIO1MASK 0x0f
44 44
45static unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END }; 45static const unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END };
46 46
47I2C_CLIENT_INSMOD_1(atxp1); 47I2C_CLIENT_INSMOD_1(atxp1);
48 48
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 3ee60d26e3a2..70239acecc8e 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -38,7 +38,8 @@
38 38
39#define DRVNAME "coretemp" 39#define DRVNAME "coretemp"
40 40
41typedef enum { SHOW_TEMP, SHOW_TJMAX, SHOW_LABEL, SHOW_NAME } SHOW; 41typedef enum { SHOW_TEMP, SHOW_TJMAX, SHOW_TTARGET, SHOW_LABEL,
42 SHOW_NAME } SHOW;
42 43
43/* 44/*
44 * Functions declaration 45 * Functions declaration
@@ -55,6 +56,7 @@ struct coretemp_data {
55 unsigned long last_updated; /* in jiffies */ 56 unsigned long last_updated; /* in jiffies */
56 int temp; 57 int temp;
57 int tjmax; 58 int tjmax;
59 int ttarget;
58 u8 alarm; 60 u8 alarm;
59}; 61};
60 62
@@ -93,9 +95,10 @@ static ssize_t show_temp(struct device *dev,
93 95
94 if (attr->index == SHOW_TEMP) 96 if (attr->index == SHOW_TEMP)
95 err = data->valid ? sprintf(buf, "%d\n", data->temp) : -EAGAIN; 97 err = data->valid ? sprintf(buf, "%d\n", data->temp) : -EAGAIN;
96 else 98 else if (attr->index == SHOW_TJMAX)
97 err = sprintf(buf, "%d\n", data->tjmax); 99 err = sprintf(buf, "%d\n", data->tjmax);
98 100 else
101 err = sprintf(buf, "%d\n", data->ttarget);
99 return err; 102 return err;
100} 103}
101 104
@@ -103,6 +106,8 @@ static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL,
103 SHOW_TEMP); 106 SHOW_TEMP);
104static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp, NULL, 107static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp, NULL,
105 SHOW_TJMAX); 108 SHOW_TJMAX);
109static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_temp, NULL,
110 SHOW_TTARGET);
106static DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL); 111static DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL);
107static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL); 112static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_name, NULL, SHOW_LABEL);
108static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME); 113static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, SHOW_NAME);
@@ -147,6 +152,56 @@ static struct coretemp_data *coretemp_update_device(struct device *dev)
147 return data; 152 return data;
148} 153}
149 154
155static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
156{
157 /* The 100C is default for both mobile and non mobile CPUs */
158
159 int tjmax = 100000;
160 int ismobile = 1;
161 int err;
162 u32 eax, edx;
163
164 /* Early chips have no MSR for TjMax */
165
166 if ((c->x86_model == 0xf) && (c->x86_mask < 4)) {
167 ismobile = 0;
168 }
169
170 if ((c->x86_model > 0xe) && (ismobile)) {
171
172 /* Now we can detect the mobile CPU using Intel provided table
173 http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
174 For Core2 cores, check MSR 0x17, bit 28 1 = Mobile CPU
175 */
176
177 err = rdmsr_safe_on_cpu(id, 0x17, &eax, &edx);
178 if (err) {
179 dev_warn(dev,
180 "Unable to access MSR 0x17, assuming desktop"
181 " CPU\n");
182 ismobile = 0;
183 } else if (!(eax & 0x10000000)) {
184 ismobile = 0;
185 }
186 }
187
188 if (ismobile) {
189
190 err = rdmsr_safe_on_cpu(id, 0xee, &eax, &edx);
191 if (err) {
192 dev_warn(dev,
193 "Unable to access MSR 0xEE, for Tjmax, left"
194 " at default");
195 } else if (eax & 0x40000000) {
196 tjmax = 85000;
197 }
198 } else {
199 dev_warn(dev, "Using relative temperature scale!\n");
200 }
201
202 return tjmax;
203}
204
150static int __devinit coretemp_probe(struct platform_device *pdev) 205static int __devinit coretemp_probe(struct platform_device *pdev)
151{ 206{
152 struct coretemp_data *data; 207 struct coretemp_data *data;
@@ -163,8 +218,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
163 data->id = pdev->id; 218 data->id = pdev->id;
164 data->name = "coretemp"; 219 data->name = "coretemp";
165 mutex_init(&data->update_lock); 220 mutex_init(&data->update_lock);
166 /* Tjmax default is 100 degrees C */
167 data->tjmax = 100000;
168 221
169 /* test if we can access the THERM_STATUS MSR */ 222 /* test if we can access the THERM_STATUS MSR */
170 err = rdmsr_safe_on_cpu(data->id, MSR_IA32_THERM_STATUS, &eax, &edx); 223 err = rdmsr_safe_on_cpu(data->id, MSR_IA32_THERM_STATUS, &eax, &edx);
@@ -191,40 +244,29 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
191 } 244 }
192 } 245 }
193 246
194 /* Some processors have Tjmax 85 following magic should detect it 247 data->tjmax = adjust_tjmax(c, data->id, &pdev->dev);
195 Intel won't disclose the information without signed NDA, but 248 platform_set_drvdata(pdev, data);
196 individuals cannot sign it. Catch(ed) 22.
197 */
198 249
199 if (((c->x86_model == 0xf) && (c->x86_mask > 3)) || 250 /* read the still undocumented IA32_TEMPERATURE_TARGET it exists
200 (c->x86_model == 0xe)) { 251 on older CPUs but not in this register */
201 err = rdmsr_safe_on_cpu(data->id, 0xee, &eax, &edx); 252
253 if (c->x86_model > 0xe) {
254 err = rdmsr_safe_on_cpu(data->id, 0x1a2, &eax, &edx);
202 if (err) { 255 if (err) {
203 dev_warn(&pdev->dev, 256 dev_warn(&pdev->dev, "Unable to read"
204 "Unable to access MSR 0xEE, Tjmax left at %d " 257 " IA32_TEMPERATURE_TARGET MSR\n");
205 "degrees C\n", data->tjmax/1000); 258 } else {
206 } else if (eax & 0x40000000) { 259 data->ttarget = data->tjmax -
207 data->tjmax = 85000; 260 (((eax >> 8) & 0xff) * 1000);
261 err = device_create_file(&pdev->dev,
262 &sensor_dev_attr_temp1_max.dev_attr);
263 if (err)
264 goto exit_free;
208 } 265 }
209 } 266 }
210 267
211 /* Intel says that above should not work for desktop Core2 processors,
212 but it seems to work. There is no other way how get the absolute
213 readings. Warn the user about this. First check if are desktop,
214 bit 50 of MSR_IA32_PLATFORM_ID should be 0.
215 */
216
217 rdmsr_safe_on_cpu(data->id, MSR_IA32_PLATFORM_ID, &eax, &edx);
218
219 if ((c->x86_model == 0xf) && (!(edx & 0x00040000))) {
220 dev_warn(&pdev->dev, "Using undocumented features, absolute "
221 "temperature might be wrong!\n");
222 }
223
224 platform_set_drvdata(pdev, data);
225
226 if ((err = sysfs_create_group(&pdev->dev.kobj, &coretemp_group))) 268 if ((err = sysfs_create_group(&pdev->dev.kobj, &coretemp_group)))
227 goto exit_free; 269 goto exit_dev;
228 270
229 data->hwmon_dev = hwmon_device_register(&pdev->dev); 271 data->hwmon_dev = hwmon_device_register(&pdev->dev);
230 if (IS_ERR(data->hwmon_dev)) { 272 if (IS_ERR(data->hwmon_dev)) {
@@ -238,6 +280,8 @@ static int __devinit coretemp_probe(struct platform_device *pdev)
238 280
239exit_class: 281exit_class:
240 sysfs_remove_group(&pdev->dev.kobj, &coretemp_group); 282 sysfs_remove_group(&pdev->dev.kobj, &coretemp_group);
283exit_dev:
284 device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
241exit_free: 285exit_free:
242 kfree(data); 286 kfree(data);
243exit: 287exit:
@@ -250,6 +294,7 @@ static int __devexit coretemp_remove(struct platform_device *pdev)
250 294
251 hwmon_device_unregister(data->hwmon_dev); 295 hwmon_device_unregister(data->hwmon_dev);
252 sysfs_remove_group(&pdev->dev.kobj, &coretemp_group); 296 sysfs_remove_group(&pdev->dev.kobj, &coretemp_group);
297 device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
253 platform_set_drvdata(pdev, NULL); 298 platform_set_drvdata(pdev, NULL);
254 kfree(data); 299 kfree(data);
255 return 0; 300 return 0;
@@ -330,7 +375,7 @@ static void coretemp_device_remove(unsigned int cpu)
330 mutex_unlock(&pdev_list_mutex); 375 mutex_unlock(&pdev_list_mutex);
331} 376}
332 377
333static int coretemp_cpu_callback(struct notifier_block *nfb, 378static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
334 unsigned long action, void *hcpu) 379 unsigned long action, void *hcpu)
335{ 380{
336 unsigned int cpu = (unsigned long) hcpu; 381 unsigned int cpu = (unsigned long) hcpu;
@@ -347,7 +392,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
347 return NOTIFY_OK; 392 return NOTIFY_OK;
348} 393}
349 394
350static struct notifier_block coretemp_cpu_notifier = { 395static struct notifier_block coretemp_cpu_notifier __refdata = {
351 .notifier_call = coretemp_cpu_callback, 396 .notifier_call = coretemp_cpu_callback,
352}; 397};
353#endif /* !CONFIG_HOTPLUG_CPU */ 398#endif /* !CONFIG_HOTPLUG_CPU */
@@ -368,10 +413,10 @@ static int __init coretemp_init(void)
368 for_each_online_cpu(i) { 413 for_each_online_cpu(i) {
369 struct cpuinfo_x86 *c = &cpu_data(i); 414 struct cpuinfo_x86 *c = &cpu_data(i);
370 415
371 /* check if family 6, models e, f, 16 */ 416 /* check if family 6, models 0xe, 0xf, 0x16, 0x17 */
372 if ((c->cpuid_level < 0) || (c->x86 != 0x6) || 417 if ((c->cpuid_level < 0) || (c->x86 != 0x6) ||
373 !((c->x86_model == 0xe) || (c->x86_model == 0xf) || 418 !((c->x86_model == 0xe) || (c->x86_model == 0xf) ||
374 (c->x86_model == 0x16))) { 419 (c->x86_model == 0x16) || (c->x86_model == 0x17))) {
375 420
376 /* supported CPU not found, but report the unknown 421 /* supported CPU not found, but report the unknown
377 family 6 CPU */ 422 family 6 CPU */
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index ddddd9f34c19..7673f65877e1 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -49,7 +49,7 @@ module_param(force_id, ushort, 0);
49MODULE_PARM_DESC(force_id, "Override the detected device ID"); 49MODULE_PARM_DESC(force_id, "Override the detected device ID");
50 50
51/* Addresses to scan */ 51/* Addresses to scan */
52static unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END}; 52static const unsigned short normal_i2c[] = {0x2c, 0x2d, 0x2e, I2C_CLIENT_END};
53 53
54/* Insmod parameters */ 54/* Insmod parameters */
55I2C_CLIENT_INSMOD_1(dme1737); 55I2C_CLIENT_INSMOD_1(dme1737);
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index 3f5163de13c1..5f300ffed657 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -34,7 +34,7 @@
34#include "lm75.h" 34#include "lm75.h"
35 35
36/* Addresses to scan */ 36/* Addresses to scan */
37static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 37static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
38 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; 38 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
39 39
40/* Insmod parameters */ 40/* Insmod parameters */
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 6892f76fc18a..1464338e4e11 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -37,7 +37,7 @@
37#include <linux/f75375s.h> 37#include <linux/f75375s.h>
38 38
39/* Addresses to scan */ 39/* Addresses to scan */
40static unsigned short normal_i2c[] = { 0x2d, 0x2e, I2C_CLIENT_END }; 40static const unsigned short normal_i2c[] = { 0x2d, 0x2e, I2C_CLIENT_END };
41 41
42/* Insmod parameters */ 42/* Insmod parameters */
43I2C_CLIENT_INSMOD_2(f75373, f75375); 43I2C_CLIENT_INSMOD_2(f75373, f75375);
diff --git a/drivers/hwmon/fscher.c b/drivers/hwmon/fscher.c
index 721c70177b17..ed26b66e0831 100644
--- a/drivers/hwmon/fscher.c
+++ b/drivers/hwmon/fscher.c
@@ -40,7 +40,7 @@
40 * Addresses to scan 40 * Addresses to scan
41 */ 41 */
42 42
43static unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END }; 43static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
44 44
45/* 45/*
46 * Insmod parameters 46 * Insmod parameters
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index b7c9eef0f928..bd89d270a5ed 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -44,7 +44,7 @@
44#include <linux/dmi.h> 44#include <linux/dmi.h>
45 45
46/* Addresses to scan */ 46/* Addresses to scan */
47static unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END }; 47static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
48 48
49/* Insmod parameters */ 49/* Insmod parameters */
50I2C_CLIENT_INSMOD_5(fscpos, fscher, fscscy, fschrc, fschmd); 50I2C_CLIENT_INSMOD_5(fscpos, fscher, fscscy, fschrc, fschmd);
diff --git a/drivers/hwmon/fscpos.c b/drivers/hwmon/fscpos.c
index 2f1075323a1e..00f48484e54b 100644
--- a/drivers/hwmon/fscpos.c
+++ b/drivers/hwmon/fscpos.c
@@ -43,7 +43,7 @@
43/* 43/*
44 * Addresses to scan 44 * Addresses to scan
45 */ 45 */
46static unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END }; 46static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
47 47
48/* 48/*
49 * Insmod parameters 49 * Insmod parameters
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 3b1ac48fce23..33e9e8a8d1ce 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -44,7 +44,7 @@
44#include <linux/sysfs.h> 44#include <linux/sysfs.h>
45 45
46/* Addresses to scan */ 46/* Addresses to scan */
47static unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; 47static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
48 48
49/* Insmod parameters */ 49/* Insmod parameters */
50I2C_CLIENT_INSMOD_2(gl518sm_r00, gl518sm_r80); 50I2C_CLIENT_INSMOD_2(gl518sm_r00, gl518sm_r80);
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 03ecdc334764..8984ef141627 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -39,7 +39,7 @@ module_param(extra_sensor_type, ushort, 0);
39MODULE_PARM_DESC(extra_sensor_type, "Type of extra sensor (0=autodetect, 1=temperature, 2=voltage)"); 39MODULE_PARM_DESC(extra_sensor_type, "Type of extra sensor (0=autodetect, 1=temperature, 2=voltage)");
40 40
41/* Addresses to scan */ 41/* Addresses to scan */
42static unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; 42static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
43 43
44/* Insmod parameters */ 44/* Insmod parameters */
45I2C_CLIENT_INSMOD_1(gl520sm); 45I2C_CLIENT_INSMOD_1(gl520sm);
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 650b07d5b902..116287008083 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -53,7 +53,7 @@
53 * Address is fully defined internally and cannot be changed. 53 * Address is fully defined internally and cannot be changed.
54 */ 54 */
55 55
56static unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END }; 56static const unsigned short normal_i2c[] = { 0x4c, I2C_CLIENT_END };
57 57
58/* 58/*
59 * Insmod parameters 59 * Insmod parameters
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index e5c35a355a57..115f4090b98e 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -31,7 +31,7 @@
31 31
32 32
33/* Addresses to scan */ 33/* Addresses to scan */
34static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 34static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
35 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; 35 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
36 36
37/* Insmod parameters */ 37/* Insmod parameters */
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index 459b70ad6bee..36d5a8c3ad8c 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -36,7 +36,8 @@
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37 37
38/* Addresses to scan */ 38/* Addresses to scan */
39static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, I2C_CLIENT_END }; 39static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
40 I2C_CLIENT_END };
40 41
41/* Insmod parameters */ 42/* Insmod parameters */
42I2C_CLIENT_INSMOD_1(lm77); 43I2C_CLIENT_INSMOD_1(lm77);
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 0a9eb1f6f4e4..ed7859f0e16a 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -37,8 +37,8 @@
37static struct platform_device *pdev; 37static struct platform_device *pdev;
38 38
39/* Addresses to scan */ 39/* Addresses to scan */
40static unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 40static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
41 0x2e, 0x2f, I2C_CLIENT_END }; 41 0x2e, 0x2f, I2C_CLIENT_END };
42static unsigned short isa_address = 0x290; 42static unsigned short isa_address = 0x290;
43 43
44/* Insmod parameters */ 44/* Insmod parameters */
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index a2ca055f3922..26c91c9d4769 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -32,8 +32,8 @@
32#include <linux/mutex.h> 32#include <linux/mutex.h>
33 33
34/* Addresses to scan */ 34/* Addresses to scan */
35static unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 35static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
36 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; 36 0x2e, 0x2f, I2C_CLIENT_END };
37 37
38/* Insmod parameters */ 38/* Insmod parameters */
39I2C_CLIENT_INSMOD_1(lm80); 39I2C_CLIENT_INSMOD_1(lm80);
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index 6e8903a6e902..6a8642fa25fb 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -48,10 +48,8 @@
48 * addresses. 48 * addresses.
49 */ 49 */
50 50
51static unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 51static const unsigned short normal_i2c[] = {
52 0x29, 0x2a, 0x2b, 52 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
53 0x4c, 0x4d, 0x4e,
54 I2C_CLIENT_END };
55 53
56/* 54/*
57 * Insmod parameters 55 * Insmod parameters
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 4bb0f291a6b8..182fe6a5605f 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -35,7 +35,7 @@
35#include <linux/mutex.h> 35#include <linux/mutex.h>
36 36
37/* Addresses to scan */ 37/* Addresses to scan */
38static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 38static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
39 39
40/* Insmod parameters */ 40/* Insmod parameters */
41I2C_CLIENT_INSMOD_6(lm85b, lm85c, adm1027, adt7463, emc6d100, emc6d102); 41I2C_CLIENT_INSMOD_6(lm85b, lm85c, adm1027, adt7463, emc6d100, emc6d102);
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 8ee07c5c97a1..e1c183f0aae0 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -73,7 +73,7 @@
73 * LM87 has three possible addresses: 0x2c, 0x2d and 0x2e. 73 * LM87 has three possible addresses: 0x2c, 0x2d and 0x2e.
74 */ 74 */
75 75
76static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 76static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
77 77
78/* 78/*
79 * Insmod parameters 79 * Insmod parameters
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index f7ec95bedbf6..d1a3da3dd8e0 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -101,10 +101,8 @@
101 * 0x4c, 0x4d or 0x4e. 101 * 0x4c, 0x4d or 0x4e.
102 */ 102 */
103 103
104static unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 104static const unsigned short normal_i2c[] = {
105 0x29, 0x2a, 0x2b, 105 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
106 0x4c, 0x4d, 0x4e,
107 I2C_CLIENT_END };
108 106
109/* 107/*
110 * Insmod parameters 108 * Insmod parameters
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index af5c77d568fe..c31942e08246 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -45,13 +45,14 @@
45#include <linux/slab.h> 45#include <linux/slab.h>
46#include <linux/i2c.h> 46#include <linux/i2c.h>
47#include <linux/hwmon.h> 47#include <linux/hwmon.h>
48#include <linux/hwmon-sysfs.h>
48#include <linux/err.h> 49#include <linux/err.h>
49#include <linux/mutex.h> 50#include <linux/mutex.h>
50 51
51/* The LM92 and MAX6635 have 2 two-state pins for address selection, 52/* The LM92 and MAX6635 have 2 two-state pins for address selection,
52 resulting in 4 possible addresses. */ 53 resulting in 4 possible addresses. */
53static unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 54static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
54 I2C_CLIENT_END }; 55 I2C_CLIENT_END };
55 56
56/* Insmod parameters */ 57/* Insmod parameters */
57I2C_CLIENT_INSMOD_1(lm92); 58I2C_CLIENT_INSMOD_1(lm92);
@@ -209,6 +210,14 @@ static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, ch
209 return sprintf(buf, "%d\n", ALARMS_FROM_REG(data->temp1_input)); 210 return sprintf(buf, "%d\n", ALARMS_FROM_REG(data->temp1_input));
210} 211}
211 212
213static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
214 char *buf)
215{
216 int bitnr = to_sensor_dev_attr(attr)->index;
217 struct lm92_data *data = lm92_update_device(dev);
218 return sprintf(buf, "%d\n", (data->temp1_input >> bitnr) & 1);
219}
220
212static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp1_input, NULL); 221static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp1_input, NULL);
213static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp1_crit, 222static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp1_crit,
214 set_temp1_crit); 223 set_temp1_crit);
@@ -221,6 +230,9 @@ static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp1_max,
221 set_temp1_max); 230 set_temp1_max);
222static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_temp1_max_hyst, NULL); 231static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_temp1_max_hyst, NULL);
223static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); 232static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
233static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 2);
234static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
235static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
224 236
225 237
226/* 238/*
@@ -297,7 +309,9 @@ static struct attribute *lm92_attributes[] = {
297 &dev_attr_temp1_max.attr, 309 &dev_attr_temp1_max.attr,
298 &dev_attr_temp1_max_hyst.attr, 310 &dev_attr_temp1_max_hyst.attr,
299 &dev_attr_alarms.attr, 311 &dev_attr_alarms.attr,
300 312 &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
313 &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
314 &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
301 NULL 315 NULL
302}; 316};
303 317
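
The lm92 hunk above is representative of the alarm conversions in this series: the aggregate "alarms" file is kept, and each alarm bit additionally gets its own sysfs file, with the bit number carried in the SENSOR_DEVICE_ATTR index. A minimal sketch of the pattern, with a hypothetical foo driver standing in for any of the chips touched here:

        #include <linux/kernel.h>
        #include <linux/device.h>
        #include <linux/hwmon-sysfs.h>
        #include <linux/sysfs.h>

        /* sketch only: struct foo_data and foo_update_device() are placeholders */
        static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
                                  char *buf)
        {
                int bitnr = to_sensor_dev_attr(attr)->index;    /* alarm bit to report */
                struct foo_data *data = foo_update_device(dev);

                return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
        }

        /* one file per alarm bit; the last macro argument is the bit number */
        static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
        static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
        static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 2);

Each attribute's .dev_attr.attr pointer then goes into the driver's attribute array, exactly as the hunk does for lm92_attributes[].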
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index ea61946a4bf7..5e678f5c883d 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -142,7 +142,7 @@
142 I2C_FUNC_SMBUS_WORD_DATA) 142 I2C_FUNC_SMBUS_WORD_DATA)
143 143
144/* Addresses to scan */ 144/* Addresses to scan */
145static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 145static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
146 146
147/* Insmod parameters */ 147/* Insmod parameters */
148I2C_CLIENT_INSMOD_1(lm93); 148I2C_CLIENT_INSMOD_1(lm93);
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 38a44c3d6cee..7e7267a04544 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -32,14 +32,13 @@
32#include <linux/jiffies.h> 32#include <linux/jiffies.h>
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <linux/hwmon.h> 34#include <linux/hwmon.h>
35#include <linux/hwmon-sysfs.h>
35#include <linux/err.h> 36#include <linux/err.h>
36#include <linux/mutex.h> 37#include <linux/mutex.h>
37#include <linux/sysfs.h> 38#include <linux/sysfs.h>
38 39
39static unsigned short normal_i2c[] = { 0x18, 0x19, 0x1a, 40static const unsigned short normal_i2c[] = {
40 0x29, 0x2a, 0x2b, 41 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
41 0x4c, 0x4d, 0x4e,
42 I2C_CLIENT_END };
43 42
44/* 43/*
45 * Insmod parameters 44 * Insmod parameters
@@ -161,6 +160,14 @@ static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, ch
161 return sprintf(buf, "%d\n", data->alarms); 160 return sprintf(buf, "%d\n", data->alarms);
162} 161}
163 162
163static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
164 char *buf)
165{
166 int bitnr = to_sensor_dev_attr(attr)->index;
167 struct max1619_data *data = max1619_update_device(dev);
168 return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
169}
170
164static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input1, NULL); 171static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input1, NULL);
165static DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input2, NULL); 172static DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input2, NULL);
166static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_low2, 173static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_low2,
@@ -172,6 +179,10 @@ static DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit2,
172static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp_hyst2, 179static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp_hyst2,
173 set_temp_hyst2); 180 set_temp_hyst2);
174static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); 181static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
182static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
183static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
184static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
185static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
175 186
176static struct attribute *max1619_attributes[] = { 187static struct attribute *max1619_attributes[] = {
177 &dev_attr_temp1_input.attr, 188 &dev_attr_temp1_input.attr,
@@ -182,6 +193,10 @@ static struct attribute *max1619_attributes[] = {
182 &dev_attr_temp2_crit_hyst.attr, 193 &dev_attr_temp2_crit_hyst.attr,
183 194
184 &dev_attr_alarms.attr, 195 &dev_attr_alarms.attr,
196 &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
197 &sensor_dev_attr_temp2_fault.dev_attr.attr,
198 &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
199 &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
185 NULL 200 NULL
186}; 201};
187 202
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 755570c1f4eb..52d528b76cc3 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -44,7 +44,8 @@
44 * Addresses to scan. There are four disjoint possibilities, by pin config. 44 * Addresses to scan. There are four disjoint possibilities, by pin config.
45 */ 45 */
46 46
47static unsigned short normal_i2c[] = {0x1b, 0x1f, 0x48, 0x4b, I2C_CLIENT_END}; 47static const unsigned short normal_i2c[] = {0x1b, 0x1f, 0x48, 0x4b,
48 I2C_CLIENT_END};
48 49
49/* 50/*
50 * Insmod parameters 51 * Insmod parameters
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 0d7f0c4d06bb..d1b498548736 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -198,6 +198,14 @@ static ssize_t get_fan_div(struct device *dev, struct device_attribute
198 return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[attr->index])); 198 return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[attr->index]));
199} 199}
200 200
201static ssize_t get_fan_alarm(struct device *dev, struct device_attribute
202 *devattr, char *buf)
203{
204 int bitnr = to_sensor_dev_attr(devattr)->index;
205 struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
206 return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
207}
208
201static ssize_t get_pwm(struct device *dev, struct device_attribute 209static ssize_t get_pwm(struct device *dev, struct device_attribute
202 *devattr, char *buf) 210 *devattr, char *buf)
203{ 211{
@@ -347,6 +355,8 @@ static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
347 get_fan_min, set_fan_min, offset - 1); \ 355 get_fan_min, set_fan_min, offset - 1); \
348static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \ 356static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
349 get_fan_div, set_fan_div, offset - 1); \ 357 get_fan_div, set_fan_div, offset - 1); \
358static SENSOR_DEVICE_ATTR(fan##offset##_alarm, S_IRUGO, get_fan_alarm, \
359 NULL, offset - 1); \
350static SENSOR_DEVICE_ATTR(pwm##offset, S_IRUGO | S_IWUSR, \ 360static SENSOR_DEVICE_ATTR(pwm##offset, S_IRUGO | S_IWUSR, \
351 get_pwm, set_pwm, offset - 1); \ 361 get_pwm, set_pwm, offset - 1); \
352static SENSOR_DEVICE_ATTR(pwm##offset##_enable, S_IRUGO | S_IWUSR, \ 362static SENSOR_DEVICE_ATTR(pwm##offset##_enable, S_IRUGO | S_IWUSR, \
@@ -374,12 +384,15 @@ static struct attribute *smsc47m1_attributes[] = {
374 &sensor_dev_attr_fan1_input.dev_attr.attr, 384 &sensor_dev_attr_fan1_input.dev_attr.attr,
375 &sensor_dev_attr_fan1_min.dev_attr.attr, 385 &sensor_dev_attr_fan1_min.dev_attr.attr,
376 &sensor_dev_attr_fan1_div.dev_attr.attr, 386 &sensor_dev_attr_fan1_div.dev_attr.attr,
387 &sensor_dev_attr_fan1_alarm.dev_attr.attr,
377 &sensor_dev_attr_fan2_input.dev_attr.attr, 388 &sensor_dev_attr_fan2_input.dev_attr.attr,
378 &sensor_dev_attr_fan2_min.dev_attr.attr, 389 &sensor_dev_attr_fan2_min.dev_attr.attr,
379 &sensor_dev_attr_fan2_div.dev_attr.attr, 390 &sensor_dev_attr_fan2_div.dev_attr.attr,
391 &sensor_dev_attr_fan2_alarm.dev_attr.attr,
380 &sensor_dev_attr_fan3_input.dev_attr.attr, 392 &sensor_dev_attr_fan3_input.dev_attr.attr,
381 &sensor_dev_attr_fan3_min.dev_attr.attr, 393 &sensor_dev_attr_fan3_min.dev_attr.attr,
382 &sensor_dev_attr_fan3_div.dev_attr.attr, 394 &sensor_dev_attr_fan3_div.dev_attr.attr,
395 &sensor_dev_attr_fan3_alarm.dev_attr.attr,
383 396
384 &sensor_dev_attr_pwm1.dev_attr.attr, 397 &sensor_dev_attr_pwm1.dev_attr.attr,
385 &sensor_dev_attr_pwm1_enable.dev_attr.attr, 398 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
@@ -533,7 +546,9 @@ static int __devinit smsc47m1_probe(struct platform_device *pdev)
533 || (err = device_create_file(dev, 546 || (err = device_create_file(dev,
534 &sensor_dev_attr_fan1_min.dev_attr)) 547 &sensor_dev_attr_fan1_min.dev_attr))
535 || (err = device_create_file(dev, 548 || (err = device_create_file(dev,
536 &sensor_dev_attr_fan1_div.dev_attr))) 549 &sensor_dev_attr_fan1_div.dev_attr))
550 || (err = device_create_file(dev,
551 &sensor_dev_attr_fan1_alarm.dev_attr)))
537 goto error_remove_files; 552 goto error_remove_files;
538 } else 553 } else
539 dev_dbg(dev, "Fan 1 not enabled by hardware, skipping\n"); 554 dev_dbg(dev, "Fan 1 not enabled by hardware, skipping\n");
@@ -544,7 +559,9 @@ static int __devinit smsc47m1_probe(struct platform_device *pdev)
544 || (err = device_create_file(dev, 559 || (err = device_create_file(dev,
545 &sensor_dev_attr_fan2_min.dev_attr)) 560 &sensor_dev_attr_fan2_min.dev_attr))
546 || (err = device_create_file(dev, 561 || (err = device_create_file(dev,
547 &sensor_dev_attr_fan2_div.dev_attr))) 562 &sensor_dev_attr_fan2_div.dev_attr))
563 || (err = device_create_file(dev,
564 &sensor_dev_attr_fan2_alarm.dev_attr)))
548 goto error_remove_files; 565 goto error_remove_files;
549 } else 566 } else
550 dev_dbg(dev, "Fan 2 not enabled by hardware, skipping\n"); 567 dev_dbg(dev, "Fan 2 not enabled by hardware, skipping\n");
@@ -555,7 +572,9 @@ static int __devinit smsc47m1_probe(struct platform_device *pdev)
555 || (err = device_create_file(dev, 572 || (err = device_create_file(dev,
556 &sensor_dev_attr_fan3_min.dev_attr)) 573 &sensor_dev_attr_fan3_min.dev_attr))
557 || (err = device_create_file(dev, 574 || (err = device_create_file(dev,
558 &sensor_dev_attr_fan3_div.dev_attr))) 575 &sensor_dev_attr_fan3_div.dev_attr))
576 || (err = device_create_file(dev,
577 &sensor_dev_attr_fan3_alarm.dev_attr)))
559 goto error_remove_files; 578 goto error_remove_files;
560 } else if (data->type == smsc47m2) 579 } else if (data->type == smsc47m2)
561 dev_dbg(dev, "Fan 3 not enabled by hardware, skipping\n"); 580 dev_dbg(dev, "Fan 3 not enabled by hardware, skipping\n");
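
smsc47m1 generates its per-fan files from a token-pasting macro, so the new fanN_alarm attributes are added inside that macro and then created one channel at a time in probe(), reusing the existing error_remove_files unwind on failure. A sketch of the macro side only; the macro name is illustrative, and get_fan is assumed to exist in the driver alongside the get_fan_alarm callback defined in the hunk:

        #include <linux/hwmon-sysfs.h>

        static ssize_t get_fan(struct device *dev, struct device_attribute *devattr,
                               char *buf);
        static ssize_t get_fan_alarm(struct device *dev, struct device_attribute *devattr,
                                     char *buf);

        /* ## pastes the channel number into the sysfs names; "offset - 1" is
         * the zero-based index handed back to the shared callbacks.
         */
        #define fan_attributes(offset)                                          \
        static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO,                 \
                get_fan, NULL, offset - 1);                                     \
        static SENSOR_DEVICE_ATTR(fan##offset##_alarm, S_IRUGO,                 \
                get_fan_alarm, NULL, offset - 1)

        fan_attributes(1);
        fan_attributes(2);
        fan_attributes(3);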
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 8b0c188e60f6..3c9db6598ba7 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -34,7 +34,7 @@
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35 35
36/* Addresses to scan */ 36/* Addresses to scan */
37static unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END }; 37static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
38 38
39/* Insmod parameters */ 39/* Insmod parameters */
40I2C_CLIENT_INSMOD_1(smsc47m192); 40I2C_CLIENT_INSMOD_1(smsc47m192);
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 04dd7699b3ac..76a3859c3fbe 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -32,7 +32,7 @@
32MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
33 33
34/* Addresses to scan */ 34/* Addresses to scan */
35static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; 35static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
36 36
37/* Insmod parameters */ 37/* Insmod parameters */
38I2C_CLIENT_INSMOD_2(thmc50, adm1022); 38I2C_CLIENT_INSMOD_2(thmc50, adm1022);
@@ -52,9 +52,9 @@ I2C_CLIENT_MODULE_PARM(adm1022_temp3, "List of adapter,address pairs "
52 */ 52 */
53#define THMC50_REG_INTR 0x41 53#define THMC50_REG_INTR 0x41
54 54
55const static u8 THMC50_REG_TEMP[] = { 0x27, 0x26, 0x20 }; 55static const u8 THMC50_REG_TEMP[] = { 0x27, 0x26, 0x20 };
56const static u8 THMC50_REG_TEMP_MIN[] = { 0x3A, 0x38, 0x2C }; 56static const u8 THMC50_REG_TEMP_MIN[] = { 0x3A, 0x38, 0x2C };
57const static u8 THMC50_REG_TEMP_MAX[] = { 0x39, 0x37, 0x2B }; 57static const u8 THMC50_REG_TEMP_MAX[] = { 0x39, 0x37, 0x2B };
58 58
59#define THMC50_REG_CONF_nFANOFF 0x20 59#define THMC50_REG_CONF_nFANOFF 0x20
60 60
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 2635bba1e3fc..f1ee5e731968 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -533,6 +533,24 @@ static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, ch
533} 533}
534static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); 534static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
535 535
536static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
537 char *buf)
538{
539 int bitnr = to_sensor_dev_attr(attr)->index;
540 struct via686a_data *data = via686a_update_device(dev);
541 return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
542}
543static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
544static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
545static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
546static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
547static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
548static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
549static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 11);
550static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 15);
551static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
552static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
553
536static ssize_t show_name(struct device *dev, struct device_attribute 554static ssize_t show_name(struct device *dev, struct device_attribute
537 *devattr, char *buf) 555 *devattr, char *buf)
538{ 556{
@@ -557,6 +575,11 @@ static struct attribute *via686a_attributes[] = {
557 &sensor_dev_attr_in2_max.dev_attr.attr, 575 &sensor_dev_attr_in2_max.dev_attr.attr,
558 &sensor_dev_attr_in3_max.dev_attr.attr, 576 &sensor_dev_attr_in3_max.dev_attr.attr,
559 &sensor_dev_attr_in4_max.dev_attr.attr, 577 &sensor_dev_attr_in4_max.dev_attr.attr,
578 &sensor_dev_attr_in0_alarm.dev_attr.attr,
579 &sensor_dev_attr_in1_alarm.dev_attr.attr,
580 &sensor_dev_attr_in2_alarm.dev_attr.attr,
581 &sensor_dev_attr_in3_alarm.dev_attr.attr,
582 &sensor_dev_attr_in4_alarm.dev_attr.attr,
560 583
561 &sensor_dev_attr_temp1_input.dev_attr.attr, 584 &sensor_dev_attr_temp1_input.dev_attr.attr,
562 &sensor_dev_attr_temp2_input.dev_attr.attr, 585 &sensor_dev_attr_temp2_input.dev_attr.attr,
@@ -567,6 +590,9 @@ static struct attribute *via686a_attributes[] = {
567 &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, 590 &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
568 &sensor_dev_attr_temp2_max_hyst.dev_attr.attr, 591 &sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
569 &sensor_dev_attr_temp3_max_hyst.dev_attr.attr, 592 &sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
593 &sensor_dev_attr_temp1_alarm.dev_attr.attr,
594 &sensor_dev_attr_temp2_alarm.dev_attr.attr,
595 &sensor_dev_attr_temp3_alarm.dev_attr.attr,
570 596
571 &sensor_dev_attr_fan1_input.dev_attr.attr, 597 &sensor_dev_attr_fan1_input.dev_attr.attr,
572 &sensor_dev_attr_fan2_input.dev_attr.attr, 598 &sensor_dev_attr_fan2_input.dev_attr.attr,
@@ -574,6 +600,8 @@ static struct attribute *via686a_attributes[] = {
574 &sensor_dev_attr_fan2_min.dev_attr.attr, 600 &sensor_dev_attr_fan2_min.dev_attr.attr,
575 &sensor_dev_attr_fan1_div.dev_attr.attr, 601 &sensor_dev_attr_fan1_div.dev_attr.attr,
576 &sensor_dev_attr_fan2_div.dev_attr.attr, 602 &sensor_dev_attr_fan2_div.dev_attr.attr,
603 &sensor_dev_attr_fan1_alarm.dev_attr.attr,
604 &sensor_dev_attr_fan2_alarm.dev_attr.attr,
577 605
578 &dev_attr_alarms.attr, 606 &dev_attr_alarms.attr,
579 &dev_attr_name.attr, 607 &dev_attr_name.attr,
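
The via686a hunk encodes the chip's alarm-register layout directly in the SENSOR_DEVICE_ATTR indices: in0-in3 are bits 0-3, in4 is bit 8, temp1-temp3 are bits 4, 11 and 15, and the fans are bits 6 and 7. A hypothetical enum, not part of this patch, could name those positions once instead of repeating magic numbers:

        /* illustrative only: bit numbers as used by the attributes in the hunk */
        enum via686a_alarm_bit {
                VIA686A_ALARM_IN0   = 0,
                VIA686A_ALARM_IN1   = 1,
                VIA686A_ALARM_IN2   = 2,
                VIA686A_ALARM_IN3   = 3,
                VIA686A_ALARM_TEMP1 = 4,
                VIA686A_ALARM_FAN1  = 6,
                VIA686A_ALARM_FAN2  = 7,
                VIA686A_ALARM_IN4   = 8,
                VIA686A_ALARM_TEMP2 = 11,
                VIA686A_ALARM_TEMP3 = 15,
        };

        /* e.g. SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL,
         *                         VIA686A_ALARM_IN4);
         */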
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index f87661775fe0..5bc57275cae8 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -2,7 +2,7 @@
2 vt8231.c - Part of lm_sensors, Linux kernel modules 2 vt8231.c - Part of lm_sensors, Linux kernel modules
3 for hardware monitoring 3 for hardware monitoring
4 4
5 Copyright (c) 2005 Roger Lucas <roger@planbit.co.uk> 5 Copyright (c) 2005 Roger Lucas <vt8231@hiddenengine.co.uk>
6 Copyright (c) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com> 6 Copyright (c) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
7 Aaron M. Marsh <amarsh@sdf.lonestar.org> 7 Aaron M. Marsh <amarsh@sdf.lonestar.org>
8 8
@@ -541,6 +541,28 @@ static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
541} 541}
542static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); 542static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
543 543
544static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
545 char *buf)
546{
547 int bitnr = to_sensor_dev_attr(attr)->index;
548 struct vt8231_data *data = vt8231_update_device(dev);
549 return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
550}
551static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
552static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 11);
553static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0);
554static SENSOR_DEVICE_ATTR(temp4_alarm, S_IRUGO, show_alarm, NULL, 1);
555static SENSOR_DEVICE_ATTR(temp5_alarm, S_IRUGO, show_alarm, NULL, 3);
556static SENSOR_DEVICE_ATTR(temp6_alarm, S_IRUGO, show_alarm, NULL, 8);
557static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 11);
558static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0);
559static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 1);
560static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
561static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
562static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 2);
563static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
564static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
565
544static ssize_t show_name(struct device *dev, struct device_attribute 566static ssize_t show_name(struct device *dev, struct device_attribute
545 *devattr, char *buf) 567 *devattr, char *buf)
546{ 568{
@@ -549,36 +571,42 @@ static ssize_t show_name(struct device *dev, struct device_attribute
549} 571}
550static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); 572static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
551 573
552static struct attribute *vt8231_attributes_temps[6][4] = { 574static struct attribute *vt8231_attributes_temps[6][5] = {
553 { 575 {
554 &dev_attr_temp1_input.attr, 576 &dev_attr_temp1_input.attr,
555 &dev_attr_temp1_max_hyst.attr, 577 &dev_attr_temp1_max_hyst.attr,
556 &dev_attr_temp1_max.attr, 578 &dev_attr_temp1_max.attr,
579 &sensor_dev_attr_temp1_alarm.dev_attr.attr,
557 NULL 580 NULL
558 }, { 581 }, {
559 &sensor_dev_attr_temp2_input.dev_attr.attr, 582 &sensor_dev_attr_temp2_input.dev_attr.attr,
560 &sensor_dev_attr_temp2_max_hyst.dev_attr.attr, 583 &sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
561 &sensor_dev_attr_temp2_max.dev_attr.attr, 584 &sensor_dev_attr_temp2_max.dev_attr.attr,
585 &sensor_dev_attr_temp2_alarm.dev_attr.attr,
562 NULL 586 NULL
563 }, { 587 }, {
564 &sensor_dev_attr_temp3_input.dev_attr.attr, 588 &sensor_dev_attr_temp3_input.dev_attr.attr,
565 &sensor_dev_attr_temp3_max_hyst.dev_attr.attr, 589 &sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
566 &sensor_dev_attr_temp3_max.dev_attr.attr, 590 &sensor_dev_attr_temp3_max.dev_attr.attr,
591 &sensor_dev_attr_temp3_alarm.dev_attr.attr,
567 NULL 592 NULL
568 }, { 593 }, {
569 &sensor_dev_attr_temp4_input.dev_attr.attr, 594 &sensor_dev_attr_temp4_input.dev_attr.attr,
570 &sensor_dev_attr_temp4_max_hyst.dev_attr.attr, 595 &sensor_dev_attr_temp4_max_hyst.dev_attr.attr,
571 &sensor_dev_attr_temp4_max.dev_attr.attr, 596 &sensor_dev_attr_temp4_max.dev_attr.attr,
597 &sensor_dev_attr_temp4_alarm.dev_attr.attr,
572 NULL 598 NULL
573 }, { 599 }, {
574 &sensor_dev_attr_temp5_input.dev_attr.attr, 600 &sensor_dev_attr_temp5_input.dev_attr.attr,
575 &sensor_dev_attr_temp5_max_hyst.dev_attr.attr, 601 &sensor_dev_attr_temp5_max_hyst.dev_attr.attr,
576 &sensor_dev_attr_temp5_max.dev_attr.attr, 602 &sensor_dev_attr_temp5_max.dev_attr.attr,
603 &sensor_dev_attr_temp5_alarm.dev_attr.attr,
577 NULL 604 NULL
578 }, { 605 }, {
579 &sensor_dev_attr_temp6_input.dev_attr.attr, 606 &sensor_dev_attr_temp6_input.dev_attr.attr,
580 &sensor_dev_attr_temp6_max_hyst.dev_attr.attr, 607 &sensor_dev_attr_temp6_max_hyst.dev_attr.attr,
581 &sensor_dev_attr_temp6_max.dev_attr.attr, 608 &sensor_dev_attr_temp6_max.dev_attr.attr,
609 &sensor_dev_attr_temp6_alarm.dev_attr.attr,
582 NULL 610 NULL
583 } 611 }
584}; 612};
@@ -592,36 +620,42 @@ static const struct attribute_group vt8231_group_temps[6] = {
592 { .attrs = vt8231_attributes_temps[5] }, 620 { .attrs = vt8231_attributes_temps[5] },
593}; 621};
594 622
595static struct attribute *vt8231_attributes_volts[6][4] = { 623static struct attribute *vt8231_attributes_volts[6][5] = {
596 { 624 {
597 &sensor_dev_attr_in0_input.dev_attr.attr, 625 &sensor_dev_attr_in0_input.dev_attr.attr,
598 &sensor_dev_attr_in0_min.dev_attr.attr, 626 &sensor_dev_attr_in0_min.dev_attr.attr,
599 &sensor_dev_attr_in0_max.dev_attr.attr, 627 &sensor_dev_attr_in0_max.dev_attr.attr,
628 &sensor_dev_attr_in0_alarm.dev_attr.attr,
600 NULL 629 NULL
601 }, { 630 }, {
602 &sensor_dev_attr_in1_input.dev_attr.attr, 631 &sensor_dev_attr_in1_input.dev_attr.attr,
603 &sensor_dev_attr_in1_min.dev_attr.attr, 632 &sensor_dev_attr_in1_min.dev_attr.attr,
604 &sensor_dev_attr_in1_max.dev_attr.attr, 633 &sensor_dev_attr_in1_max.dev_attr.attr,
634 &sensor_dev_attr_in1_alarm.dev_attr.attr,
605 NULL 635 NULL
606 }, { 636 }, {
607 &sensor_dev_attr_in2_input.dev_attr.attr, 637 &sensor_dev_attr_in2_input.dev_attr.attr,
608 &sensor_dev_attr_in2_min.dev_attr.attr, 638 &sensor_dev_attr_in2_min.dev_attr.attr,
609 &sensor_dev_attr_in2_max.dev_attr.attr, 639 &sensor_dev_attr_in2_max.dev_attr.attr,
640 &sensor_dev_attr_in2_alarm.dev_attr.attr,
610 NULL 641 NULL
611 }, { 642 }, {
612 &sensor_dev_attr_in3_input.dev_attr.attr, 643 &sensor_dev_attr_in3_input.dev_attr.attr,
613 &sensor_dev_attr_in3_min.dev_attr.attr, 644 &sensor_dev_attr_in3_min.dev_attr.attr,
614 &sensor_dev_attr_in3_max.dev_attr.attr, 645 &sensor_dev_attr_in3_max.dev_attr.attr,
646 &sensor_dev_attr_in3_alarm.dev_attr.attr,
615 NULL 647 NULL
616 }, { 648 }, {
617 &sensor_dev_attr_in4_input.dev_attr.attr, 649 &sensor_dev_attr_in4_input.dev_attr.attr,
618 &sensor_dev_attr_in4_min.dev_attr.attr, 650 &sensor_dev_attr_in4_min.dev_attr.attr,
619 &sensor_dev_attr_in4_max.dev_attr.attr, 651 &sensor_dev_attr_in4_max.dev_attr.attr,
652 &sensor_dev_attr_in4_alarm.dev_attr.attr,
620 NULL 653 NULL
621 }, { 654 }, {
622 &dev_attr_in5_input.attr, 655 &dev_attr_in5_input.attr,
623 &dev_attr_in5_min.attr, 656 &dev_attr_in5_min.attr,
624 &dev_attr_in5_max.attr, 657 &dev_attr_in5_max.attr,
658 &sensor_dev_attr_in5_alarm.dev_attr.attr,
625 NULL 659 NULL
626 } 660 }
627}; 661};
@@ -642,6 +676,8 @@ static struct attribute *vt8231_attributes[] = {
642 &sensor_dev_attr_fan2_min.dev_attr.attr, 676 &sensor_dev_attr_fan2_min.dev_attr.attr,
643 &sensor_dev_attr_fan1_div.dev_attr.attr, 677 &sensor_dev_attr_fan1_div.dev_attr.attr,
644 &sensor_dev_attr_fan2_div.dev_attr.attr, 678 &sensor_dev_attr_fan2_div.dev_attr.attr,
679 &sensor_dev_attr_fan1_alarm.dev_attr.attr,
680 &sensor_dev_attr_fan2_alarm.dev_attr.attr,
645 &dev_attr_alarms.attr, 681 &dev_attr_alarms.attr,
646 &dev_attr_name.attr, 682 &dev_attr_name.attr,
647 NULL 683 NULL
@@ -963,7 +999,7 @@ static void __exit sm_vt8231_exit(void)
963 } 999 }
964} 1000}
965 1001
966MODULE_AUTHOR("Roger Lucas <roger@planbit.co.uk>"); 1002MODULE_AUTHOR("Roger Lucas <vt8231@hiddenengine.co.uk>");
967MODULE_DESCRIPTION("VT8231 sensors"); 1003MODULE_DESCRIPTION("VT8231 sensors");
968MODULE_LICENSE("GPL"); 1004MODULE_LICENSE("GPL");
969 1005
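
vt8231 keeps its temperature and voltage attributes in per-channel arrays (vt8231_attributes_temps[][] and vt8231_attributes_volts[][]), so each row grows from 4 to 5 slots here to make room for the new alarm attribute ahead of the NULL terminator, and only the channels a given board enables get registered. A rough sketch of how such per-channel groups are typically created; the enabled-channel test and names are placeholders, not the driver's:

        #include <linux/device.h>
        #include <linux/sysfs.h>

        /* sketch: one attribute_group per channel, registered only when that
         * channel is wired up; the caller unwinds partially created groups.
         */
        static int foo_register_channels(struct device *dev,
                                         const struct attribute_group *groups,
                                         int nr_channels, unsigned long enabled_mask)
        {
                int i, err;

                for (i = 0; i < nr_channels; i++) {
                        if (!(enabled_mask & (1UL << i)))
                                continue;       /* channel absent on this board */
                        err = sysfs_create_group(&dev->kobj, &groups[i]);
                        if (err)
                                return err;
                }
                return 0;
        }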
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 7421f6ea53e1..5c85670e2d16 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -53,8 +53,8 @@
53static struct platform_device *pdev; 53static struct platform_device *pdev;
54 54
55/* Addresses to scan */ 55/* Addresses to scan */
56static unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 56static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
57 0x2e, 0x2f, I2C_CLIENT_END }; 57 0x2e, 0x2f, I2C_CLIENT_END };
58static unsigned short isa_address = 0x290; 58static unsigned short isa_address = 0x290;
59 59
60/* Insmod parameters */ 60/* Insmod parameters */
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 85bd21ee3298..85077c4c8039 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -47,7 +47,8 @@
47#define NUMBER_OF_TEMPIN 3 47#define NUMBER_OF_TEMPIN 3
48 48
49/* Addresses to scan */ 49/* Addresses to scan */
50static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; 50static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
51 I2C_CLIENT_END };
51 52
52/* Insmod parameters */ 53/* Insmod parameters */
53I2C_CLIENT_INSMOD_1(w83791d); 54I2C_CLIENT_INSMOD_1(w83791d);
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 007449d3e16e..299629d47ed6 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -46,7 +46,8 @@
46#include <linux/sysfs.h> 46#include <linux/sysfs.h>
47 47
48/* Addresses to scan */ 48/* Addresses to scan */
49static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; 49static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
50 I2C_CLIENT_END };
50 51
51/* Insmod parameters */ 52/* Insmod parameters */
52I2C_CLIENT_INSMOD_1(w83792d); 53I2C_CLIENT_INSMOD_1(w83792d);
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 3ba1d6b33473..ee35af93b574 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -37,7 +37,8 @@
37#include <linux/mutex.h> 37#include <linux/mutex.h>
38 38
39/* Addresses to scan */ 39/* Addresses to scan */
40static unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; 40static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
41 I2C_CLIENT_END };
41 42
42/* Insmod parameters */ 43/* Insmod parameters */
43I2C_CLIENT_INSMOD_1(w83793); 44I2C_CLIENT_INSMOD_1(w83793);
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 1d6259d29e74..77f2d482888b 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -49,7 +49,7 @@
49 * Address is fully defined internally and cannot be changed. 49 * Address is fully defined internally and cannot be changed.
50 */ 50 */
51 51
52static unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END }; 52static const unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END };
53 53
54/* 54/*
55 * Insmod parameters 55 * Insmod parameters
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 1dbee4fa23ad..41e22ddb568a 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -35,7 +35,7 @@
35#include <linux/mutex.h> 35#include <linux/mutex.h>
36 36
37/* Addresses to scan */ 37/* Addresses to scan */
38static unsigned short normal_i2c[] = { 0x2e, 0x2f, I2C_CLIENT_END }; 38static const unsigned short normal_i2c[] = { 0x2e, 0x2f, I2C_CLIENT_END };
39 39
40/* Insmod parameters */ 40/* Insmod parameters */
41I2C_CLIENT_INSMOD_1(w83l786ng); 41I2C_CLIENT_INSMOD_1(w83l786ng);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index b61f56b6f311..476b0bb72d6c 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -177,6 +177,8 @@ config I2C_I801
177 ESB2 177 ESB2
178 ICH8 178 ICH8
179 ICH9 179 ICH9
180 Tolapai
181 ICH10
180 182
181 This driver can also be built as a module. If so, the module 183 This driver can also be built as a module. If so, the module
182 will be called i2c-i801. 184 will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index aa9157913b9a..b0f771fe4326 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -40,7 +40,9 @@
40 82801G (ICH7) 0x27da 32 hard yes yes yes 40 82801G (ICH7) 0x27da 32 hard yes yes yes
41 82801H (ICH8) 0x283e 32 hard yes yes yes 41 82801H (ICH8) 0x283e 32 hard yes yes yes
42 82801I (ICH9) 0x2930 32 hard yes yes yes 42 82801I (ICH9) 0x2930 32 hard yes yes yes
43 Tolapai 0x5032 32 hard yes ? ? 43 Tolapai 0x5032 32 hard yes yes yes
44 ICH10 0x3a30 32 hard yes yes yes
45 ICH10 0x3a60 32 hard yes yes yes
44 46
45 Features supported by this driver: 47 Features supported by this driver:
46 Software PEC no 48 Software PEC no
@@ -588,6 +590,8 @@ static struct pci_device_id i801_ids[] = {
588 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) }, 590 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) },
589 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) }, 591 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) },
590 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) }, 592 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) },
593 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
594 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
591 { 0, } 595 { 0, }
592}; 596};
593 597
@@ -608,10 +612,12 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
608 case PCI_DEVICE_ID_INTEL_ESB2_17: 612 case PCI_DEVICE_ID_INTEL_ESB2_17:
609 case PCI_DEVICE_ID_INTEL_ICH8_5: 613 case PCI_DEVICE_ID_INTEL_ICH8_5:
610 case PCI_DEVICE_ID_INTEL_ICH9_6: 614 case PCI_DEVICE_ID_INTEL_ICH9_6:
615 case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
616 case PCI_DEVICE_ID_INTEL_ICH10_4:
617 case PCI_DEVICE_ID_INTEL_ICH10_5:
611 i801_features |= FEATURE_I2C_BLOCK_READ; 618 i801_features |= FEATURE_I2C_BLOCK_READ;
612 /* fall through */ 619 /* fall through */
613 case PCI_DEVICE_ID_INTEL_82801DB_3: 620 case PCI_DEVICE_ID_INTEL_82801DB_3:
614 case PCI_DEVICE_ID_INTEL_TOLAPAI_1:
615 i801_features |= FEATURE_SMBUS_PEC; 621 i801_features |= FEATURE_SMBUS_PEC;
616 i801_features |= FEATURE_BLOCK_BUFFER; 622 i801_features |= FEATURE_BLOCK_BUFFER;
617 break; 623 break;
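
Besides the two new ICH10 device IDs, the i2c-i801 hunk moves Tolapai up in the probe() switch: every device listed before the fall-through now gains FEATURE_I2C_BLOCK_READ on top of the PEC and block-buffer bits set in the shared tail. Reduced to a sketch with illustrative constants (the real driver uses the PCI_DEVICE_ID_* macros and FEATURE_* flags):

        /* sketch of the cumulative fall-through: newer parts pick up an extra
         * capability bit, then share the common feature assignments below.
         */
        #define FEAT_BLOCK_BUFFER       (1 << 0)
        #define FEAT_SMBUS_PEC          (1 << 1)
        #define FEAT_I2C_BLOCK_READ     (1 << 2)

        static unsigned int foo_features(unsigned short device_id)
        {
                unsigned int features = 0;

                switch (device_id) {
                case 0x2930:                    /* ICH9 */
                case 0x3a30:                    /* ICH10 */
                case 0x3a60:                    /* ICH10 */
                        features |= FEAT_I2C_BLOCK_READ;
                        /* fall through */
                case 0x24c3:                    /* older part, e.g. 82801DB */
                        features |= FEAT_SMBUS_PEC | FEAT_BLOCK_BUFFER;
                        break;
                }
                return features;
        }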
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 5161aaf9341b..496ee875eb4f 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -125,6 +125,13 @@ static int __devinit pca_isa_probe(struct device *dev, unsigned int id)
125 125
126 dev_info(dev, "i/o base %#08lx. irq %d\n", base, irq); 126 dev_info(dev, "i/o base %#08lx. irq %d\n", base, irq);
127 127
128#ifdef CONFIG_PPC_MERGE
129 if (check_legacy_ioport(base)) {
130 dev_err(dev, "I/O address %#08lx is not available\n", base);
131 goto out;
132 }
133#endif
134
128 if (!request_region(base, IO_SIZE, "i2c-pca-isa")) { 135 if (!request_region(base, IO_SIZE, "i2c-pca-isa")) {
129 dev_err(dev, "I/O address %#08lx is in use\n", base); 136 dev_err(dev, "I/O address %#08lx is in use\n", base);
130 goto out; 137 goto out;
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index be99c02ecac5..b03af5653c65 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -122,7 +122,7 @@ struct pmcmsptwi_data {
122}; 122};
123 123
124/* The default settings */ 124/* The default settings */
125const static struct pmcmsptwi_clockcfg pmcmsptwi_defclockcfg = { 125static const struct pmcmsptwi_clockcfg pmcmsptwi_defclockcfg = {
126 .standard = { 126 .standard = {
127 .filter = 0x3, 127 .filter = 0x3,
128 .clock = 0x1f, 128 .clock = 0x1f,
@@ -133,7 +133,7 @@ const static struct pmcmsptwi_clockcfg pmcmsptwi_defclockcfg = {
133 }, 133 },
134}; 134};
135 135
136const static struct pmcmsptwi_cfg pmcmsptwi_defcfg = { 136static const struct pmcmsptwi_cfg pmcmsptwi_defcfg = {
137 .arbf = 0x03, 137 .arbf = 0x03,
138 .nak = 0x03, 138 .nak = 0x03,
139 .add10 = 0x00, 139 .add10 = 0x00,
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 2598d29fd7a4..2d2087ad708f 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -138,11 +138,13 @@ static const struct bits icr_bits[] = {
138 PXA_BIT(ICR_UR, "UR", "ur"), 138 PXA_BIT(ICR_UR, "UR", "ur"),
139}; 139};
140 140
141#ifdef CONFIG_I2C_PXA_SLAVE
141static void decode_ICR(unsigned int val) 142static void decode_ICR(unsigned int val)
142{ 143{
143 decode_bits(KERN_DEBUG "ICR", icr_bits, ARRAY_SIZE(icr_bits), val); 144 decode_bits(KERN_DEBUG "ICR", icr_bits, ARRAY_SIZE(icr_bits), val);
144 printk("\n"); 145 printk("\n");
145} 146}
147#endif
146 148
147static unsigned int i2c_debug = DEBUG; 149static unsigned int i2c_debug = DEBUG;
148 150
@@ -997,7 +999,14 @@ static int i2c_pxa_probe(struct platform_device *dev)
997 spin_lock_init(&i2c->lock); 999 spin_lock_init(&i2c->lock);
998 init_waitqueue_head(&i2c->wait); 1000 init_waitqueue_head(&i2c->wait);
999 1001
1000 sprintf(i2c->adap.name, "pxa_i2c-i2c.%u", dev->id); 1002 /*
1003 * If "dev->id" is negative we consider it as zero.
1004 * The reason to do so is to avoid sysfs names that only make
1005 * sense when there are multiple adapters.
1006 */
1007 i2c->adap.nr = dev->id != -1 ? dev->id : 0;
1008 snprintf(i2c->adap.name, sizeof(i2c->adap.name), "pxa_i2c-i2c.%u",
1009 i2c->adap.nr);
1001 1010
1002 i2c->clk = clk_get(&dev->dev, "I2CCLK"); 1011 i2c->clk = clk_get(&dev->dev, "I2CCLK");
1003 if (IS_ERR(i2c->clk)) { 1012 if (IS_ERR(i2c->clk)) {
@@ -1048,13 +1057,6 @@ static int i2c_pxa_probe(struct platform_device *dev)
1048 i2c->adap.algo_data = i2c; 1057 i2c->adap.algo_data = i2c;
1049 i2c->adap.dev.parent = &dev->dev; 1058 i2c->adap.dev.parent = &dev->dev;
1050 1059
1051 /*
1052 * If "dev->id" is negative we consider it as zero.
1053 * The reason to do so is to avoid sysfs names that only make
1054 * sense when there are multiple adapters.
1055 */
1056 i2c->adap.nr = dev->id != -1 ? dev->id : 0;
1057
1058 ret = i2c_add_numbered_adapter(&i2c->adap); 1060 ret = i2c_add_numbered_adapter(&i2c->adap);
1059 if (ret < 0) { 1061 if (ret < 0) {
1060 printk(KERN_INFO "I2C: Failed to add bus\n"); 1062 printk(KERN_INFO "I2C: Failed to add bus\n");
@@ -1078,6 +1080,7 @@ eadapt:
1078ereqirq: 1080ereqirq:
1079 clk_disable(i2c->clk); 1081 clk_disable(i2c->clk);
1080 i2c_pxa_disable(dev); 1082 i2c_pxa_disable(dev);
1083 iounmap(i2c->reg_base);
1081eremap: 1084eremap:
1082 clk_put(i2c->clk); 1085 clk_put(i2c->clk);
1083eclk: 1086eclk:
@@ -1087,7 +1090,7 @@ emalloc:
1087 return ret; 1090 return ret;
1088} 1091}
1089 1092
1090static int i2c_pxa_remove(struct platform_device *dev) 1093static int __exit i2c_pxa_remove(struct platform_device *dev)
1091{ 1094{
1092 struct pxa_i2c *i2c = platform_get_drvdata(dev); 1095 struct pxa_i2c *i2c = platform_get_drvdata(dev);
1093 1096
@@ -1101,6 +1104,7 @@ static int i2c_pxa_remove(struct platform_device *dev)
1101 clk_put(i2c->clk); 1104 clk_put(i2c->clk);
1102 i2c_pxa_disable(dev); 1105 i2c_pxa_disable(dev);
1103 1106
1107 iounmap(i2c->reg_base);
1104 release_mem_region(i2c->iobase, i2c->iosize); 1108 release_mem_region(i2c->iobase, i2c->iosize);
1105 kfree(i2c); 1109 kfree(i2c);
1106 1110
@@ -1109,9 +1113,10 @@ static int i2c_pxa_remove(struct platform_device *dev)
1109 1113
1110static struct platform_driver i2c_pxa_driver = { 1114static struct platform_driver i2c_pxa_driver = {
1111 .probe = i2c_pxa_probe, 1115 .probe = i2c_pxa_probe,
1112 .remove = i2c_pxa_remove, 1116 .remove = __exit_p(i2c_pxa_remove),
1113 .driver = { 1117 .driver = {
1114 .name = "pxa2xx-i2c", 1118 .name = "pxa2xx-i2c",
1119 .owner = THIS_MODULE,
1115 }, 1120 },
1116}; 1121};
1117 1122
@@ -1120,9 +1125,9 @@ static int __init i2c_adap_pxa_init(void)
1120 return platform_driver_register(&i2c_pxa_driver); 1125 return platform_driver_register(&i2c_pxa_driver);
1121} 1126}
1122 1127
1123static void i2c_adap_pxa_exit(void) 1128static void __exit i2c_adap_pxa_exit(void)
1124{ 1129{
1125 return platform_driver_unregister(&i2c_pxa_driver); 1130 platform_driver_unregister(&i2c_pxa_driver);
1126} 1131}
1127 1132
1128MODULE_LICENSE("GPL"); 1133MODULE_LICENSE("GPL");
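
Besides the iounmap() fixes on the error and remove paths, the i2c-pxa hunk converts the driver to the usual platform boilerplate: the remove function becomes __exit and is wired up through __exit_p() (which evaluates to NULL for built-in drivers, where __exit code is discarded), .owner is set, and the exit routine stops returning the void result of platform_driver_unregister(). Roughly the shape it converges on, with only the driver name taken from the hunk and everything else schematic:

        #include <linux/init.h>
        #include <linux/module.h>
        #include <linux/platform_device.h>

        static int foo_probe(struct platform_device *pdev)
        {
                /* schematic: map resources, register the adapter, ... */
                return 0;
        }

        static int __exit foo_remove(struct platform_device *pdev)
        {
                /* schematic: unregister, iounmap, release resources, ... */
                return 0;
        }

        static struct platform_driver foo_driver = {
                .probe  = foo_probe,
                .remove = __exit_p(foo_remove), /* NULL when foo_remove is discarded */
                .driver = {
                        .name   = "pxa2xx-i2c",
                        .owner  = THIS_MODULE,
                },
        };

        static int __init foo_init(void)
        {
                return platform_driver_register(&foo_driver);
        }

        static void __exit foo_exit(void)
        {
                platform_driver_unregister(&foo_driver);
        }

        module_init(foo_init);
        module_exit(foo_exit);
        MODULE_LICENSE("GPL");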
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 043c34ad0a05..df752e690e47 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -378,6 +378,9 @@ config BLK_DEV_IDEPNP
378 would like the kernel to automatically detect and activate 378 would like the kernel to automatically detect and activate
379 it, say Y here. 379 it, say Y here.
380 380
381config BLK_DEV_IDEDMA_SFF
382 bool
383
381if PCI 384if PCI
382 385
383comment "PCI IDE chipsets support" 386comment "PCI IDE chipsets support"
@@ -459,6 +462,7 @@ config BLK_DEV_RZ1000
459config BLK_DEV_IDEDMA_PCI 462config BLK_DEV_IDEDMA_PCI
460 bool 463 bool
461 select BLK_DEV_IDEPCI 464 select BLK_DEV_IDEPCI
465 select BLK_DEV_IDEDMA_SFF
462 466
463config BLK_DEV_AEC62XX 467config BLK_DEV_AEC62XX
464 tristate "AEC62XX chipset support" 468 tristate "AEC62XX chipset support"
@@ -688,23 +692,6 @@ config BLK_DEV_PDC202XX_OLD
688 692
689 If unsure, say N. 693 If unsure, say N.
690 694
691config PDC202XX_BURST
692 bool "Special UDMA Feature"
693 depends on BLK_DEV_PDC202XX_OLD
694 help
695 This option causes the pdc202xx driver to enable UDMA modes on the
696 PDC202xx even when the PDC202xx BIOS has not done so.
697
698 It was originally designed for the PDC20246/Ultra33, whose BIOS will
699 only setup UDMA on the first two PDC20246 cards. It has also been
700 used successfully on a PDC20265/Ultra100, allowing use of UDMA modes
701 when the PDC20265 BIOS has been disabled (for faster boot up).
702
703 Please read the comments at the top of
704 <file:drivers/ide/pci/pdc202xx_old.c>.
705
706 If unsure, say N.
707
708config BLK_DEV_PDC202XX_NEW 695config BLK_DEV_PDC202XX_NEW
709 tristate "PROMISE PDC202{68|69|70|71|75|76|77} support" 696 tristate "PROMISE PDC202{68|69|70|71|75|76|77} support"
710 select BLK_DEV_IDEDMA_PCI 697 select BLK_DEV_IDEDMA_PCI
@@ -1016,7 +1003,7 @@ config BLK_DEV_Q40IDE
1016config BLK_DEV_PALMCHIP_BK3710 1003config BLK_DEV_PALMCHIP_BK3710
1017 tristate "Palmchip bk3710 IDE controller support" 1004 tristate "Palmchip bk3710 IDE controller support"
1018 depends on ARCH_DAVINCI 1005 depends on ARCH_DAVINCI
1019 select BLK_DEV_IDEDMA_PCI 1006 select BLK_DEV_IDEDMA_SFF
1020 help 1007 help
1021 Say Y here if you want to support the onchip IDE controller on the 1008 Say Y here if you want to support the onchip IDE controller on the
1022 TI DaVinci SoC 1009 TI DaVinci SoC
@@ -1124,7 +1111,8 @@ config BLK_DEV_UMC8672
1124endif 1111endif
1125 1112
1126config BLK_DEV_IDEDMA 1113config BLK_DEV_IDEDMA
1127 def_bool BLK_DEV_IDEDMA_PCI || BLK_DEV_IDEDMA_PMAC || BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA 1114 def_bool BLK_DEV_IDEDMA_SFF || BLK_DEV_IDEDMA_PMAC || \
1115 BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
1128 1116
1129config IDE_ARCH_OBSOLETE_INIT 1117config IDE_ARCH_OBSOLETE_INIT
1130 def_bool ALPHA || (ARM && !ARCH_L7200) || BLACKFIN || X86 || IA64 || M32R || MIPS || PARISC || PPC || (SUPERH64 && BLK_DEV_IDEPCI) || SPARC 1118 def_bool ALPHA || (ARM && !ARCH_L7200) || BLACKFIN || X86 || IA64 || M32R || MIPS || PARISC || PPC || (SUPERH64 && BLK_DEV_IDEPCI) || SPARC
diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c
index 0e7574c0ee60..161d30c8481e 100644
--- a/drivers/ide/arm/bast-ide.c
+++ b/drivers/ide/arm/bast-ide.c
@@ -21,12 +21,7 @@
21#include <asm/arch/bast-map.h> 21#include <asm/arch/bast-map.h>
22#include <asm/arch/bast-irq.h> 22#include <asm/arch/bast-irq.h>
23 23
24/* list of registered interfaces */ 24static int __init bastide_register(unsigned int base, unsigned int aux, int irq)
25static ide_hwif_t *ifs[2];
26
27static int __init
28bastide_register(unsigned int base, unsigned int aux, int irq,
29 ide_hwif_t **hwif)
30{ 25{
31 ide_hwif_t *hwif; 26 ide_hwif_t *hwif;
32 hw_regs_t hw; 27 hw_regs_t hw;
@@ -76,8 +71,9 @@ static int __init bastide_init(void)
76 71
77 printk("BAST: IDE driver, (c) 2003-2004 Simtec Electronics\n"); 72 printk("BAST: IDE driver, (c) 2003-2004 Simtec Electronics\n");
78 73
79 bastide_register(BAST_VA_IDEPRI, BAST_VA_IDEPRIAUX, IRQ_IDE0, &ifs[0]); 74 bastide_register(BAST_VA_IDEPRI, BAST_VA_IDEPRIAUX, IRQ_IDE0);
80 bastide_register(BAST_VA_IDESEC, BAST_VA_IDESECAUX, IRQ_IDE1, &ifs[1]); 75 bastide_register(BAST_VA_IDESEC, BAST_VA_IDESECAUX, IRQ_IDE1);
76
81 return 0; 77 return 0;
82} 78}
83 79
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index c3069970a012..8e1f6bd33887 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -311,15 +311,37 @@ static void __devinit palm_bk3710_chipinit(void __iomem *base)
311 palm_bk3710_setpiomode(base, NULL, 0, 600, 0); 311 palm_bk3710_setpiomode(base, NULL, 0, 600, 0);
312 palm_bk3710_setpiomode(base, NULL, 1, 600, 0); 312 palm_bk3710_setpiomode(base, NULL, 1, 600, 0);
313} 313}
314
315static u8 __devinit palm_bk3710_cable_detect(ide_hwif_t *hwif)
316{
317 return ATA_CBL_PATA80;
318}
319
320static void __devinit palm_bk3710_init_hwif(ide_hwif_t *hwif)
321{
322 hwif->set_pio_mode = palm_bk3710_set_pio_mode;
323 hwif->set_dma_mode = palm_bk3710_set_dma_mode;
324
325 hwif->cable_detect = palm_bk3710_cable_detect;
326}
327
328static const struct ide_port_info __devinitdata palm_bk3710_port_info = {
329 .init_hwif = palm_bk3710_init_hwif,
330 .host_flags = IDE_HFLAG_NO_DMA, /* hack (no PCI) */
331 .pio_mask = ATA_PIO4,
332 .udma_mask = ATA_UDMA4, /* (input clk 99MHz) */
333 .mwdma_mask = ATA_MWDMA2,
334};
335
314static int __devinit palm_bk3710_probe(struct platform_device *pdev) 336static int __devinit palm_bk3710_probe(struct platform_device *pdev)
315{ 337{
316 hw_regs_t ide_ctlr_info;
317 int index = 0;
318 int pribase;
319 struct clk *clkp; 338 struct clk *clkp;
320 struct resource *mem, *irq; 339 struct resource *mem, *irq;
321 ide_hwif_t *hwif; 340 ide_hwif_t *hwif;
322 void __iomem *base; 341 void __iomem *base;
342 int pribase, i;
343 hw_regs_t hw;
344 u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
323 345
324 clkp = clk_get(NULL, "IDECLK"); 346 clkp = clk_get(NULL, "IDECLK");
325 if (IS_ERR(clkp)) 347 if (IS_ERR(clkp))
@@ -330,7 +352,7 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
330 ide_palm_clk = clk_get_rate(ideclkp)/100000; 352 ide_palm_clk = clk_get_rate(ideclkp)/100000;
331 ide_palm_clk = (10000/ide_palm_clk) + 1; 353 ide_palm_clk = (10000/ide_palm_clk) + 1;
332 /* Register the IDE interface with Linux ATA Interface */ 354 /* Register the IDE interface with Linux ATA Interface */
333 memset(&ide_ctlr_info, 0, sizeof(ide_ctlr_info)); 355 memset(&hw, 0, sizeof(hw));
334 356
335 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 357 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
336 if (mem == NULL) { 358 if (mem == NULL) {
@@ -349,32 +371,42 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
349 palm_bk3710_chipinit(base); 371 palm_bk3710_chipinit(base);
350 372
351 pribase = mem->start + IDE_PALM_ATA_PRI_REG_OFFSET; 373 pribase = mem->start + IDE_PALM_ATA_PRI_REG_OFFSET;
352 for (index = 0; index < IDE_NR_PORTS - 2; index++) 374 for (i = 0; i < IDE_NR_PORTS - 2; i++)
353 ide_ctlr_info.io_ports[index] = pribase + index; 375 hw.io_ports[i] = pribase + i;
354 ide_ctlr_info.io_ports[IDE_CONTROL_OFFSET] = mem->start + 376 hw.io_ports[IDE_CONTROL_OFFSET] = mem->start +
355 IDE_PALM_ATA_PRI_CTL_OFFSET; 377 IDE_PALM_ATA_PRI_CTL_OFFSET;
356 ide_ctlr_info.irq = irq->start; 378 hw.irq = irq->start;
357 ide_ctlr_info.chipset = ide_palm3710; 379 hw.chipset = ide_palm3710;
358 380
359 if (ide_register_hw(&ide_ctlr_info, NULL, &hwif) < 0) { 381 hwif = ide_deprecated_find_port(hw.io_ports[IDE_DATA_OFFSET]);
360 printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n"); 382 if (hwif == NULL)
361 return -ENODEV; 383 goto out;
362 } 384
385 i = hwif->index;
386
387 if (hwif->present)
388 ide_unregister(i, 0, 0);
389 else if (!hwif->hold)
390 ide_init_port_data(hwif, i);
391
392 ide_init_port_hw(hwif, &hw);
363 393
364 hwif->set_pio_mode = &palm_bk3710_set_pio_mode;
365 hwif->set_dma_mode = &palm_bk3710_set_dma_mode;
366 hwif->mmio = 1; 394 hwif->mmio = 1;
367 default_hwif_mmiops(hwif); 395 default_hwif_mmiops(hwif);
368 hwif->cbl = ATA_CBL_PATA80;
369 hwif->ultra_mask = 0x1f; /* Ultra DMA Mode 4 Max
370 (input clk 99MHz) */
371 hwif->mwdma_mask = 0x7;
372 hwif->drives[0].autotune = 1;
373 hwif->drives[1].autotune = 1;
374 396
375 ide_setup_dma(hwif, mem->start); 397 ide_setup_dma(hwif, mem->start);
376 398
399 idx[0] = i;
400
401 ide_device_add(idx, &palm_bk3710_port_info);
402
403 if (!hwif->present)
404 goto out;
405
377 return 0; 406 return 0;
407out:
408 printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n");
409 return -ENODEV;
378} 410}
379 411
380static struct platform_driver platform_bk_driver = { 412static struct platform_driver platform_bk_driver = {
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 5e42c19a03e3..310e497b5838 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1207,9 +1207,13 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
1207end_request: 1207end_request:
1208 if (blk_pc_request(rq)) { 1208 if (blk_pc_request(rq)) {
1209 unsigned long flags; 1209 unsigned long flags;
1210 unsigned int dlen = rq->data_len;
1211
1212 if (dma)
1213 rq->data_len = 0;
1210 1214
1211 spin_lock_irqsave(&ide_lock, flags); 1215 spin_lock_irqsave(&ide_lock, flags);
1212 if (__blk_end_request(rq, 0, rq->data_len)) 1216 if (__blk_end_request(rq, 0, dlen))
1213 BUG(); 1217 BUG();
1214 HWGROUP(drive)->rq = NULL; 1218 HWGROUP(drive)->rq = NULL;
1215 spin_unlock_irqrestore(&ide_lock, flags); 1219 spin_unlock_irqrestore(&ide_lock, flags);
@@ -1555,7 +1559,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1555 if (stat) 1559 if (stat)
1556 return stat; 1560 return stat;
1557 1561
1558 toc->hdr.toc_length = ntohs (toc->hdr.toc_length); 1562 toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length);
1559 1563
1560 if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) { 1564 if (info->cd_flags & IDE_CD_FLAG_TOCTRACKS_AS_BCD) {
1561 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); 1565 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track);
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 3c69822507e2..8f5bed471050 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -397,6 +397,7 @@ static inline int idedisk_supports_lba48(const struct hd_driveid *id)
397static const struct drive_list_entry hpa_list[] = { 397static const struct drive_list_entry hpa_list[] = {
398 { "ST340823A", NULL }, 398 { "ST340823A", NULL },
399 { "ST320413A", NULL }, 399 { "ST320413A", NULL },
400 { "ST310211A", NULL },
400 { NULL, NULL } 401 { NULL, NULL }
401}; 402};
402 403
@@ -590,20 +591,24 @@ static ide_proc_entry_t idedisk_proc[] = {
590static void idedisk_prepare_flush(struct request_queue *q, struct request *rq) 591static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
591{ 592{
592 ide_drive_t *drive = q->queuedata; 593 ide_drive_t *drive = q->queuedata;
593 ide_task_t task; 594 ide_task_t *task = kmalloc(sizeof(*task), GFP_ATOMIC);
594 595
595 memset(&task, 0, sizeof(task)); 596 /* FIXME: map struct ide_taskfile on rq->cmd[] */
597 BUG_ON(task == NULL);
598
599 memset(task, 0, sizeof(*task));
596 if (ide_id_has_flush_cache_ext(drive->id) && 600 if (ide_id_has_flush_cache_ext(drive->id) &&
597 (drive->capacity64 >= (1UL << 28))) 601 (drive->capacity64 >= (1UL << 28)))
598 task.tf.command = WIN_FLUSH_CACHE_EXT; 602 task->tf.command = WIN_FLUSH_CACHE_EXT;
599 else 603 else
600 task.tf.command = WIN_FLUSH_CACHE; 604 task->tf.command = WIN_FLUSH_CACHE;
601 task.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; 605 task->tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE |
602 task.data_phase = TASKFILE_NO_DATA; 606 IDE_TFLAG_DYN;
607 task->data_phase = TASKFILE_NO_DATA;
603 608
604 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 609 rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
605 rq->cmd_flags |= REQ_SOFTBARRIER; 610 rq->cmd_flags |= REQ_SOFTBARRIER;
606 rq->special = &task; 611 rq->special = task;
607} 612}
608 613
609/* 614/*
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index a4bb32883c6b..d0e7b537353e 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -198,7 +198,7 @@ int ide_build_sglist(ide_drive_t *drive, struct request *rq)
198 198
199EXPORT_SYMBOL_GPL(ide_build_sglist); 199EXPORT_SYMBOL_GPL(ide_build_sglist);
200 200
201#ifdef CONFIG_BLK_DEV_IDEDMA_PCI 201#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
202/** 202/**
203 * ide_build_dmatable - build IDE DMA table 203 * ide_build_dmatable - build IDE DMA table
204 * 204 *
@@ -316,7 +316,7 @@ void ide_destroy_dmatable (ide_drive_t *drive)
316 316
317EXPORT_SYMBOL_GPL(ide_destroy_dmatable); 317EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
318 318
319#ifdef CONFIG_BLK_DEV_IDEDMA_PCI 319#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
320/** 320/**
321 * config_drive_for_dma - attempt to activate IDE DMA 321 * config_drive_for_dma - attempt to activate IDE DMA
322 * @drive: the drive to place in DMA mode 322 * @drive: the drive to place in DMA mode
@@ -424,7 +424,7 @@ void ide_dma_host_set(ide_drive_t *drive, int on)
424} 424}
425 425
426EXPORT_SYMBOL_GPL(ide_dma_host_set); 426EXPORT_SYMBOL_GPL(ide_dma_host_set);
427#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */ 427#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
428 428
429/** 429/**
430 * ide_dma_off_quietly - Generic DMA kill 430 * ide_dma_off_quietly - Generic DMA kill
@@ -474,7 +474,7 @@ void ide_dma_on(ide_drive_t *drive)
474 drive->hwif->dma_host_set(drive, 1); 474 drive->hwif->dma_host_set(drive, 1);
475} 475}
476 476
477#ifdef CONFIG_BLK_DEV_IDEDMA_PCI 477#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
478/** 478/**
479 * ide_dma_setup - begin a DMA phase 479 * ide_dma_setup - begin a DMA phase
480 * @drive: target device 480 * @drive: target device
@@ -591,7 +591,7 @@ static int __ide_dma_test_irq(ide_drive_t *drive)
591} 591}
592#else 592#else
593static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; } 593static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
594#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */ 594#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
595 595
596int __ide_dma_bad_drive (ide_drive_t *drive) 596int __ide_dma_bad_drive (ide_drive_t *drive)
597{ 597{
@@ -840,7 +840,7 @@ void ide_check_dma_crc(ide_drive_t *drive)
840 ide_dma_on(drive); 840 ide_dma_on(drive);
841} 841}
842 842
843#ifdef CONFIG_BLK_DEV_IDEDMA_PCI 843#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
844void ide_dma_lost_irq (ide_drive_t *drive) 844void ide_dma_lost_irq (ide_drive_t *drive)
845{ 845{
846 printk("%s: DMA interrupt recovery\n", drive->name); 846 printk("%s: DMA interrupt recovery\n", drive->name);
@@ -1002,4 +1002,4 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
1002} 1002}
1003 1003
1004EXPORT_SYMBOL_GPL(ide_setup_dma); 1004EXPORT_SYMBOL_GPL(ide_setup_dma);
1005#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */ 1005#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index 709b9e4d2871..9ebec08eefd9 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -17,9 +17,6 @@ static int __init ide_generic_init(void)
17 u8 idx[MAX_HWIFS]; 17 u8 idx[MAX_HWIFS];
18 int i; 18 int i;
19 19
20 if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET])
21 ide_get_lock(NULL, NULL); /* for atari only */
22
23 for (i = 0; i < MAX_HWIFS; i++) { 20 for (i = 0; i < MAX_HWIFS; i++) {
24 ide_hwif_t *hwif = &ide_hwifs[i]; 21 ide_hwif_t *hwif = &ide_hwifs[i];
25 22
@@ -31,9 +28,6 @@ static int __init ide_generic_init(void)
31 28
32 ide_device_add_all(idx, NULL); 29 ide_device_add_all(idx, NULL);
33 30
34 if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET])
35 ide_release_lock(); /* for atari only */
36
37 return 0; 31 return 0;
38} 32}
39 33
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 3addbe478d26..715379605a7b 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -361,17 +361,21 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
361 spin_unlock_irqrestore(&ide_lock, flags); 361 spin_unlock_irqrestore(&ide_lock, flags);
362 362
363 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 363 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
364 ide_task_t *args = (ide_task_t *) rq->special; 364 ide_task_t *task = (ide_task_t *)rq->special;
365
365 if (rq->errors == 0) 366 if (rq->errors == 0)
366 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); 367 rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);
367 368
368 if (args) { 369 if (task) {
369 struct ide_taskfile *tf = &args->tf; 370 struct ide_taskfile *tf = &task->tf;
370 371
371 tf->error = err; 372 tf->error = err;
372 tf->status = stat; 373 tf->status = stat;
373 374
374 ide_tf_read(drive, args); 375 ide_tf_read(drive, task);
376
377 if (task->tf_flags & IDE_TFLAG_DYN)
378 kfree(task);
375 } 379 }
376 } else if (blk_pm_request(rq)) { 380 } else if (blk_pm_request(rq)) {
377 struct request_pm_state *pm = rq->data; 381 struct request_pm_state *pm = rq->data;
@@ -388,7 +392,8 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
388 spin_lock_irqsave(&ide_lock, flags); 392 spin_lock_irqsave(&ide_lock, flags);
389 HWGROUP(drive)->rq = NULL; 393 HWGROUP(drive)->rq = NULL;
390 rq->errors = err; 394 rq->errors = err;
391 if (__blk_end_request(rq, (rq->errors ? -EIO : 0), 0)) 395 if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
396 blk_rq_bytes(rq))))
392 BUG(); 397 BUG();
393 spin_unlock_irqrestore(&ide_lock, flags); 398 spin_unlock_irqrestore(&ide_lock, flags);
394} 399}
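
The ide-disk.c and ide-io.c hunks above are two halves of one fix: idedisk_prepare_flush() can no longer keep its ide_task_t on the stack (the request outlives the function), so the task is now allocated dynamically, tagged IDE_TFLAG_DYN, and ide_end_drive_cmd() frees it after the taskfile has been read back. The ownership rule, reduced to a generic sketch (FOO_TFLAG_DYN and the foo_* names stand in for the real ones; the hunk itself uses kmalloc plus memset):

        #include <linux/slab.h>

        #define FOO_TFLAG_DYN   (1 << 0)        /* completion path owns and frees */

        struct foo_task {
                unsigned int    tf_flags;
                /* ... taskfile registers ... */
        };

        /* preparation: the task must outlive this function, so allocate it */
        static struct foo_task *foo_prepare_task(void)
        {
                struct foo_task *task = kzalloc(sizeof(*task), GFP_ATOMIC);

                if (task)
                        task->tf_flags |= FOO_TFLAG_DYN;        /* mark as dynamic */
                return task;
        }

        /* completion: free only what was marked dynamic; stack-allocated
         * tasks from other submitters pass through untouched.
         */
        static void foo_complete_task(struct foo_task *task)
        {
                if (task && (task->tf_flags & FOO_TFLAG_DYN))
                        kfree(task);
        }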
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index c32e759df208..c419266234a7 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -786,15 +786,11 @@ static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
786{ 786{
787 ide_hwgroup_t *hwgroup = HWGROUP(drive); 787 ide_hwgroup_t *hwgroup = HWGROUP(drive);
788 788
789 if (hwgroup->handler != NULL) { 789 BUG_ON(hwgroup->handler);
790 printk(KERN_CRIT "%s: ide_set_handler: handler not null; "
791 "old=%p, new=%p\n",
792 drive->name, hwgroup->handler, handler);
793 }
794 hwgroup->handler = handler; 790 hwgroup->handler = handler;
795 hwgroup->expiry = expiry; 791 hwgroup->expiry = expiry;
796 hwgroup->timer.expires = jiffies + timeout; 792 hwgroup->timer.expires = jiffies + timeout;
797 hwgroup->req_gen_timer = hwgroup->req_gen; 793 hwgroup->req_gen_timer = hwgroup->req_gen;
798 add_timer(&hwgroup->timer); 794 add_timer(&hwgroup->timer);
799} 795}
800 796
@@ -827,11 +823,9 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
827 unsigned timeout, ide_expiry_t *expiry) 823 unsigned timeout, ide_expiry_t *expiry)
828{ 824{
829 unsigned long flags; 825 unsigned long flags;
830 ide_hwgroup_t *hwgroup = HWGROUP(drive);
831 ide_hwif_t *hwif = HWIF(drive); 826 ide_hwif_t *hwif = HWIF(drive);
832 827
833 spin_lock_irqsave(&ide_lock, flags); 828 spin_lock_irqsave(&ide_lock, flags);
834 BUG_ON(hwgroup->handler);
835 __ide_set_handler(drive, handler, timeout, expiry); 829 __ide_set_handler(drive, handler, timeout, expiry);
836 hwif->OUTBSYNC(drive, cmd, IDE_COMMAND_REG); 830 hwif->OUTBSYNC(drive, cmd, IDE_COMMAND_REG);
837 /* 831 /*
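
The ide-iops.c change above collapses the "handler not null" warning into a single BUG_ON() inside __ide_set_handler() and drops the duplicate check from ide_execute_command(). A tiny plain-C sketch of the same idea, using assert() as a stand-in for BUG_ON(); the names are illustrative only:

    #include <assert.h>
    #include <stddef.h>

    typedef void (*handler_t)(void);

    struct group {
        handler_t handler;
    };

    /* Installing a handler while another is still pending is a caller bug,
     * so fail loudly (BUG_ON in the kernel, assert here) instead of logging. */
    static void set_handler(struct group *g, handler_t h)
    {
        assert(g->handler == NULL);
        g->handler = h;
    }

    static void dummy(void) { }

    int main(void)
    {
        struct group g = { NULL };
        set_handler(&g, dummy);   /* fine */
        g.handler = NULL;         /* handler ran and was cleared */
        set_handler(&g, dummy);   /* fine again; setting twice without clearing would trip the assert */
        return 0;
    }
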
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index 1ff676cc6473..29e2c9719c30 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -21,15 +21,6 @@
21#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22#include <asm/io.h> 22#include <asm/io.h>
23 23
24/*
25 * IDE library routines. These are plug in code that most
26 * drivers can use but occasionally may be weird enough
27 * to want to do their own thing with
28 *
29 * Add common non I/O op stuff here. Make sure it has proper
30 * kernel-doc function headers or your patch will be rejected
31 */
32
33static const char *udma_str[] = 24static const char *udma_str[] =
34 { "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44", 25 { "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
35 "UDMA/66", "UDMA/100", "UDMA/133", "UDMA7" }; 26 "UDMA/66", "UDMA/100", "UDMA/133", "UDMA7" };
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 6daea896c5db..4a2cb2868226 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1051,7 +1051,7 @@ static int init_irq (ide_hwif_t *hwif)
1051 int sa = 0; 1051 int sa = 0;
1052#if defined(__mc68000__) 1052#if defined(__mc68000__)
1053 sa = IRQF_SHARED; 1053 sa = IRQF_SHARED;
1054#endif /* __mc68000__ || CONFIG_APUS */ 1054#endif /* __mc68000__ */
1055 1055
1056 if (IDE_CHIPSET_IS_PCI(hwif->chipset)) 1056 if (IDE_CHIPSET_IS_PCI(hwif->chipset))
1057 sa = IRQF_SHARED; 1057 sa = IRQF_SHARED;
@@ -1355,7 +1355,7 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
1355 hwif->ultra_mask = d->udma_mask; 1355 hwif->ultra_mask = d->udma_mask;
1356 1356
1357 /* reset DMA masks only for SFF-style DMA controllers */ 1357 /* reset DMA masks only for SFF-style DMA controllers */
1358 if ((d->host_flags && IDE_HFLAG_NO_DMA) == 0 && hwif->dma_base == 0) 1358 if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0 && hwif->dma_base == 0)
1359 hwif->swdma_mask = hwif->mwdma_mask = hwif->ultra_mask = 0; 1359 hwif->swdma_mask = hwif->mwdma_mask = hwif->ultra_mask = 0;
1360 1360
1361 if (d->host_flags & IDE_HFLAG_RQSIZE_256) 1361 if (d->host_flags & IDE_HFLAG_RQSIZE_256)
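
The one-character fix above (host_flags && IDE_HFLAG_NO_DMA becoming host_flags & IDE_HFLAG_NO_DMA) matters because && only tests that both operands are non-zero, while & actually isolates the flag bit. A small illustration with a made-up flag value:

    #include <stdio.h>

    #define HFLAG_NO_DMA (1u << 3)   /* illustrative value only */

    int main(void)
    {
        unsigned int host_flags = 1u << 1;   /* NO_DMA is NOT set */

        /* Logical AND: any non-zero flags word makes the test true. */
        printf("&&: %d\n", (host_flags && HFLAG_NO_DMA) == 0);   /* prints 0 - wrong */

        /* Bitwise AND: true only when the NO_DMA bit itself is clear. */
        printf("&:  %d\n", (host_flags & HFLAG_NO_DMA) == 0);    /* prints 1 - correct */
        return 0;
    }
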
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 49dd2e7bae7a..0598ecfd5f37 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -466,9 +466,6 @@ static void ide_tape_put(struct ide_tape_obj *tape)
466/* 0 = no tape is loaded, so we don't rewind after ejecting */ 466/* 0 = no tape is loaded, so we don't rewind after ejecting */
467#define IDETAPE_MEDIUM_PRESENT 9 467#define IDETAPE_MEDIUM_PRESENT 9
468 468
469/* A define for the READ BUFFER command */
470#define IDETAPE_RETRIEVE_FAULTY_BLOCK 6
471
472/* Some defines for the SPACE command */ 469/* Some defines for the SPACE command */
473#define IDETAPE_SPACE_OVER_FILEMARK 1 470#define IDETAPE_SPACE_OVER_FILEMARK 1
474#define IDETAPE_SPACE_TO_EOD 3 471#define IDETAPE_SPACE_TO_EOD 3
@@ -490,7 +487,6 @@ enum {
490 REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */ 487 REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
491 REQ_IDETAPE_READ = (1 << 2), 488 REQ_IDETAPE_READ = (1 << 2),
492 REQ_IDETAPE_WRITE = (1 << 3), 489 REQ_IDETAPE_WRITE = (1 << 3),
493 REQ_IDETAPE_READ_BUFFER = (1 << 4),
494}; 490};
495 491
496/* Error codes returned in rq->errors to the higher part of the driver. */ 492/* Error codes returned in rq->errors to the higher part of the driver. */
@@ -1523,29 +1519,6 @@ static void idetape_create_read_cmd(idetape_tape_t *tape, idetape_pc_t *pc,
1523 set_bit(PC_DMA_RECOMMENDED, &pc->flags); 1519 set_bit(PC_DMA_RECOMMENDED, &pc->flags);
1524} 1520}
1525 1521
1526static void idetape_create_read_buffer_cmd(idetape_tape_t *tape,
1527 idetape_pc_t *pc, struct idetape_bh *bh)
1528{
1529 int size = 32768;
1530 struct idetape_bh *p = bh;
1531
1532 idetape_init_pc(pc);
1533 pc->c[0] = READ_BUFFER;
1534 pc->c[1] = IDETAPE_RETRIEVE_FAULTY_BLOCK;
1535 pc->c[7] = size >> 8;
1536 pc->c[8] = size & 0xff;
1537 pc->callback = &idetape_pc_callback;
1538 pc->bh = bh;
1539 atomic_set(&bh->b_count, 0);
1540 pc->buffer = NULL;
1541 while (p) {
1542 atomic_set(&p->b_count, 0);
1543 p = p->b_reqnext;
1544 }
1545 pc->request_transfer = size;
1546 pc->buffer_size = size;
1547}
1548
1549static void idetape_create_write_cmd(idetape_tape_t *tape, idetape_pc_t *pc, 1522static void idetape_create_write_cmd(idetape_tape_t *tape, idetape_pc_t *pc,
1550 unsigned int length, struct idetape_bh *bh) 1523 unsigned int length, struct idetape_bh *bh)
1551{ 1524{
@@ -1655,13 +1628,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1655 (struct idetape_bh *)rq->special); 1628 (struct idetape_bh *)rq->special);
1656 goto out; 1629 goto out;
1657 } 1630 }
1658 if (rq->cmd[0] & REQ_IDETAPE_READ_BUFFER) {
1659 tape->postpone_cnt = 0;
1660 pc = idetape_next_pc_storage(drive);
1661 idetape_create_read_buffer_cmd(tape, pc,
1662 (struct idetape_bh *)rq->special);
1663 goto out;
1664 }
1665 if (rq->cmd[0] & REQ_IDETAPE_PC1) { 1631 if (rq->cmd[0] & REQ_IDETAPE_PC1) {
1666 pc = (idetape_pc_t *) rq->buffer; 1632 pc = (idetape_pc_t *) rq->buffer;
1667 rq->cmd[0] &= ~(REQ_IDETAPE_PC1); 1633 rq->cmd[0] &= ~(REQ_IDETAPE_PC1);
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index ad0e9955f73c..477833f0daf5 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -44,8 +44,6 @@
44 * inspiration from lots of linux users, esp. hamish@zot.apana.org.au 44 * inspiration from lots of linux users, esp. hamish@zot.apana.org.au
45 */ 45 */
46 46
47#define REVISION "Revision: 7.00alpha2"
48
49#define _IDE_C /* Tell ide.h it's really us */ 47#define _IDE_C /* Tell ide.h it's really us */
50 48
51#include <linux/module.h> 49#include <linux/module.h>
@@ -1231,7 +1229,7 @@ static int __init ide_setup(char *s)
1231 if (!strcmp(s, "ide=reverse")) { 1229 if (!strcmp(s, "ide=reverse")) {
1232 ide_scan_direction = 1; 1230 ide_scan_direction = 1;
1233 printk(" : Enabled support for IDE inverse scan order.\n"); 1231 printk(" : Enabled support for IDE inverse scan order.\n");
1234 return 1; 1232 goto obsolete_option;
1235 } 1233 }
1236#endif 1234#endif
1237 1235
@@ -1618,7 +1616,7 @@ static int __init ide_init(void)
1618{ 1616{
1619 int ret; 1617 int ret;
1620 1618
1621 printk(KERN_INFO "Uniform Multi-Platform E-IDE driver " REVISION "\n"); 1619 printk(KERN_INFO "Uniform Multi-Platform E-IDE driver\n");
1622 system_bus_speed = ide_system_bus_speed(); 1620 system_bus_speed = ide_system_bus_speed();
1623 1621
1624 printk(KERN_INFO "ide: Assuming %dMHz system bus speed " 1622 printk(KERN_INFO "ide: Assuming %dMHz system bus speed "
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index f044048903b3..8949ce71bddc 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -54,7 +54,7 @@ static void __init falconide_setup_ports(hw_regs_t *hw)
54 for (i = 1; i < 8; i++) 54 for (i = 1; i < 8; i++)
55 hw->io_ports[i] = ATA_HD_BASE + 1 + i * 4; 55 hw->io_ports[i] = ATA_HD_BASE + 1 + i * 4;
56 56
57 hw->io_ports[IDE_CONTROL_OFFSET] = ATA_HD_CONTROL; 57 hw->io_ports[IDE_CONTROL_OFFSET] = ATA_HD_BASE + ATA_HD_CONTROL;
58 58
59 hw->irq = IRQ_MFP_IDE; 59 hw->irq = IRQ_MFP_IDE;
60 hw->ack_intr = NULL; 60 hw->ack_intr = NULL;
@@ -84,7 +84,9 @@ static int __init falconide_init(void)
84 ide_init_port_data(hwif, index); 84 ide_init_port_data(hwif, index);
85 ide_init_port_hw(hwif, &hw); 85 ide_init_port_hw(hwif, &hw);
86 86
87 ide_get_lock(NULL, NULL);
87 ide_device_add(idx, NULL); 88 ide_device_add(idx, NULL);
89 ide_release_lock();
88 } 90 }
89 91
90 return 0; 92 return 0;
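
In the falconide hunk above, the control register was being programmed with the bare ATA_HD_CONTROL offset instead of ATA_HD_BASE + ATA_HD_CONTROL. A sketch of the base-plus-offset layout that setup routines like this fill in; the constants below are placeholders, not the Falcon's real addresses:

    #include <stdio.h>

    #define BASE        0xfff00000ul   /* placeholder, not the real ATA_HD_BASE */
    #define CONTROL_OFF 0x38           /* placeholder control-register offset */

    int main(void)
    {
        unsigned long io_ports[9];
        int i;

        io_ports[0] = BASE;                      /* data register */
        for (i = 1; i < 8; i++)
            io_ports[i] = BASE + 1 + i * 4;      /* task-file registers */
        io_ports[8] = BASE + CONTROL_OFF;        /* control register: must include the base */

        for (i = 0; i < 9; i++)
            printf("port %d: %#lx\n", i, io_ports[i]);
        return 0;
    }
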
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
index 9d3851d27677..b7d81090d5da 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/legacy/gayle.c
@@ -94,7 +94,7 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif)
94 94
95static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base, 95static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
96 unsigned long ctl, unsigned long irq_port, 96 unsigned long ctl, unsigned long irq_port,
97 ide_ack_intr_t *ack_intr); 97 ide_ack_intr_t *ack_intr)
98{ 98{
99 int i; 99 int i;
100 100
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c
index 02d12c74764a..78ca68e60f97 100644
--- a/drivers/ide/legacy/ht6560b.c
+++ b/drivers/ide/legacy/ht6560b.c
@@ -21,18 +21,21 @@
21 * "Prefetch" mode bit OFF for ide disks and 21 * "Prefetch" mode bit OFF for ide disks and
22 * ON for anything else. 22 * ON for anything else.
23 * 23 *
24 * Version 0.08 Need to force prefetch for CDs and other non-disk
25 * devices. (not sure which devices exactly need
26 * prefetch)
24 * 27 *
25 * HT-6560B EIDE-controller support 28 * HT-6560B EIDE-controller support
26 * To activate controller support use kernel parameter "ide0=ht6560b". 29 * To activate controller support use kernel parameter "ide0=ht6560b".
27 * Use hdparm utility to enable PIO mode support. 30 * Use hdparm utility to enable PIO mode support.
28 * 31 *
29 * Author: Mikko Ala-Fossi <maf@iki.fi> 32 * Author: Mikko Ala-Fossi <maf@iki.fi>
30 * Jan Evert van Grootheest <janevert@iae.nl> 33 * Jan Evert van Grootheest <janevert@caiway.nl>
31 * 34 *
32 * Try: http://www.maf.iki.fi/~maf/ht6560b/ 35 * Try: http://www.maf.iki.fi/~maf/ht6560b/
33 */ 36 */
34 37
35#define HT6560B_VERSION "v0.07" 38#define HT6560B_VERSION "v0.08"
36 39
37#include <linux/module.h> 40#include <linux/module.h>
38#include <linux/types.h> 41#include <linux/types.h>
@@ -130,15 +133,20 @@ static void ht6560b_selectproc (ide_drive_t *drive)
130 u8 select, timing; 133 u8 select, timing;
131 134
132 local_irq_save(flags); 135 local_irq_save(flags);
133 136
134 select = HT_CONFIG(drive); 137 select = HT_CONFIG(drive);
135 timing = HT_TIMING(drive); 138 timing = HT_TIMING(drive);
136 139
140 /*
141 * Need to enforce prefetch sometimes because otherwise
142 * it'll hang (hard).
143 */
144 if (drive->media != ide_disk || !drive->present)
145 select |= HT_PREFETCH_MODE;
146
137 if (select != current_select || timing != current_timing) { 147 if (select != current_select || timing != current_timing) {
138 current_select = select; 148 current_select = select;
139 current_timing = timing; 149 current_timing = timing;
140 if (drive->media != ide_disk || !drive->present)
141 select |= HT_PREFETCH_MODE;
142 (void)inb(HT_CONFIG_PORT); 150 (void)inb(HT_CONFIG_PORT);
143 (void)inb(HT_CONFIG_PORT); 151 (void)inb(HT_CONFIG_PORT);
144 (void)inb(HT_CONFIG_PORT); 152 (void)inb(HT_CONFIG_PORT);
@@ -188,11 +196,12 @@ static int __init try_to_init_ht6560b(void)
188 outb(HT_TIMING_DEFAULT, 0x1f6); /* IDE_SELECT_REG */ 196 outb(HT_TIMING_DEFAULT, 0x1f6); /* IDE_SELECT_REG */
189 (void) inb(0x1f7); /* IDE_STATUS_REG */ 197 (void) inb(0x1f7); /* IDE_STATUS_REG */
190 198
191 printk("\nht6560b " HT6560B_VERSION 199 printk("ht6560b " HT6560B_VERSION
192 ": chipset detected and initialized" 200 ": chipset detected and initialized"
193#ifdef DEBUG 201#ifdef DEBUG
194 " with debug enabled" 202 " with debug enabled"
195#endif 203#endif
204 "\n"
196 ); 205 );
197 return 1; 206 return 1;
198} 207}
@@ -323,7 +332,7 @@ static const struct ide_port_info ht6560b_port_info __initdata = {
323 IDE_HFLAG_NO_DMA | 332 IDE_HFLAG_NO_DMA |
324 IDE_HFLAG_NO_AUTOTUNE | 333 IDE_HFLAG_NO_AUTOTUNE |
325 IDE_HFLAG_ABUSE_PREFETCH, 334 IDE_HFLAG_ABUSE_PREFETCH,
326 .pio_mask = ATA_PIO5, 335 .pio_mask = ATA_PIO4,
327}; 336};
328 337
329static int __init ht6560b_init(void) 338static int __init ht6560b_init(void)
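
The ht6560b fix above works because the prefetch bit is now OR-ed into select before it is compared with the cached current_select; previously the adjustment happened after the compare, so a matching cached value skipped it entirely. The ordering issue in miniature, with PREFETCH and write_config() standing in for the real register bit and port write:

    #include <stdio.h>

    #define PREFETCH 0x20   /* stand-in for HT_PREFETCH_MODE */

    static unsigned char cached_select = 0xff;

    static void write_config(unsigned char v)   /* stand-in for the port write */
    {
        printf("write 0x%02x\n", v);
    }

    static void select_device(unsigned char select, int needs_prefetch)
    {
        /* Apply every adjustment BEFORE the compare, or a stale cache
         * hit will silently skip the hardware write. */
        if (needs_prefetch)
            select |= PREFETCH;

        if (select != cached_select) {
            cached_select = select;
            write_config(select);
        }
    }

    int main(void)
    {
        select_device(0x10, 1);   /* writes 0x30 */
        select_device(0x10, 1);   /* cache hit, no write needed */
        return 0;
    }
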
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index a61e60737dc7..9a79098d9eb4 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -74,7 +74,7 @@ static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base,
74 for (i = 0; i < 8; i++) 74 for (i = 0; i < 8; i++)
75 hw->io_ports[i] = base + i * 4; 75 hw->io_ports[i] = base + i * 4;
76 76
77 hw->io_ports[IDE_CONTROL_OFFSET] = IDE_CONTROL; 77 hw->io_ports[IDE_CONTROL_OFFSET] = base + IDE_CONTROL;
78 78
79 hw->irq = irq; 79 hw->irq = irq;
80 hw->ack_intr = ack_intr; 80 hw->ack_intr = ack_intr;
diff --git a/drivers/ide/pci/cs5520.c b/drivers/ide/pci/cs5520.c
index 0be1a824102b..1c163e4ef03f 100644
--- a/drivers/ide/pci/cs5520.c
+++ b/drivers/ide/pci/cs5520.c
@@ -147,11 +147,6 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
147 147
148 /* We must not grab the entire device, it has 'ISA' space in its 148 /* We must not grab the entire device, it has 'ISA' space in its
149 * BARS too and we will freak out other bits of the kernel 149 * BARS too and we will freak out other bits of the kernel
150 *
151 * pci_enable_device_bars() is going away. I replaced it with
152 * IO only enable for now but I'll need confirmation this is
153 * allright for that device. If not, it will need some kind of
154 * quirk. --BenH.
155 */ 150 */
156 if (pci_enable_device_io(dev)) { 151 if (pci_enable_device_io(dev)) {
157 printk(KERN_WARNING "%s: Unable to enable 55x0.\n", d->name); 152 printk(KERN_WARNING "%s: Unable to enable 55x0.\n", d->name);
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index da4329790387..150422ec3cfa 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -3,26 +3,6 @@
3 * Copyright (C) 2006-2007 MontaVista Software, Inc. 3 * Copyright (C) 2006-2007 MontaVista Software, Inc.
4 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz 4 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
5 * 5 *
6 * Promise Ultra33 cards with BIOS v1.20 through 1.28 will need this
7 * compiled into the kernel if you have more than one card installed.
8 * Note that BIOS v1.29 is reported to fix the problem. Since this is
9 * safe chipset tuning, including this support is harmless
10 *
11 * Promise Ultra66 cards with BIOS v1.11 this
12 * compiled into the kernel if you have more than one card installed.
13 *
14 * Promise Ultra100 cards.
15 *
16 * The latest chipset code will support the following ::
17 * Three Ultra33 controllers and 12 drives.
18 * 8 are UDMA supported and 4 are limited to DMA mode 2 multi-word.
19 * The 8/4 ratio is a BIOS code limit by promise.
20 *
21 * UNLESS you enable "CONFIG_PDC202XX_BURST"
22 *
23 */
24
25/*
26 * Portions Copyright (C) 1999 Promise Technology, Inc. 6 * Portions Copyright (C) 1999 Promise Technology, Inc.
27 * Author: Frank Tiernan (frankt@promise.com) 7 * Author: Frank Tiernan (frankt@promise.com)
28 * Released under terms of General Public License 8 * Released under terms of General Public License
@@ -344,7 +324,6 @@ static void __devinit init_dma_pdc202xx(ide_hwif_t *hwif, unsigned long dmabase)
344 (primary_mode & 1) ? "MASTER" : "PCI", 324 (primary_mode & 1) ? "MASTER" : "PCI",
345 (secondary_mode & 1) ? "MASTER" : "PCI" ); 325 (secondary_mode & 1) ? "MASTER" : "PCI" );
346 326
347#ifdef CONFIG_PDC202XX_BURST
348 if (!(udma_speed_flag & 1)) { 327 if (!(udma_speed_flag & 1)) {
349 printk(KERN_INFO "%s: FORCING BURST BIT 0x%02x->0x%02x ", 328 printk(KERN_INFO "%s: FORCING BURST BIT 0x%02x->0x%02x ",
350 hwif->cds->name, udma_speed_flag, 329 hwif->cds->name, udma_speed_flag,
@@ -352,7 +331,6 @@ static void __devinit init_dma_pdc202xx(ide_hwif_t *hwif, unsigned long dmabase)
352 outb(udma_speed_flag | 1, dmabase | 0x1f); 331 outb(udma_speed_flag | 1, dmabase | 0x1f);
353 printk("%sACTIVE\n", (inb(dmabase | 0x1f) & 1) ? "" : "IN"); 332 printk("%sACTIVE\n", (inb(dmabase | 0x1f) & 1) ? "" : "IN");
354 } 333 }
355#endif /* CONFIG_PDC202XX_BURST */
356 334
357 ide_setup_dma(hwif, dmabase); 335 ide_setup_dma(hwif, dmabase);
358} 336}
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index f3f79f805813..9004e7521889 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -479,6 +479,7 @@ static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_i
479static const struct pci_device_id via_pci_tbl[] = { 479static const struct pci_device_id via_pci_tbl[] = {
480 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), 0 }, 480 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), 0 },
481 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), 0 }, 481 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), 0 },
482 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_CX700_IDE), 0 },
482 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), 1 }, 483 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), 1 },
483 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 }, 484 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 },
484 { 0, }, 485 { 0, },
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 12ac3bfb4f9a..78c9eeb85634 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -1254,7 +1254,7 @@ pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
1254 int rc = 0; 1254 int rc = 0;
1255 1255
1256 if (mesg.event != mdev->ofdev.dev.power.power_state.event 1256 if (mesg.event != mdev->ofdev.dev.power.power_state.event
1257 && mesg.event == PM_EVENT_SUSPEND) { 1257 && (mesg.event & PM_EVENT_SLEEP)) {
1258 rc = pmac_ide_do_suspend(hwif); 1258 rc = pmac_ide_do_suspend(hwif);
1259 if (rc == 0) 1259 if (rc == 0)
1260 mdev->ofdev.dev.power.power_state = mesg; 1260 mdev->ofdev.dev.power.power_state = mesg;
@@ -1364,7 +1364,7 @@ pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
1364 int rc = 0; 1364 int rc = 0;
1365 1365
1366 if (mesg.event != pdev->dev.power.power_state.event 1366 if (mesg.event != pdev->dev.power.power_state.event
1367 && mesg.event == PM_EVENT_SUSPEND) { 1367 && (mesg.event & PM_EVENT_SLEEP)) {
1368 rc = pmac_ide_do_suspend(hwif); 1368 rc = pmac_ide_do_suspend(hwif);
1369 if (rc == 0) 1369 if (rc == 0)
1370 pdev->dev.power.power_state = mesg; 1370 pdev->dev.power.power_state = mesg;
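
Both pmac suspend hooks above move from an equality test against PM_EVENT_SUSPEND to a bitmask test against PM_EVENT_SLEEP, so hibernate-class events are treated as sleep as well. A generic sketch of event-class testing; the flag values below are made up for illustration, the real PM_EVENT_* constants come from the kernel headers:

    #include <stdio.h>

    /* Illustrative values only. */
    #define EV_FREEZE    0x1
    #define EV_SUSPEND   0x2
    #define EV_HIBERNATE 0x4
    #define EV_SLEEP     (EV_SUSPEND | EV_HIBERNATE)

    static int should_suspend(int event)
    {
        /* Matches any event in the sleep class, not just plain suspend. */
        return (event & EV_SLEEP) != 0;
    }

    int main(void)
    {
        printf("suspend:   %d\n", should_suspend(EV_SUSPEND));    /* 1 */
        printf("hibernate: %d\n", should_suspend(EV_HIBERNATE));  /* 1 */
        printf("freeze:    %d\n", should_suspend(EV_FREEZE));     /* 0 */
        return 0;
    }
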
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 638b727d42e0..b10ade92efed 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3587,8 +3587,6 @@ static void cm_release_port_obj(struct kobject *obj)
3587{ 3587{
3588 struct cm_port *cm_port; 3588 struct cm_port *cm_port;
3589 3589
3590 printk(KERN_ERR "free cm port\n");
3591
3592 cm_port = container_of(obj, struct cm_port, port_obj); 3590 cm_port = container_of(obj, struct cm_port, port_obj);
3593 kfree(cm_port); 3591 kfree(cm_port);
3594} 3592}
@@ -3601,8 +3599,6 @@ static void cm_release_dev_obj(struct kobject *obj)
3601{ 3599{
3602 struct cm_device *cm_dev; 3600 struct cm_device *cm_dev;
3603 3601
3604 printk(KERN_ERR "free cm dev\n");
3605
3606 cm_dev = container_of(obj, struct cm_device, dev_obj); 3602 cm_dev = container_of(obj, struct cm_device, dev_obj);
3607 kfree(cm_dev); 3603 kfree(cm_dev);
3608} 3604}
@@ -3616,18 +3612,12 @@ struct class cm_class = {
3616}; 3612};
3617EXPORT_SYMBOL(cm_class); 3613EXPORT_SYMBOL(cm_class);
3618 3614
3619static void cm_remove_fs_obj(struct kobject *obj)
3620{
3621 kobject_put(obj->parent);
3622 kobject_put(obj);
3623}
3624
3625static int cm_create_port_fs(struct cm_port *port) 3615static int cm_create_port_fs(struct cm_port *port)
3626{ 3616{
3627 int i, ret; 3617 int i, ret;
3628 3618
3629 ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type, 3619 ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
3630 kobject_get(&port->cm_dev->dev_obj), 3620 &port->cm_dev->dev_obj,
3631 "%d", port->port_num); 3621 "%d", port->port_num);
3632 if (ret) { 3622 if (ret) {
3633 kfree(port); 3623 kfree(port);
@@ -3637,7 +3627,7 @@ static int cm_create_port_fs(struct cm_port *port)
3637 for (i = 0; i < CM_COUNTER_GROUPS; i++) { 3627 for (i = 0; i < CM_COUNTER_GROUPS; i++) {
3638 ret = kobject_init_and_add(&port->counter_group[i].obj, 3628 ret = kobject_init_and_add(&port->counter_group[i].obj,
3639 &cm_counter_obj_type, 3629 &cm_counter_obj_type,
3640 kobject_get(&port->port_obj), 3630 &port->port_obj,
3641 "%s", counter_group_names[i]); 3631 "%s", counter_group_names[i]);
3642 if (ret) 3632 if (ret)
3643 goto error; 3633 goto error;
@@ -3647,8 +3637,8 @@ static int cm_create_port_fs(struct cm_port *port)
3647 3637
3648error: 3638error:
3649 while (i--) 3639 while (i--)
3650 cm_remove_fs_obj(&port->counter_group[i].obj); 3640 kobject_put(&port->counter_group[i].obj);
3651 cm_remove_fs_obj(&port->port_obj); 3641 kobject_put(&port->port_obj);
3652 return ret; 3642 return ret;
3653 3643
3654} 3644}
@@ -3658,9 +3648,9 @@ static void cm_remove_port_fs(struct cm_port *port)
3658 int i; 3648 int i;
3659 3649
3660 for (i = 0; i < CM_COUNTER_GROUPS; i++) 3650 for (i = 0; i < CM_COUNTER_GROUPS; i++)
3661 cm_remove_fs_obj(&port->counter_group[i].obj); 3651 kobject_put(&port->counter_group[i].obj);
3662 3652
3663 cm_remove_fs_obj(&port->port_obj); 3653 kobject_put(&port->port_obj);
3664} 3654}
3665 3655
3666static void cm_add_one(struct ib_device *device) 3656static void cm_add_one(struct ib_device *device)
@@ -3744,7 +3734,7 @@ error1:
3744 ib_unregister_mad_agent(port->mad_agent); 3734 ib_unregister_mad_agent(port->mad_agent);
3745 cm_remove_port_fs(port); 3735 cm_remove_port_fs(port);
3746 } 3736 }
3747 cm_remove_fs_obj(&cm_dev->dev_obj); 3737 kobject_put(&cm_dev->dev_obj);
3748} 3738}
3749 3739
3750static void cm_remove_one(struct ib_device *device) 3740static void cm_remove_one(struct ib_device *device)
@@ -3771,7 +3761,7 @@ static void cm_remove_one(struct ib_device *device)
3771 ib_unregister_mad_agent(port->mad_agent); 3761 ib_unregister_mad_agent(port->mad_agent);
3772 cm_remove_port_fs(port); 3762 cm_remove_port_fs(port);
3773 } 3763 }
3774 cm_remove_fs_obj(&cm_dev->dev_obj); 3764 kobject_put(&cm_dev->dev_obj);
3775} 3765}
3776 3766
3777static int __init ib_cm_init(void) 3767static int __init ib_cm_init(void)
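
The cm.c changes above drop the extra kobject_get() on the parent passed to kobject_init_and_add() and replace the custom cm_remove_fs_obj() helper with a plain kobject_put(), so each object takes exactly one reference and each teardown drops exactly one. A minimal refcounting sketch of that get/put discipline in plain C (this is not the kobject API, just the pairing rule it relies on):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int refcount;
        const char *name;
    };

    static struct obj *obj_get(struct obj *o)
    {
        o->refcount++;
        return o;
    }

    static void obj_put(struct obj *o)
    {
        if (--o->refcount == 0) {
            printf("releasing %s\n", o->name);
            free(o);
        }
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));
        o->refcount = 1;   /* creation owns one reference */
        o->name = "port";

        obj_get(o);        /* a user takes one reference... */
        obj_put(o);        /* ...and must drop exactly that one */

        obj_put(o);        /* final put from the creator frees it */
        return 0;
    }
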
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 1eff1b2c0e08..34507daaf9b6 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1107,7 +1107,6 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1107 event.param.ud.private_data_len = 1107 event.param.ud.private_data_len =
1108 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; 1108 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
1109 } else { 1109 } else {
1110 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
1111 conn_id = cma_new_conn_id(&listen_id->id, ib_event); 1110 conn_id = cma_new_conn_id(&listen_id->id, ib_event);
1112 cma_set_req_event_data(&event, &ib_event->param.req_rcvd, 1111 cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
1113 ib_event->private_data, offset); 1112 ib_event->private_data, offset);
@@ -1130,6 +1129,15 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1130 1129
1131 ret = conn_id->id.event_handler(&conn_id->id, &event); 1130 ret = conn_id->id.event_handler(&conn_id->id, &event);
1132 if (!ret) { 1131 if (!ret) {
1132 /*
1133 * Acquire mutex to prevent user executing rdma_destroy_id()
1134 * while we're accessing the cm_id.
1135 */
1136 mutex_lock(&lock);
1137 if (cma_comp(conn_id, CMA_CONNECT) &&
1138 !cma_is_ud_ps(conn_id->id.ps))
1139 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
1140 mutex_unlock(&lock);
1133 cma_enable_remove(conn_id); 1141 cma_enable_remove(conn_id);
1134 goto out; 1142 goto out;
1135 } 1143 }
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index c864ef70fdf9..5a4b2e65534b 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -686,8 +686,10 @@ int ib_device_register_sysfs(struct ib_device *device)
686 686
687 device->ports_parent = kobject_create_and_add("ports", 687 device->ports_parent = kobject_create_and_add("ports",
688 kobject_get(&class_dev->kobj)); 688 kobject_get(&class_dev->kobj));
689 if (!device->ports_parent) 689 if (!device->ports_parent) {
690 ret = -ENOMEM;
690 goto err_put; 691 goto err_put;
692 }
691 693
692 if (device->node_type == RDMA_NODE_IB_SWITCH) { 694 if (device->node_type == RDMA_NODE_IB_SWITCH) {
693 ret = add_port(device, 0); 695 ret = add_port(device, 0);
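
The sysfs.c hunk above makes the allocation-failure path set ret = -ENOMEM before jumping to the error label; without it, the function unwinds but returns whatever happened to be in ret, often 0, i.e. success. The pattern in miniature, with placeholder resources:

    #include <stdlib.h>

    #define ENOMEM 12

    static int register_thing(void)
    {
        int ret = 0;
        char *a, *b;

        a = malloc(16);
        if (!a)
            return -ENOMEM;

        b = malloc(16);
        if (!b) {
            ret = -ENOMEM;   /* the line the patch adds: record the failure */
            goto err_free_a;
        }

        /* ... would register a and b here ... */
        free(b);
    err_free_a:
        free(a);
        return ret;
    }

    int main(void)
    {
        return register_thing() ? 1 : 0;
    }
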
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index e9a08fa3dffe..320f2b6ddee6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -35,6 +35,7 @@
35#include <linux/skbuff.h> 35#include <linux/skbuff.h>
36#include <linux/timer.h> 36#include <linux/timer.h>
37#include <linux/notifier.h> 37#include <linux/notifier.h>
38#include <linux/inetdevice.h>
38 39
39#include <net/neighbour.h> 40#include <net/neighbour.h>
40#include <net/netevent.h> 41#include <net/netevent.h>
@@ -1784,6 +1785,17 @@ err:
1784 return err; 1785 return err;
1785} 1786}
1786 1787
1788static int is_loopback_dst(struct iw_cm_id *cm_id)
1789{
1790 struct net_device *dev;
1791
1792 dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);
1793 if (!dev)
1794 return 0;
1795 dev_put(dev);
1796 return 1;
1797}
1798
1787int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 1799int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1788{ 1800{
1789 int err = 0; 1801 int err = 0;
@@ -1791,6 +1803,11 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1791 struct iwch_ep *ep; 1803 struct iwch_ep *ep;
1792 struct rtable *rt; 1804 struct rtable *rt;
1793 1805
1806 if (is_loopback_dst(cm_id)) {
1807 err = -ENOSYS;
1808 goto out;
1809 }
1810
1794 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 1811 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1795 if (!ep) { 1812 if (!ep) {
1796 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__); 1813 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 7dc91a3e712d..fe2c2e94a5f8 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -199,7 +199,7 @@ struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
199 if (err) 199 if (err)
200 goto err_free; 200 goto err_free;
201 201
202 err = mlx4_mr_enable(to_mdev(pd->device)->dev, &fmr->mfmr.mr); 202 err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
203 if (err) 203 if (err)
204 goto err_mr; 204 goto err_mr;
205 205
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 6bd9f1393349..1e1e336d3ef9 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -473,7 +473,7 @@ static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
473 if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd)) 473 if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
474 return; 474 return;
475 475
476 cqe->db_cnt = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd); 476 be16_add_cpu(&cqe->db_cnt, -dbd);
477 cqe->wqe = new_wqe; 477 cqe->wqe = new_wqe;
478 cqe->syndrome = SYNDROME_WR_FLUSH_ERR; 478 cqe->syndrome = SYNDROME_WR_FLUSH_ERR;
479 479
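
be16_add_cpu(&cqe->db_cnt, -dbd) in the mthca hunk above is shorthand for "convert from big-endian, add, convert back, store". A userspace equivalent of those semantics using the standard byte-order helpers:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>   /* htons/ntohs */

    /* Same idea as the kernel helper: *v is stored big-endian,
     * the addend is a normal CPU-order integer. */
    static void be16_add(uint16_t *v, int16_t add)
    {
        *v = htons((uint16_t)(ntohs(*v) + add));
    }

    int main(void)
    {
        uint16_t db_cnt = htons(5);   /* big-endian 5 */

        be16_add(&db_cnt, -2);
        printf("db_cnt = %u\n", ntohs(db_cnt));   /* prints 3 */
        return 0;
    }
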
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 1f4d27d7c16d..252db0822f6c 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -542,6 +542,7 @@ struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
542 for (i = 0; i < npages; ++i) { 542 for (i = 0; i < npages; ++i) {
543 db_tab->page[i].refcount = 0; 543 db_tab->page[i].refcount = 0;
544 db_tab->page[i].uvirt = 0; 544 db_tab->page[i].uvirt = 0;
545 sg_init_table(&db_tab->page[i].mem, 1);
545 } 546 }
546 547
547 return db_tab; 548 return db_tab;
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 3b6985557cb2..3538da16e3fe 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -686,7 +686,7 @@ err_out_table:
686 mthca_table_put(dev, dev->mr_table.mpt_table, key); 686 mthca_table_put(dev, dev->mr_table.mpt_table, key);
687 687
688err_out_mpt_free: 688err_out_mpt_free:
689 mthca_free(&dev->mr_table.mpt_alloc, mr->ibmr.lkey); 689 mthca_free(&dev->mr_table.mpt_alloc, key);
690 return err; 690 return err;
691} 691}
692 692
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index b6cc265aa9a4..eee77da61935 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -148,14 +148,15 @@ static int nes_netdev_open(struct net_device *netdev)
148 struct nes_device *nesdev = nesvnic->nesdev; 148 struct nes_device *nesdev = nesvnic->nesdev;
149 int ret; 149 int ret;
150 int i; 150 int i;
151 struct nes_vnic *first_nesvnic; 151 struct nes_vnic *first_nesvnic = NULL;
152 u32 nic_active_bit; 152 u32 nic_active_bit;
153 u32 nic_active; 153 u32 nic_active;
154 struct list_head *list_pos, *list_temp;
154 155
155 assert(nesdev != NULL); 156 assert(nesdev != NULL);
156 157
157 first_nesvnic = list_entry(nesdev->nesadapter->nesvnic_list[nesdev->mac_index].next, 158 if (nesvnic->netdev_open == 1)
158 struct nes_vnic, list); 159 return 0;
159 160
160 if (netif_msg_ifup(nesvnic)) 161 if (netif_msg_ifup(nesvnic))
161 printk(KERN_INFO PFX "%s: enabling interface\n", netdev->name); 162 printk(KERN_INFO PFX "%s: enabling interface\n", netdev->name);
@@ -225,7 +226,18 @@ static int nes_netdev_open(struct net_device *netdev)
225 nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT | 226 nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
226 nesvnic->nic_cq.cq_number); 227 nesvnic->nic_cq.cq_number);
227 nes_read32(nesdev->regs+NES_CQE_ALLOC); 228 nes_read32(nesdev->regs+NES_CQE_ALLOC);
228 229 list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
230 first_nesvnic = container_of(list_pos, struct nes_vnic, list);
231 if (first_nesvnic->netdev_open == 1)
232 break;
233 }
234 if (first_nesvnic->netdev_open == 0) {
235 nes_debug(NES_DBG_INIT, "Setting up MAC interrupt mask.\n");
236 nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK + (0x200 * nesdev->mac_index),
237 ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
238 NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
239 first_nesvnic = nesvnic;
240 }
229 if (first_nesvnic->linkup) { 241 if (first_nesvnic->linkup) {
230 /* Enable network packets */ 242 /* Enable network packets */
231 nesvnic->linkup = 1; 243 nesvnic->linkup = 1;
@@ -248,6 +260,8 @@ static int nes_netdev_stop(struct net_device *netdev)
248 struct nes_device *nesdev = nesvnic->nesdev; 260 struct nes_device *nesdev = nesvnic->nesdev;
249 u32 nic_active_mask; 261 u32 nic_active_mask;
250 u32 nic_active; 262 u32 nic_active;
263 struct nes_vnic *first_nesvnic = NULL;
264 struct list_head *list_pos, *list_temp;
251 265
252 nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n", 266 nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n",
253 nesvnic, nesdev, netdev, netdev->name); 267 nesvnic, nesdev, netdev, netdev->name);
@@ -260,9 +274,20 @@ static int nes_netdev_stop(struct net_device *netdev)
260 /* Disable network packets */ 274 /* Disable network packets */
261 napi_disable(&nesvnic->napi); 275 napi_disable(&nesvnic->napi);
262 netif_stop_queue(netdev); 276 netif_stop_queue(netdev);
263 if ((nesdev->netdev[0] == netdev) & (nesvnic->logical_port == nesdev->mac_index)) { 277 list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
264 nes_write_indexed(nesdev, 278 first_nesvnic = container_of(list_pos, struct nes_vnic, list);
265 NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff); 279 if ((first_nesvnic->netdev_open == 1) && (first_nesvnic != nesvnic))
280 break;
281 }
282
283 if (first_nesvnic->netdev_open == 0)
284 nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff);
285 else if ((first_nesvnic != nesvnic) &&
286 (PCI_FUNC(first_nesvnic->nesdev->pcidev->devfn) != PCI_FUNC(nesvnic->nesdev->pcidev->devfn))) {
287 nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK + (0x200 * nesdev->mac_index), 0xffffffff);
288 nes_write_indexed(first_nesvnic->nesdev, NES_IDX_MAC_INT_MASK + (0x200 * first_nesvnic->nesdev->mac_index),
289 ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
290 NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
266 } 291 }
267 292
268 nic_active_mask = ~((u32)(1 << nesvnic->nic_index)); 293 nic_active_mask = ~((u32)(1 << nesvnic->nic_index));
@@ -859,7 +884,6 @@ void nes_netdev_set_multicast_list(struct net_device *netdev)
859 for (mc_index=0; mc_index < NES_MULTICAST_PF_MAX; mc_index++) { 884 for (mc_index=0; mc_index < NES_MULTICAST_PF_MAX; mc_index++) {
860 while (multicast_addr && nesvnic->mcrq_mcast_filter && ((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic, multicast_addr->dmi_addr)) == 0)) 885 while (multicast_addr && nesvnic->mcrq_mcast_filter && ((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic, multicast_addr->dmi_addr)) == 0))
861 multicast_addr = multicast_addr->next; 886 multicast_addr = multicast_addr->next;
862
863 if (mc_nic_index < 0) 887 if (mc_nic_index < 0)
864 mc_nic_index = nesvnic->nic_index; 888 mc_nic_index = nesvnic->nic_index;
865 if (multicast_addr) { 889 if (multicast_addr) {
@@ -908,7 +932,7 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
908 return -EINVAL; 932 return -EINVAL;
909 933
910 netdev->mtu = new_mtu; 934 netdev->mtu = new_mtu;
911 nesvnic->max_frame_size = new_mtu+ETH_HLEN; 935 nesvnic->max_frame_size = new_mtu + VLAN_ETH_HLEN;
912 936
913 if (netdev->mtu > 1500) { 937 if (netdev->mtu > 1500) {
914 jumbomode=1; 938 jumbomode=1;
@@ -1470,10 +1494,15 @@ static void nes_netdev_vlan_rx_register(struct net_device *netdev, struct vlan_g
1470{ 1494{
1471 struct nes_vnic *nesvnic = netdev_priv(netdev); 1495 struct nes_vnic *nesvnic = netdev_priv(netdev);
1472 struct nes_device *nesdev = nesvnic->nesdev; 1496 struct nes_device *nesdev = nesvnic->nesdev;
1497 struct nes_adapter *nesadapter = nesdev->nesadapter;
1473 u32 u32temp; 1498 u32 u32temp;
1499 unsigned long flags;
1474 1500
1501 spin_lock_irqsave(&nesadapter->phy_lock, flags);
1475 nesvnic->vlan_grp = grp; 1502 nesvnic->vlan_grp = grp;
1476 1503
1504 nes_debug(NES_DBG_NETDEV, "%s: %s\n", __func__, netdev->name);
1505
1477 /* Enable/Disable VLAN Stripping */ 1506 /* Enable/Disable VLAN Stripping */
1478 u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG); 1507 u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG);
1479 if (grp) 1508 if (grp)
@@ -1482,6 +1511,7 @@ static void nes_netdev_vlan_rx_register(struct net_device *netdev, struct vlan_g
1482 u32temp |= 0x02000000; 1511 u32temp |= 0x02000000;
1483 1512
1484 nes_write_indexed(nesdev, NES_IDX_PCIX_DIAG, u32temp); 1513 nes_write_indexed(nesdev, NES_IDX_PCIX_DIAG, u32temp);
1514 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
1485} 1515}
1486 1516
1487 1517
@@ -1540,7 +1570,7 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1540 nesvnic->msg_enable = netif_msg_init(debug, default_msg); 1570 nesvnic->msg_enable = netif_msg_init(debug, default_msg);
1541 nesvnic->netdev_index = nesdev->netdev_count; 1571 nesvnic->netdev_index = nesdev->netdev_count;
1542 nesvnic->perfect_filter_index = nesdev->nesadapter->netdev_count; 1572 nesvnic->perfect_filter_index = nesdev->nesadapter->netdev_count;
1543 nesvnic->max_frame_size = netdev->mtu+netdev->hard_header_len; 1573 nesvnic->max_frame_size = netdev->mtu + netdev->hard_header_len + VLAN_HLEN;
1544 1574
1545 curr_qp_map = nic_qp_mapping_per_function[PCI_FUNC(nesdev->pcidev->devfn)]; 1575 curr_qp_map = nic_qp_mapping_per_function[PCI_FUNC(nesdev->pcidev->devfn)];
1546 nesvnic->nic.qp_id = curr_qp_map[nesdev->netdev_count].qpid; 1576 nesvnic->nic.qp_id = curr_qp_map[nesdev->netdev_count].qpid;
@@ -1610,7 +1640,7 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1610 list_add_tail(&nesvnic->list, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]); 1640 list_add_tail(&nesvnic->list, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]);
1611 1641
1612 if ((nesdev->netdev_count == 0) && 1642 if ((nesdev->netdev_count == 0) &&
1613 (PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index)) { 1643 (PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index)) {
1614 nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n", 1644 nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n",
1615 NES_IDX_PHY_PCS_CONTROL_STATUS0+(0x200*(nesvnic->logical_port&1))); 1645 NES_IDX_PHY_PCS_CONTROL_STATUS0+(0x200*(nesvnic->logical_port&1)));
1616 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 1646 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
@@ -1648,18 +1678,14 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1648 nesvnic->linkup = 1; 1678 nesvnic->linkup = 1;
1649 } 1679 }
1650 } 1680 }
1651 nes_debug(NES_DBG_INIT, "Setting up MAC interrupt mask.\n");
1652 /* clear the MAC interrupt status, assumes direct logical to physical mapping */ 1681 /* clear the MAC interrupt status, assumes direct logical to physical mapping */
1653 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS+(0x200*nesvnic->logical_port)); 1682 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
1654 nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp); 1683 nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp);
1655 nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS+(0x200*nesvnic->logical_port), u32temp); 1684 nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index), u32temp);
1656 1685
1657 if (nesdev->nesadapter->phy_type[nesvnic->logical_port] != NES_PHY_TYPE_IRIS) 1686 if (nesdev->nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_IRIS)
1658 nes_init_phy(nesdev); 1687 nes_init_phy(nesdev);
1659 1688
1660 nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesvnic->logical_port),
1661 ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
1662 NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
1663 } 1689 }
1664 1690
1665 return netdev; 1691 return netdev;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index ffd4b425567f..4dafbe16e82a 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1337,7 +1337,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
1337 NES_MAX_USER_WQ_REGIONS, nes_ucontext->first_free_wq); 1337 NES_MAX_USER_WQ_REGIONS, nes_ucontext->first_free_wq);
1338 /* nes_debug(NES_DBG_QP, "find_first_zero_biton wqs returned %u\n", 1338 /* nes_debug(NES_DBG_QP, "find_first_zero_biton wqs returned %u\n",
1339 nespd->mmap_db_index); */ 1339 nespd->mmap_db_index); */
1340 if (nesqp->mmap_sq_db_index > NES_MAX_USER_WQ_REGIONS) { 1340 if (nesqp->mmap_sq_db_index >= NES_MAX_USER_WQ_REGIONS) {
1341 nes_debug(NES_DBG_QP, 1341 nes_debug(NES_DBG_QP,
1342 "db index > max user regions, failing create QP\n"); 1342 "db index > max user regions, failing create QP\n");
1343 nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); 1343 nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
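
The nes_verbs.c change above turns > into >= when validating the index returned by the bitmap search: for an array of NES_MAX_USER_WQ_REGIONS entries, index NES_MAX_USER_WQ_REGIONS itself is already out of bounds. The off-by-one in a few lines, with a stand-in constant:

    #include <stdio.h>

    #define MAX_REGIONS 4   /* stand-in for NES_MAX_USER_WQ_REGIONS */

    static int index_is_invalid(unsigned int idx)
    {
        /* Valid indices are 0 .. MAX_REGIONS-1, so reject idx == MAX_REGIONS too. */
        return idx >= MAX_REGIONS;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               index_is_invalid(3),    /* 0: last valid slot */
               index_is_invalid(4),    /* 1: one past the end, the case '>' missed */
               index_is_invalid(5));   /* 1 */
        return 0;
    }
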
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index f9b7caa54143..054fab8e27a0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -209,7 +209,6 @@ struct ipoib_cm_tx {
209 unsigned tx_tail; 209 unsigned tx_tail;
210 unsigned long flags; 210 unsigned long flags;
211 u32 mtu; 211 u32 mtu;
212 struct ib_wc ibwc[IPOIB_NUM_WC];
213}; 212};
214 213
215struct ipoib_cm_rx_buf { 214struct ipoib_cm_rx_buf {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7dd2ec473d24..52b1bebfa744 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -824,7 +824,6 @@ void ipoib_cm_dev_stop(struct net_device *dev)
824 struct ipoib_dev_priv *priv = netdev_priv(dev); 824 struct ipoib_dev_priv *priv = netdev_priv(dev);
825 struct ipoib_cm_rx *p; 825 struct ipoib_cm_rx *p;
826 unsigned long begin; 826 unsigned long begin;
827 LIST_HEAD(list);
828 int ret; 827 int ret;
829 828
830 if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id) 829 if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
@@ -857,9 +856,12 @@ void ipoib_cm_dev_stop(struct net_device *dev)
857 /* 856 /*
858 * assume the HW is wedged and just free up everything. 857 * assume the HW is wedged and just free up everything.
859 */ 858 */
860 list_splice_init(&priv->cm.rx_flush_list, &list); 859 list_splice_init(&priv->cm.rx_flush_list,
861 list_splice_init(&priv->cm.rx_error_list, &list); 860 &priv->cm.rx_reap_list);
862 list_splice_init(&priv->cm.rx_drain_list, &list); 861 list_splice_init(&priv->cm.rx_error_list,
862 &priv->cm.rx_reap_list);
863 list_splice_init(&priv->cm.rx_drain_list,
864 &priv->cm.rx_reap_list);
863 break; 865 break;
864 } 866 }
865 spin_unlock_irq(&priv->lock); 867 spin_unlock_irq(&priv->lock);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 9d3e778dc56d..08c4396cf418 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -780,6 +780,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
780 if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) { 780 if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
781 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); 781 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
782 ipoib_ib_dev_down(dev, 0); 782 ipoib_ib_dev_down(dev, 0);
783 ipoib_ib_dev_stop(dev, 0);
783 ipoib_pkey_dev_delay_open(dev); 784 ipoib_pkey_dev_delay_open(dev);
784 return; 785 return;
785 } 786 }
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index 936788272a5f..bd8a1d14b45d 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -416,7 +416,6 @@ static void poll_media_bay(struct media_bay_info* bay)
416 } 416 }
417} 417}
418 418
419#ifdef CONFIG_MAC_FLOPPY
420int check_media_bay(struct device_node *which_bay, int what) 419int check_media_bay(struct device_node *which_bay, int what)
421{ 420{
422 int i; 421 int i;
@@ -431,7 +430,6 @@ int check_media_bay(struct device_node *which_bay, int what)
431 return -ENODEV; 430 return -ENODEV;
432} 431}
433EXPORT_SYMBOL(check_media_bay); 432EXPORT_SYMBOL(check_media_bay);
434#endif /* CONFIG_MAC_FLOPPY */
435 433
436#ifdef CONFIG_BLK_DEV_IDE_PMAC 434#ifdef CONFIG_BLK_DEV_IDE_PMAC
437int check_media_bay_by_base(unsigned long base, int what) 435int check_media_bay_by_base(unsigned long base, int what)
@@ -700,7 +698,8 @@ static int media_bay_suspend(struct macio_dev *mdev, pm_message_t state)
700{ 698{
701 struct media_bay_info *bay = macio_get_drvdata(mdev); 699 struct media_bay_info *bay = macio_get_drvdata(mdev);
702 700
703 if (state.event != mdev->ofdev.dev.power.power_state.event && state.event == PM_EVENT_SUSPEND) { 701 if (state.event != mdev->ofdev.dev.power.power_state.event
702 && (state.event & PM_EVENT_SLEEP)) {
704 down(&bay->lock); 703 down(&bay->lock);
705 bay->sleeping = 1; 704 bay->sleeping = 1;
706 set_mb_power(bay, 0); 705 set_mb_power(bay, 0);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index a0585fb6da94..7aeceedcf7d4 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -206,16 +206,10 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
206/* copy the pathname of a file to a buffer */ 206/* copy the pathname of a file to a buffer */
207char *file_path(struct file *file, char *buf, int count) 207char *file_path(struct file *file, char *buf, int count)
208{ 208{
209 struct dentry *d;
210 struct vfsmount *v;
211
212 if (!buf) 209 if (!buf)
213 return NULL; 210 return NULL;
214 211
215 d = file->f_path.dentry; 212 buf = d_path(&file->f_path, buf, count);
216 v = file->f_path.mnt;
217
218 buf = d_path(d, v, buf, count);
219 213
220 return IS_ERR(buf) ? NULL : buf; 214 return IS_ERR(buf) ? NULL : buf;
221} 215}
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index edc057f5cdcc..51605870f898 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -124,7 +124,7 @@ enum dm_raid1_error {
124struct mirror { 124struct mirror {
125 struct mirror_set *ms; 125 struct mirror_set *ms;
126 atomic_t error_count; 126 atomic_t error_count;
127 uint32_t error_type; 127 unsigned long error_type;
128 struct dm_dev *dev; 128 struct dm_dev *dev;
129 sector_t offset; 129 sector_t offset;
130}; 130};
@@ -1695,14 +1695,15 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
1695 * information for a retry or there was no other 1695 * information for a retry or there was no other
1696 * mirror in-sync. 1696 * mirror in-sync.
1697 */ 1697 */
1698 DMERR_LIMIT("Mirror read failed from %s.", 1698 DMERR_LIMIT("Mirror read failed.");
1699 m->dev->name);
1700 return -EIO; 1699 return -EIO;
1701 } 1700 }
1701
1702 m = read_record->m;
1703
1702 DMERR("Mirror read failed from %s. Trying alternative device.", 1704 DMERR("Mirror read failed from %s. Trying alternative device.",
1703 m->dev->name); 1705 m->dev->name);
1704 1706
1705 m = read_record->m;
1706 fail_mirror(m, DM_RAID1_READ_ERROR); 1707 fail_mirror(m, DM_RAID1_READ_ERROR);
1707 1708
1708 /* 1709 /*
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f16062982383..e75b1437b58b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -361,7 +361,7 @@ static int lookup_device(const char *path, dev_t *dev)
361 if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd))) 361 if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
362 return r; 362 return r;
363 363
364 inode = nd.dentry->d_inode; 364 inode = nd.path.dentry->d_inode;
365 if (!inode) { 365 if (!inode) {
366 r = -ENOENT; 366 r = -ENOENT;
367 goto out; 367 goto out;
@@ -375,7 +375,7 @@ static int lookup_device(const char *path, dev_t *dev)
375 *dev = inode->i_rdev; 375 *dev = inode->i_rdev;
376 376
377 out: 377 out:
378 path_release(&nd); 378 path_put(&nd.path);
379 return r; 379 return r;
380} 380}
381 381
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5fc326d3970e..7da6ec244e15 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5197,8 +5197,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
5197 chunk_kb ? "KB" : "B"); 5197 chunk_kb ? "KB" : "B");
5198 if (bitmap->file) { 5198 if (bitmap->file) {
5199 seq_printf(seq, ", file: "); 5199 seq_printf(seq, ", file: ");
5200 seq_path(seq, bitmap->file->f_path.mnt, 5200 seq_path(seq, &bitmap->file->f_path, " \t\n");
5201 bitmap->file->f_path.dentry," \t\n");
5202 } 5201 }
5203 5202
5204 seq_printf(seq, "\n"); 5203 seq_printf(seq, "\n");
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 8f4a45346de7..11950698a2e7 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -25,11 +25,16 @@ config VIDEO_DEV
25 To compile this driver as a module, choose M here: the 25 To compile this driver as a module, choose M here: the
26 module will be called videodev. 26 module will be called videodev.
27 27
28config VIDEO_V4L2_COMMON
29 tristate
30 depends on (I2C || I2C=n) && VIDEO_DEV
31 default (I2C || I2C=n) && VIDEO_DEV
32
28config VIDEO_V4L1 33config VIDEO_V4L1
29 bool "Enable Video For Linux API 1 (DEPRECATED)" 34 bool "Enable Video For Linux API 1 (DEPRECATED)"
30 depends on VIDEO_DEV 35 depends on VIDEO_DEV && VIDEO_V4L2_COMMON
36 default VIDEO_DEV && VIDEO_V4L2_COMMON
31 select VIDEO_V4L1_COMPAT 37 select VIDEO_V4L1_COMPAT
32 default y
33 ---help--- 38 ---help---
34 Enables a compatibility API used by most V4L2 devices to allow 39 Enables a compatibility API used by most V4L2 devices to allow
35 its usage with legacy applications that supports only V4L1 api. 40 its usage with legacy applications that supports only V4L1 api.
@@ -39,7 +44,7 @@ config VIDEO_V4L1
39config VIDEO_V4L1_COMPAT 44config VIDEO_V4L1_COMPAT
40 bool "Enable Video For Linux API 1 compatible Layer" 45 bool "Enable Video For Linux API 1 compatible Layer"
41 depends on VIDEO_DEV 46 depends on VIDEO_DEV
42 default y 47 default VIDEO_DEV
43 ---help--- 48 ---help---
44 This api were developed to be used at Kernel 2.2 and 2.4, but 49 This api were developed to be used at Kernel 2.2 and 2.4, but
45 lacks support for several video standards. There are several 50 lacks support for several video standards. There are several
@@ -55,8 +60,8 @@ config VIDEO_V4L1_COMPAT
55 60
56config VIDEO_V4L2 61config VIDEO_V4L2
57 bool 62 bool
58 depends on VIDEO_DEV 63 depends on VIDEO_DEV && VIDEO_V4L2_COMMON
59 default y 64 default VIDEO_DEV && VIDEO_V4L2_COMMON
60 65
61source "drivers/media/video/Kconfig" 66source "drivers/media/video/Kconfig"
62 67
@@ -93,7 +98,7 @@ if VIDEO_TUNER_CUSTOMIZE
93 98
94config TUNER_XC2028 99config TUNER_XC2028
95 tristate "XCeive xc2028/xc3028 tuners" 100 tristate "XCeive xc2028/xc3028 tuners"
96 depends on I2C 101 depends on I2C && FW_LOADER
97 default m if VIDEO_TUNER_CUSTOMIZE 102 default m if VIDEO_TUNER_CUSTOMIZE
98 help 103 help
99 Say Y here to include support for the xc2028/xc3028 tuners. 104 Say Y here to include support for the xc2028/xc3028 tuners.
@@ -180,7 +185,6 @@ config VIDEO_TVEEPROM
180 185
181config DAB 186config DAB
182 boolean "DAB adapters" 187 boolean "DAB adapters"
183 default y
184 ---help--- 188 ---help---
185 Allow selecting support for for Digital Audio Broadcasting (DAB) 189 Allow selecting support for for Digital Audio Broadcasting (DAB)
186 Receiver adapters. 190 Receiver adapters.
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index 06ca75911b7f..769c6f8142d2 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -4,6 +4,6 @@ config VIDEO_SAA7146
4 4
5config VIDEO_SAA7146_VV 5config VIDEO_SAA7146_VV
6 tristate 6 tristate
7 depends on VIDEO_DEV 7 depends on VIDEO_V4L2
8 select VIDEOBUF_DMA_SG 8 select VIDEOBUF_DMA_SG
9 select VIDEO_SAA7146 9 select VIDEO_SAA7146
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/common/ir-keymaps.c
index a4a937c90534..2ab5a120470d 100644
--- a/drivers/media/common/ir-keymaps.c
+++ b/drivers/media/common/ir-keymaps.c
@@ -1987,3 +1987,49 @@ IR_KEYTAB_TYPE ir_codes_behold[IR_KEYTAB_SIZE] = {
1987}; 1987};
1988 1988
1989EXPORT_SYMBOL_GPL(ir_codes_behold); 1989EXPORT_SYMBOL_GPL(ir_codes_behold);
1990
1991/*
1992 * Remote control for the Genius TVGO A11MCE
1993 * Adrian Pardini <pardo.bsso@gmail.com>
1994 */
1995IR_KEYTAB_TYPE ir_codes_genius_tvgo_a11mce[IR_KEYTAB_SIZE] = {
1996 /* Keys 0 to 9 */
1997 [0x48] = KEY_0,
1998 [0x09] = KEY_1,
1999 [0x1d] = KEY_2,
2000 [0x1f] = KEY_3,
2001 [0x19] = KEY_4,
2002 [0x1b] = KEY_5,
2003 [0x11] = KEY_6,
2004 [0x17] = KEY_7,
2005 [0x12] = KEY_8,
2006 [0x16] = KEY_9,
2007
2008 [0x54] = KEY_RECORD, /* recording */
2009 [0x06] = KEY_MUTE, /* mute */
2010 [0x10] = KEY_POWER,
2011 [0x40] = KEY_LAST, /* recall */
2012 [0x4c] = KEY_CHANNELUP, /* channel / program + */
2013 [0x00] = KEY_CHANNELDOWN, /* channel / program - */
2014 [0x0d] = KEY_VOLUMEUP,
2015 [0x15] = KEY_VOLUMEDOWN,
2016 [0x4d] = KEY_OK, /* also labeled as Pause */
2017 [0x1c] = KEY_ZOOM, /* full screen and Stop*/
2018 [0x02] = KEY_MODE, /* AV Source or Rewind*/
2019 [0x04] = KEY_LIST, /* -/-- */
2020 /* small arrows above numbers */
2021 [0x1a] = KEY_NEXT, /* also Fast Forward */
2022 [0x0e] = KEY_PREVIOUS, /* also Rewind */
2023 /* these are in a rather non standard layout and have
2024 an alternate name written */
2025 [0x1e] = KEY_UP, /* Video Setting */
2026 [0x0a] = KEY_DOWN, /* Video Default */
2027 [0x05] = KEY_LEFT, /* Snapshot */
2028 [0x0c] = KEY_RIGHT, /* Hide Panel */
2029 /* Four buttons without label */
2030 [0x49] = KEY_RED,
2031 [0x0b] = KEY_GREEN,
2032 [0x13] = KEY_YELLOW,
2033 [0x50] = KEY_BLUE,
2034};
2035EXPORT_SYMBOL_GPL(ir_codes_genius_tvgo_a11mce);
diff --git a/drivers/media/common/saa7146_vbi.c b/drivers/media/common/saa7146_vbi.c
index c32dda973e92..bfbd5a841ebf 100644
--- a/drivers/media/common/saa7146_vbi.c
+++ b/drivers/media/common/saa7146_vbi.c
@@ -413,7 +413,6 @@ static int vbi_open(struct saa7146_dev *dev, struct file *file)
413 V4L2_FIELD_SEQ_TB, // FIXME: does this really work? 413 V4L2_FIELD_SEQ_TB, // FIXME: does this really work?
414 sizeof(struct saa7146_buf), 414 sizeof(struct saa7146_buf),
415 file); 415 file);
416 mutex_init(&fh->vbi_q.lock);
417 416
418 init_timer(&fh->vbi_read_timeout); 417 init_timer(&fh->vbi_read_timeout);
419 fh->vbi_read_timeout.function = vbi_read_timeout; 418 fh->vbi_read_timeout.function = vbi_read_timeout;
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index c31ab480d8e1..66fdbd0e6a6d 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -1417,8 +1417,6 @@ static int video_open(struct saa7146_dev *dev, struct file *file)
1417 sizeof(struct saa7146_buf), 1417 sizeof(struct saa7146_buf),
1418 file); 1418 file);
1419 1419
1420 mutex_init(&fh->video_q.lock);
1421
1422 return 0; 1420 return 0;
1423} 1421}
1424 1422
diff --git a/drivers/media/dvb/bt8xx/bt878.c b/drivers/media/dvb/bt8xx/bt878.c
index c7bbb40223f5..56d8fab688bb 100644
--- a/drivers/media/dvb/bt8xx/bt878.c
+++ b/drivers/media/dvb/bt8xx/bt878.c
@@ -75,7 +75,11 @@ EXPORT_SYMBOL(bt878);
75#if defined(dprintk) 75#if defined(dprintk)
76#undef dprintk 76#undef dprintk
77#endif 77#endif
78#define dprintk if(bt878_debug) printk 78#define dprintk(fmt, arg...) \
79 do { \
80 if (bt878_debug) \
81 printk(KERN_DEBUG fmt, ##arg); \
82 } while (0)
79 83
80static void bt878_mem_free(struct bt878 *bt) 84static void bt878_mem_free(struct bt878 *bt)
81{ 85{
@@ -154,7 +158,7 @@ static int bt878_make_risc(struct bt878 *bt)
154 } 158 }
155 159
156 if (bt->line_count > 255) { 160 if (bt->line_count > 255) {
157 printk("bt878: buffer size error!\n"); 161 printk(KERN_ERR "bt878: buffer size error!\n");
158 return -EINVAL; 162 return -EINVAL;
159 } 163 }
160 return 0; 164 return 0;
@@ -285,7 +289,8 @@ static irqreturn_t bt878_irq(int irq, void *dev_id)
285 289
286 if (astat & (BT878_ASCERR | BT878_AOCERR)) { 290 if (astat & (BT878_ASCERR | BT878_AOCERR)) {
287 if (bt878_verbose) { 291 if (bt878_verbose) {
288 printk("bt878(%d): irq%s%s risc_pc=%08x\n", 292 printk(KERN_INFO
293 "bt878(%d): irq%s%s risc_pc=%08x\n",
289 bt->nr, 294 bt->nr,
290 (astat & BT878_ASCERR) ? " SCERR" : 295 (astat & BT878_ASCERR) ? " SCERR" :
291 "", 296 "",
@@ -295,8 +300,8 @@ static irqreturn_t bt878_irq(int irq, void *dev_id)
295 } 300 }
296 if (astat & (BT878_APABORT | BT878_ARIPERR | BT878_APPERR)) { 301 if (astat & (BT878_APABORT | BT878_ARIPERR | BT878_APPERR)) {
297 if (bt878_verbose) { 302 if (bt878_verbose) {
298 printk 303 printk(KERN_INFO
299 ("bt878(%d): irq%s%s%s risc_pc=%08x\n", 304 "bt878(%d): irq%s%s%s risc_pc=%08x\n",
300 bt->nr, 305 bt->nr,
301 (astat & BT878_APABORT) ? " PABORT" : 306 (astat & BT878_APABORT) ? " PABORT" :
302 "", 307 "",
@@ -308,8 +313,8 @@ static irqreturn_t bt878_irq(int irq, void *dev_id)
308 } 313 }
309 if (astat & (BT878_AFDSR | BT878_AFTRGT | BT878_AFBUS)) { 314 if (astat & (BT878_AFDSR | BT878_AFTRGT | BT878_AFBUS)) {
310 if (bt878_verbose) { 315 if (bt878_verbose) {
311 printk 316 printk(KERN_INFO
312 ("bt878(%d): irq%s%s%s risc_pc=%08x\n", 317 "bt878(%d): irq%s%s%s risc_pc=%08x\n",
313 bt->nr, 318 bt->nr,
314 (astat & BT878_AFDSR) ? " FDSR" : "", 319 (astat & BT878_AFDSR) ? " FDSR" : "",
315 (astat & BT878_AFTRGT) ? " FTRGT" : 320 (astat & BT878_AFTRGT) ? " FTRGT" :
@@ -510,7 +515,7 @@ static int __devinit bt878_probe(struct pci_dev *dev,
510*/ 515*/
511 516
512 if ((result = bt878_mem_alloc(bt))) { 517 if ((result = bt878_mem_alloc(bt))) {
513 printk("bt878: failed to allocate memory!\n"); 518 printk(KERN_ERR "bt878: failed to allocate memory!\n");
514 goto fail2; 519 goto fail2;
515 } 520 }
516 521
@@ -536,7 +541,7 @@ static void __devexit bt878_remove(struct pci_dev *pci_dev)
536 struct bt878 *bt = pci_get_drvdata(pci_dev); 541 struct bt878 *bt = pci_get_drvdata(pci_dev);
537 542
538 if (bt878_verbose) 543 if (bt878_verbose)
539 printk("bt878(%d): unloading\n", bt->nr); 544 printk(KERN_INFO "bt878(%d): unloading\n", bt->nr);
540 545
541 /* turn off all capturing, DMA and IRQs */ 546 /* turn off all capturing, DMA and IRQs */
542 btand(~0x13, BT878_AGPIO_DMA_CTL); 547 btand(~0x13, BT878_AGPIO_DMA_CTL);
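
The bt878 hunk above rewrites dprintk from a bare "if (debug) printk" macro into a do { ... } while (0) form. The old style breaks as soon as the macro is used under an if/else, because the macro's own if captures the else. A compact demonstration; my_debug and the macro names are placeholders:

    #include <stdio.h>

    static int my_debug = 0;

    /* Old style: expands to a dangling if. */
    #define DPRINTK_BAD  if (my_debug) printf

    /* New style: behaves like a single statement everywhere. */
    #define DPRINTK_GOOD(fmt, ...) \
        do { \
            if (my_debug) \
                printf(fmt, ##__VA_ARGS__); \
        } while (0)

    int main(void)
    {
        int error = 1;

        if (error)
            DPRINTK_BAD("debug message\n");
        else
            printf("prints even though error is set: the else bound to the macro's if\n");

        if (error)
            DPRINTK_GOOD("debug message\n");
        else
            printf("never printed\n");
        return 0;
    }
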
diff --git a/drivers/media/dvb/dvb-usb/ttusb2.c b/drivers/media/dvb/dvb-usb/ttusb2.c
index 88dc4367a2e3..3b9da9c25c6e 100644
--- a/drivers/media/dvb/dvb-usb/ttusb2.c
+++ b/drivers/media/dvb/dvb-usb/ttusb2.c
@@ -144,6 +144,7 @@ static int ttusb2_power_ctrl(struct dvb_usb_device *d, int onoff)
144static struct tda10086_config tda10086_config = { 144static struct tda10086_config tda10086_config = {
145 .demod_address = 0x0e, 145 .demod_address = 0x0e,
146 .invert = 0, 146 .invert = 0,
147 .diseqc_tone = 1,
147}; 148};
148 149
149static int ttusb2_frontend_attach(struct dvb_usb_adapter *adap) 150static int ttusb2_frontend_attach(struct dvb_usb_adapter *adap)
diff --git a/drivers/media/dvb/frontends/tda10086.c b/drivers/media/dvb/frontends/tda10086.c
index 9d26ace65151..0d2b69a99ad4 100644
--- a/drivers/media/dvb/frontends/tda10086.c
+++ b/drivers/media/dvb/frontends/tda10086.c
@@ -106,9 +106,12 @@ static int tda10086_write_mask(struct tda10086_state *state, int reg, int mask,
106static int tda10086_init(struct dvb_frontend* fe) 106static int tda10086_init(struct dvb_frontend* fe)
107{ 107{
108 struct tda10086_state* state = fe->demodulator_priv; 108 struct tda10086_state* state = fe->demodulator_priv;
109 u8 t22k_off = 0x80;
109 110
110 dprintk ("%s\n", __FUNCTION__); 111 dprintk ("%s\n", __FUNCTION__);
111 112
113 if (state->config->diseqc_tone)
114 t22k_off = 0;
112 // reset 115 // reset
113 tda10086_write_byte(state, 0x00, 0x00); 116 tda10086_write_byte(state, 0x00, 0x00);
114 msleep(10); 117 msleep(10);
@@ -158,7 +161,7 @@ static int tda10086_init(struct dvb_frontend* fe)
158 tda10086_write_byte(state, 0x3d, 0x80); 161 tda10086_write_byte(state, 0x3d, 0x80);
159 162
160 // setup SEC 163 // setup SEC
161 tda10086_write_byte(state, 0x36, 0x80); // all SEC off, no 22k tone 164 tda10086_write_byte(state, 0x36, t22k_off); // all SEC off, 22k tone
162 tda10086_write_byte(state, 0x34, (((1<<19) * (22000/1000)) / (SACLK/1000))); // } tone frequency 165 tda10086_write_byte(state, 0x34, (((1<<19) * (22000/1000)) / (SACLK/1000))); // } tone frequency
163 tda10086_write_byte(state, 0x35, (((1<<19) * (22000/1000)) / (SACLK/1000)) >> 8); // } 166 tda10086_write_byte(state, 0x35, (((1<<19) * (22000/1000)) / (SACLK/1000)) >> 8); // }
164 167
@@ -180,16 +183,20 @@ static void tda10086_diseqc_wait(struct tda10086_state *state)
180static int tda10086_set_tone (struct dvb_frontend* fe, fe_sec_tone_mode_t tone) 183static int tda10086_set_tone (struct dvb_frontend* fe, fe_sec_tone_mode_t tone)
181{ 184{
182 struct tda10086_state* state = fe->demodulator_priv; 185 struct tda10086_state* state = fe->demodulator_priv;
186 u8 t22k_off = 0x80;
183 187
184 dprintk ("%s\n", __FUNCTION__); 188 dprintk ("%s\n", __FUNCTION__);
185 189
190 if (state->config->diseqc_tone)
191 t22k_off = 0;
192
186 switch (tone) { 193 switch (tone) {
187 case SEC_TONE_OFF: 194 case SEC_TONE_OFF:
188 tda10086_write_byte(state, 0x36, 0x80); 195 tda10086_write_byte(state, 0x36, t22k_off);
189 break; 196 break;
190 197
191 case SEC_TONE_ON: 198 case SEC_TONE_ON:
192 tda10086_write_byte(state, 0x36, 0x81); 199 tda10086_write_byte(state, 0x36, 0x01 + t22k_off);
193 break; 200 break;
194 } 201 }
195 202
@@ -202,9 +209,13 @@ static int tda10086_send_master_cmd (struct dvb_frontend* fe,
202 struct tda10086_state* state = fe->demodulator_priv; 209 struct tda10086_state* state = fe->demodulator_priv;
203 int i; 210 int i;
204 u8 oldval; 211 u8 oldval;
212 u8 t22k_off = 0x80;
205 213
206 dprintk ("%s\n", __FUNCTION__); 214 dprintk ("%s\n", __FUNCTION__);
207 215
216 if (state->config->diseqc_tone)
217 t22k_off = 0;
218
208 if (cmd->msg_len > 6) 219 if (cmd->msg_len > 6)
209 return -EINVAL; 220 return -EINVAL;
210 oldval = tda10086_read_byte(state, 0x36); 221 oldval = tda10086_read_byte(state, 0x36);
@@ -212,7 +223,8 @@ static int tda10086_send_master_cmd (struct dvb_frontend* fe,
212 for(i=0; i< cmd->msg_len; i++) { 223 for(i=0; i< cmd->msg_len; i++) {
213 tda10086_write_byte(state, 0x48+i, cmd->msg[i]); 224 tda10086_write_byte(state, 0x48+i, cmd->msg[i]);
214 } 225 }
215 tda10086_write_byte(state, 0x36, 0x88 | ((cmd->msg_len - 1) << 4)); 226 tda10086_write_byte(state, 0x36, (0x08 + t22k_off)
227 | ((cmd->msg_len - 1) << 4));
216 228
217 tda10086_diseqc_wait(state); 229 tda10086_diseqc_wait(state);
218 230
@@ -225,16 +237,20 @@ static int tda10086_send_burst (struct dvb_frontend* fe, fe_sec_mini_cmd_t minic
225{ 237{
226 struct tda10086_state* state = fe->demodulator_priv; 238 struct tda10086_state* state = fe->demodulator_priv;
227 u8 oldval = tda10086_read_byte(state, 0x36); 239 u8 oldval = tda10086_read_byte(state, 0x36);
240 u8 t22k_off = 0x80;
228 241
229 dprintk ("%s\n", __FUNCTION__); 242 dprintk ("%s\n", __FUNCTION__);
230 243
244 if (state->config->diseqc_tone)
245 t22k_off = 0;
246
231 switch(minicmd) { 247 switch(minicmd) {
232 case SEC_MINI_A: 248 case SEC_MINI_A:
233 tda10086_write_byte(state, 0x36, 0x84); 249 tda10086_write_byte(state, 0x36, 0x04 + t22k_off);
234 break; 250 break;
235 251
236 case SEC_MINI_B: 252 case SEC_MINI_B:
237 tda10086_write_byte(state, 0x36, 0x86); 253 tda10086_write_byte(state, 0x36, 0x06 + t22k_off);
238 break; 254 break;
239 } 255 }
240 256
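
The tda10086 hunks above all derive the value written to SEC control register 0x36 from a t22k_off base: 0x80 keeps the 22 kHz tone gated off (the previous hard-coded behaviour), while boards that set diseqc_tone in their config force the base to 0 so the DiSEqC signalling keeps its carrier. A small illustrative helper showing how the written values come out (sec_ctrl_value() is not a function in the driver, just a sketch of the arithmetic):

#include <stdint.h>
#include <stdio.h>

static uint8_t sec_ctrl_value(int diseqc_tone, uint8_t function_bits)
{
        uint8_t t22k_off = 0x80;        /* 22 kHz tone gated off by default */

        if (diseqc_tone)
                t22k_off = 0;           /* keep the carrier for DiSEqC-with-tone boards */

        return t22k_off | function_bits;
}

int main(void)
{
        /* function_bits per the switch statements in the patch:
         * 0x00 SEC idle, 0x01 continuous tone, 0x04/0x06 mini-DiSEqC bursts */
        printf("tone off, SEC idle  : 0x%02x\n", sec_ctrl_value(0, 0x00));  /* 0x80 */
        printf("continuous tone     : 0x%02x\n", sec_ctrl_value(0, 0x01));  /* 0x81 */
        printf("diseqc_tone, burst A: 0x%02x\n", sec_ctrl_value(1, 0x04));  /* 0x04 */
        return 0;
}
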
diff --git a/drivers/media/dvb/frontends/tda10086.h b/drivers/media/dvb/frontends/tda10086.h
index ed584a8f4a89..eeceaeee78ff 100644
--- a/drivers/media/dvb/frontends/tda10086.h
+++ b/drivers/media/dvb/frontends/tda10086.h
@@ -33,6 +33,9 @@ struct tda10086_config
33 33
34 /* does the "inversion" need inverted? */ 34 /* does the "inversion" need inverted? */
35 u8 invert; 35 u8 invert;
36
37 /* do we need the diseqc signal with carrier? */
38 u8 diseqc_tone;
36}; 39};
37 40
38#if defined(CONFIG_DVB_TDA10086) || (defined(CONFIG_DVB_TDA10086_MODULE) && defined(MODULE)) 41#if defined(CONFIG_DVB_TDA10086) || (defined(CONFIG_DVB_TDA10086_MODULE) && defined(MODULE))
diff --git a/drivers/media/dvb/frontends/tda18271-common.c b/drivers/media/dvb/frontends/tda18271-common.c
index cebb6b90b7e0..bca570990613 100644
--- a/drivers/media/dvb/frontends/tda18271-common.c
+++ b/drivers/media/dvb/frontends/tda18271-common.c
@@ -171,7 +171,7 @@ int tda18271_read_extended(struct dvb_frontend *fe)
171 if (ret != 2) 171 if (ret != 2)
172 tda_err("ERROR: i2c_transfer returned: %d\n", ret); 172 tda_err("ERROR: i2c_transfer returned: %d\n", ret);
173 173
174 for (i = 0; i <= TDA18271_NUM_REGS; i++) { 174 for (i = 0; i < TDA18271_NUM_REGS; i++) {
175 /* don't update write-only registers */ 175 /* don't update write-only registers */
176 if ((i != R_EB9) && 176 if ((i != R_EB9) &&
177 (i != R_EB16) && 177 (i != R_EB16) &&
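
The tda18271 change is a plain off-by-one fix: with "<=" the loop also touched index TDA18271_NUM_REGS, one element past the register map. As a reminder of the idiom (NUM_REGS and regs[] are illustrative stand-ins):

#include <stdio.h>

#define NUM_REGS 8      /* stand-in for TDA18271_NUM_REGS */

int main(void)
{
        unsigned char regs[NUM_REGS] = { 0 };
        int i;

        /* valid indices are 0 .. NUM_REGS - 1, so the bound is strictly "<" */
        for (i = 0; i < NUM_REGS; i++)
                printf("reg %d = 0x%02x\n", i, regs[i]);
        return 0;
}
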
diff --git a/drivers/media/dvb/frontends/xc5000.h b/drivers/media/dvb/frontends/xc5000.h
index e0e84562aed1..32a5f1c86a16 100644
--- a/drivers/media/dvb/frontends/xc5000.h
+++ b/drivers/media/dvb/frontends/xc5000.h
@@ -45,7 +45,8 @@ struct xc5000_config {
45/* xc5000 callback command */ 45/* xc5000 callback command */
46#define XC5000_TUNER_RESET 0 46#define XC5000_TUNER_RESET 0
47 47
48#if defined(CONFIG_DVB_TUNER_XC5000) || defined(CONFIG_DVB_TUNER_XC5000_MODULE) 48#if defined(CONFIG_DVB_TUNER_XC5000) || \
49 (defined(CONFIG_DVB_TUNER_XC5000_MODULE) && defined(MODULE))
49extern struct dvb_frontend* xc5000_attach(struct dvb_frontend *fe, 50extern struct dvb_frontend* xc5000_attach(struct dvb_frontend *fe,
50 struct i2c_adapter *i2c, 51 struct i2c_adapter *i2c,
51 struct xc5000_config *cfg); 52 struct xc5000_config *cfg);
diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c
index aef6e36d7c5c..3e6b650fbb81 100644
--- a/drivers/media/dvb/ttpci/av7110_av.c
+++ b/drivers/media/dvb/ttpci/av7110_av.c
@@ -966,6 +966,7 @@ static u8 iframe_header[] = { 0x00, 0x00, 0x01, 0xe0, 0x00, 0x00, 0x80, 0x00, 0x
966static int play_iframe(struct av7110 *av7110, char __user *buf, unsigned int len, int nonblock) 966static int play_iframe(struct av7110 *av7110, char __user *buf, unsigned int len, int nonblock)
967{ 967{
968 int i, n; 968 int i, n;
969 int progressive = 0;
969 970
970 dprintk(2, "av7110:%p, \n", av7110); 971 dprintk(2, "av7110:%p, \n", av7110);
971 972
@@ -974,6 +975,14 @@ static int play_iframe(struct av7110 *av7110, char __user *buf, unsigned int len
974 return -EBUSY; 975 return -EBUSY;
975 } 976 }
976 977
978 for (i = 0; i < len - 5; i++) {
979 /* get progressive flag from picture extension */
980 if (buf[i] == 0x00 && buf[i+1] == 0x00 &&
981 buf[i+2] == 0x01 && (unsigned char)buf[i+3] == 0xb5 &&
982 (buf[i+4] & 0xf0) == 0x10)
983 progressive = buf[i+5] & 0x08;
984 }
985
977 /* setting n always > 1, fixes problems when playing stillframes 986 /* setting n always > 1, fixes problems when playing stillframes
978 consisting of I- and P-Frames */ 987 consisting of I- and P-Frames */
979 n = MIN_IFRAME / len + 1; 988 n = MIN_IFRAME / len + 1;
@@ -985,7 +994,11 @@ static int play_iframe(struct av7110 *av7110, char __user *buf, unsigned int len
985 dvb_play(av7110, buf, len, 0, 1); 994 dvb_play(av7110, buf, len, 0, 1);
986 995
987 av7110_ipack_flush(&av7110->ipack[1]); 996 av7110_ipack_flush(&av7110->ipack[1]);
988 return 0; 997
998 if (progressive)
999 return vidcom(av7110, AV_VIDEO_CMD_FREEZE, 1);
1000 else
1001 return 0;
989} 1002}
990 1003
991 1004
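
The play_iframe() hunks scan the supplied I-frame data for an MPEG-2 extension start code (00 00 01 b5) with the matching identifier nibble, take the progressive flag from the following byte, and freeze the picture via vidcom(av7110, AV_VIDEO_CMD_FREEZE, 1) when the flag is set. A stand-alone sketch of that scan (the function name is illustrative; in the driver the loop runs inline over the buffer handed to play_iframe):

#include <stddef.h>

/* Returns non-zero when the progressive bit of a matching extension
 * header is set anywhere in the buffer. */
static int mpeg2_progressive_flag(const unsigned char *buf, size_t len)
{
        size_t i;
        int progressive = 0;

        for (i = 0; i + 5 < len; i++) {
                if (buf[i] == 0x00 && buf[i + 1] == 0x00 &&
                    buf[i + 2] == 0x01 && buf[i + 3] == 0xb5 &&
                    (buf[i + 4] & 0xf0) == 0x10)
                        progressive = buf[i + 5] & 0x08;
        }
        return progressive;
}
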
diff --git a/drivers/media/dvb/ttpci/budget-av.c b/drivers/media/dvb/ttpci/budget-av.c
index 3439c9864f67..2d64d557b977 100644
--- a/drivers/media/dvb/ttpci/budget-av.c
+++ b/drivers/media/dvb/ttpci/budget-av.c
@@ -896,6 +896,7 @@ static u8 read_pwm(struct budget_av *budget_av)
896#define SUBID_DVBS_CINERGY1200 0x1154 896#define SUBID_DVBS_CINERGY1200 0x1154
897#define SUBID_DVBS_CYNERGY1200N 0x1155 897#define SUBID_DVBS_CYNERGY1200N 0x1155
898#define SUBID_DVBS_TV_STAR 0x0014 898#define SUBID_DVBS_TV_STAR 0x0014
899#define SUBID_DVBS_TV_STAR_PLUS_X4 0x0015
899#define SUBID_DVBS_TV_STAR_CI 0x0016 900#define SUBID_DVBS_TV_STAR_CI 0x0016
900#define SUBID_DVBS_EASYWATCH_1 0x001a 901#define SUBID_DVBS_EASYWATCH_1 0x001a
901#define SUBID_DVBS_EASYWATCH_2 0x001b 902#define SUBID_DVBS_EASYWATCH_2 0x001b
@@ -910,6 +911,7 @@ static u8 read_pwm(struct budget_av *budget_av)
910#define SUBID_DVBC_CINERGY1200 0x1156 911#define SUBID_DVBC_CINERGY1200 0x1156
911#define SUBID_DVBC_CINERGY1200_MK3 0x1176 912#define SUBID_DVBC_CINERGY1200_MK3 0x1176
912 913
914#define SUBID_DVBT_EASYWATCH 0x003a
913#define SUBID_DVBT_KNC1_PLUS 0x0031 915#define SUBID_DVBT_KNC1_PLUS 0x0031
914#define SUBID_DVBT_KNC1 0x0030 916#define SUBID_DVBT_KNC1 0x0030
915#define SUBID_DVBT_CINERGY1200 0x1157 917#define SUBID_DVBT_CINERGY1200 0x1157
@@ -957,6 +959,7 @@ static void frontend_init(struct budget_av *budget_av)
957 break; 959 break;
958 960
959 case SUBID_DVBS_TV_STAR: 961 case SUBID_DVBS_TV_STAR:
962 case SUBID_DVBS_TV_STAR_PLUS_X4:
960 case SUBID_DVBS_TV_STAR_CI: 963 case SUBID_DVBS_TV_STAR_CI:
961 case SUBID_DVBS_CYNERGY1200N: 964 case SUBID_DVBS_CYNERGY1200N:
962 case SUBID_DVBS_EASYWATCH: 965 case SUBID_DVBS_EASYWATCH:
@@ -1018,6 +1021,7 @@ static void frontend_init(struct budget_av *budget_av)
1018 } 1021 }
1019 break; 1022 break;
1020 1023
1024 case SUBID_DVBT_EASYWATCH:
1021 case SUBID_DVBT_KNC1: 1025 case SUBID_DVBT_KNC1:
1022 case SUBID_DVBT_KNC1_PLUS: 1026 case SUBID_DVBT_KNC1_PLUS:
1023 case SUBID_DVBT_CINERGY1200: 1027 case SUBID_DVBT_CINERGY1200:
@@ -1248,7 +1252,9 @@ MAKE_BUDGET_INFO(satewpls1, "Satelco EasyWatch DVB-S light", BUDGET_KNC1S);
1248MAKE_BUDGET_INFO(satewps, "Satelco EasyWatch DVB-S", BUDGET_KNC1S); 1252MAKE_BUDGET_INFO(satewps, "Satelco EasyWatch DVB-S", BUDGET_KNC1S);
1249MAKE_BUDGET_INFO(satewplc, "Satelco EasyWatch DVB-C", BUDGET_KNC1CP); 1253MAKE_BUDGET_INFO(satewplc, "Satelco EasyWatch DVB-C", BUDGET_KNC1CP);
1250MAKE_BUDGET_INFO(satewcmk3, "Satelco EasyWatch DVB-C MK3", BUDGET_KNC1C_MK3); 1254MAKE_BUDGET_INFO(satewcmk3, "Satelco EasyWatch DVB-C MK3", BUDGET_KNC1C_MK3);
1255MAKE_BUDGET_INFO(satewt, "Satelco EasyWatch DVB-T", BUDGET_KNC1T);
1251MAKE_BUDGET_INFO(knc1sp, "KNC1 DVB-S Plus", BUDGET_KNC1SP); 1256MAKE_BUDGET_INFO(knc1sp, "KNC1 DVB-S Plus", BUDGET_KNC1SP);
1257MAKE_BUDGET_INFO(knc1spx4, "KNC1 DVB-S Plus X4", BUDGET_KNC1SP);
1252MAKE_BUDGET_INFO(knc1cp, "KNC1 DVB-C Plus", BUDGET_KNC1CP); 1258MAKE_BUDGET_INFO(knc1cp, "KNC1 DVB-C Plus", BUDGET_KNC1CP);
1253MAKE_BUDGET_INFO(knc1cmk3, "KNC1 DVB-C MK3", BUDGET_KNC1C_MK3); 1259MAKE_BUDGET_INFO(knc1cmk3, "KNC1 DVB-C MK3", BUDGET_KNC1C_MK3);
1254MAKE_BUDGET_INFO(knc1cpmk3, "KNC1 DVB-C Plus MK3", BUDGET_KNC1CP_MK3); 1260MAKE_BUDGET_INFO(knc1cpmk3, "KNC1 DVB-C Plus MK3", BUDGET_KNC1CP_MK3);
@@ -1266,12 +1272,14 @@ static struct pci_device_id pci_tbl[] = {
1266 MAKE_EXTENSION_PCI(knc1sp, 0x1131, 0x0011), 1272 MAKE_EXTENSION_PCI(knc1sp, 0x1131, 0x0011),
1267 MAKE_EXTENSION_PCI(knc1sp, 0x1894, 0x0011), 1273 MAKE_EXTENSION_PCI(knc1sp, 0x1894, 0x0011),
1268 MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0014), 1274 MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0014),
1275 MAKE_EXTENSION_PCI(knc1spx4, 0x1894, 0x0015),
1269 MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0016), 1276 MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0016),
1270 MAKE_EXTENSION_PCI(satewpls, 0x1894, 0x001e), 1277 MAKE_EXTENSION_PCI(satewpls, 0x1894, 0x001e),
1271 MAKE_EXTENSION_PCI(satewpls1, 0x1894, 0x001a), 1278 MAKE_EXTENSION_PCI(satewpls1, 0x1894, 0x001a),
1272 MAKE_EXTENSION_PCI(satewps, 0x1894, 0x001b), 1279 MAKE_EXTENSION_PCI(satewps, 0x1894, 0x001b),
1273 MAKE_EXTENSION_PCI(satewplc, 0x1894, 0x002a), 1280 MAKE_EXTENSION_PCI(satewplc, 0x1894, 0x002a),
1274 MAKE_EXTENSION_PCI(satewcmk3, 0x1894, 0x002c), 1281 MAKE_EXTENSION_PCI(satewcmk3, 0x1894, 0x002c),
1282 MAKE_EXTENSION_PCI(satewt, 0x1894, 0x003a),
1275 MAKE_EXTENSION_PCI(knc1c, 0x1894, 0x0020), 1283 MAKE_EXTENSION_PCI(knc1c, 0x1894, 0x0020),
1276 MAKE_EXTENSION_PCI(knc1cp, 0x1894, 0x0021), 1284 MAKE_EXTENSION_PCI(knc1cp, 0x1894, 0x0021),
1277 MAKE_EXTENSION_PCI(knc1cmk3, 0x1894, 0x0022), 1285 MAKE_EXTENSION_PCI(knc1cmk3, 0x1894, 0x0022),
diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
index 9268a82bada6..14b00f57b5de 100644
--- a/drivers/media/dvb/ttpci/budget.c
+++ b/drivers/media/dvb/ttpci/budget.c
@@ -351,6 +351,7 @@ static struct s5h1420_config s5h1420_config = {
351static struct tda10086_config tda10086_config = { 351static struct tda10086_config tda10086_config = {
352 .demod_address = 0x0e, 352 .demod_address = 0x0e,
353 .invert = 0, 353 .invert = 0,
354 .diseqc_tone = 1,
354}; 355};
355 356
356static u8 read_pwm(struct budget* budget) 357static u8 read_pwm(struct budget* budget)
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 8d5214f18cf0..1b41b3f77cf9 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -4,12 +4,12 @@
4 4
5menuconfig RADIO_ADAPTERS 5menuconfig RADIO_ADAPTERS
6 bool "Radio Adapters" 6 bool "Radio Adapters"
7 depends on VIDEO_DEV 7 depends on VIDEO_V4L2
8 default y 8 default y
9 ---help--- 9 ---help---
10 Say Y here to enable selecting AM/FM radio adapters. 10 Say Y here to enable selecting AM/FM radio adapters.
11 11
12if RADIO_ADAPTERS && VIDEO_DEV 12if RADIO_ADAPTERS && VIDEO_V4L2
13 13
14config RADIO_CADET 14config RADIO_CADET
15 tristate "ADS Cadet AM/FM Tuner" 15 tristate "ADS Cadet AM/FM Tuner"
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index 3118bdab3183..53e114857377 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -361,6 +361,7 @@ static int __init fmi_init(void)
361 } 361 }
362 if (!request_region(io, 2, "radio-sf16fmi")) { 362 if (!request_region(io, 2, "radio-sf16fmi")) {
363 printk(KERN_ERR "radio-sf16fmi: port 0x%x already in use\n", io); 363 printk(KERN_ERR "radio-sf16fmi: port 0x%x already in use\n", io);
364 pnp_device_detach(dev);
364 return -EBUSY; 365 return -EBUSY;
365 } 366 }
366 367
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index f7c8b000404f..ebc5fbbc38bb 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -470,9 +470,8 @@ static int __init fmr2_init(void)
470 470
471 mutex_init(&lock); 471 mutex_init(&lock);
472 472
473 if (request_region(io, 2, "sf16fmr2")) 473 if (!request_region(io, 2, "sf16fmr2")) {
474 { 474 printk(KERN_ERR "radio-sf16fmr2: request_region failed!\n");
475 printk(KERN_ERR "fmr2: port 0x%x already in use\n", io);
476 return -EBUSY; 475 return -EBUSY;
477 } 476 }
478 477
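
Both radio fixes above restore the canonical I/O-port claim pattern: request_region() returns NULL when the range is already taken, so the error branch must test the negated result, and anything acquired before the claim (the PnP attach in the sf16fmi case) has to be undone before bailing out. A minimal sketch under those assumptions (MY_IO, MY_EXTENT and "my-card" are illustrative names):

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/errno.h>

#define MY_IO           0x384
#define MY_EXTENT       2

static int __init my_card_init(void)
{
        if (!request_region(MY_IO, MY_EXTENT, "my-card")) {
                printk(KERN_ERR "my-card: port 0x%x already in use\n", MY_IO);
                return -EBUSY;          /* undo any earlier setup here first */
        }

        /* ... probe the hardware; on later failures call
         *     release_region(MY_IO, MY_EXTENT) before returning ... */
        return 0;
}
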
diff --git a/drivers/media/radio/radio-si470x.c b/drivers/media/radio/radio-si470x.c
index 8e4bd4769048..649f14d2c013 100644
--- a/drivers/media/radio/radio-si470x.c
+++ b/drivers/media/radio/radio-si470x.c
@@ -62,6 +62,29 @@
62 * - code cleaned of unnecessary rds_commands 62 * - code cleaned of unnecessary rds_commands
63 * - USB Vendor/Product ID for ADS/Tech FM Radio Receiver verified 63 * - USB Vendor/Product ID for ADS/Tech FM Radio Receiver verified
64 * (thanks to Guillaume RAMOUSSE) 64 * (thanks to Guillaume RAMOUSSE)
65 * 2008-01-27 Tobias Lorenz <tobias.lorenz@gmx.net>
66 * Version 1.0.5
67 * - number of seek_retries changed to tune_timeout
68 * - fixed problem with incomplete tune operations by own buffers
69 * - optimization of variables and printf types
70 * - improved error logging
71 * 2008-01-31 Tobias Lorenz <tobias.lorenz@gmx.net>
72 * Oliver Neukum <oliver@neukum.org>
73 * Version 1.0.6
74 * - fixed coverity checker warnings in *_usb_driver_disconnect
75 * - probe()/open() race by correct ordering in probe()
76 * - DMA coherency rules by separate allocation of all buffers
77 * - use of endianness macros
78 * - abuse of spinlock, replaced by mutex
79 * - racy handling of timer in disconnect,
80 * replaced by delayed_work
81 * - racy interruptible_sleep_on(),
82 * replaced with wait_event_interruptible()
83 * - handle signals in read()
84 * 2008-02-08 Tobias Lorenz <tobias.lorenz@gmx.net>
85 * Oliver Neukum <oliver@neukum.org>
86 * Version 1.0.7
87 * - usb autosuspend support
65 * 88 *
66 * ToDo: 89 * ToDo:
67 * - add seeking support 90 * - add seeking support
@@ -74,9 +97,10 @@
74/* driver definitions */ 97/* driver definitions */
75#define DRIVER_AUTHOR "Tobias Lorenz <tobias.lorenz@gmx.net>" 98#define DRIVER_AUTHOR "Tobias Lorenz <tobias.lorenz@gmx.net>"
76#define DRIVER_NAME "radio-si470x" 99#define DRIVER_NAME "radio-si470x"
77#define DRIVER_VERSION KERNEL_VERSION(1, 0, 4) 100#define DRIVER_KERNEL_VERSION KERNEL_VERSION(1, 0, 6)
78#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver" 101#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
79#define DRIVER_DESC "USB radio driver for Si470x FM Radio Receivers" 102#define DRIVER_DESC "USB radio driver for Si470x FM Radio Receivers"
103#define DRIVER_VERSION "1.0.6"
80 104
81 105
82/* kernel includes */ 106/* kernel includes */
@@ -89,8 +113,10 @@
89#include <linux/hid.h> 113#include <linux/hid.h>
90#include <linux/version.h> 114#include <linux/version.h>
91#include <linux/videodev2.h> 115#include <linux/videodev2.h>
116#include <linux/mutex.h>
92#include <media/v4l2-common.h> 117#include <media/v4l2-common.h>
93#include <media/rds.h> 118#include <media/rds.h>
119#include <asm/unaligned.h>
94 120
95 121
96/* USB Device ID List */ 122/* USB Device ID List */
@@ -119,56 +145,56 @@ MODULE_PARM_DESC(radio_nr, "Radio Nr");
119/* 0: 200 kHz (USA, Australia) */ 145/* 0: 200 kHz (USA, Australia) */
120/* 1: 100 kHz (Europe, Japan) */ 146/* 1: 100 kHz (Europe, Japan) */
121/* 2: 50 kHz */ 147/* 2: 50 kHz */
122static int space = 2; 148static unsigned short space = 2;
123module_param(space, int, 0); 149module_param(space, ushort, 0);
124MODULE_PARM_DESC(radio_nr, "Spacing: 0=200kHz 1=100kHz *2=50kHz*"); 150MODULE_PARM_DESC(radio_nr, "Spacing: 0=200kHz 1=100kHz *2=50kHz*");
125 151
126/* Bottom of Band (MHz) */ 152/* Bottom of Band (MHz) */
127/* 0: 87.5 - 108 MHz (USA, Europe)*/ 153/* 0: 87.5 - 108 MHz (USA, Europe)*/
128/* 1: 76 - 108 MHz (Japan wide band) */ 154/* 1: 76 - 108 MHz (Japan wide band) */
129/* 2: 76 - 90 MHz (Japan) */ 155/* 2: 76 - 90 MHz (Japan) */
130static int band = 1; 156static unsigned short band = 1;
131module_param(band, int, 0); 157module_param(band, ushort, 0);
132MODULE_PARM_DESC(radio_nr, "Band: 0=87.5..108MHz *1=76..108MHz* 2=76..90MHz"); 158MODULE_PARM_DESC(radio_nr, "Band: 0=87.5..108MHz *1=76..108MHz* 2=76..90MHz");
133 159
134/* De-emphasis */ 160/* De-emphasis */
135/* 0: 75 us (USA) */ 161/* 0: 75 us (USA) */
136/* 1: 50 us (Europe, Australia, Japan) */ 162/* 1: 50 us (Europe, Australia, Japan) */
137static int de = 1; 163static unsigned short de = 1;
138module_param(de, int, 0); 164module_param(de, ushort, 0);
139MODULE_PARM_DESC(radio_nr, "De-emphasis: 0=75us *1=50us*"); 165MODULE_PARM_DESC(radio_nr, "De-emphasis: 0=75us *1=50us*");
140 166
141/* USB timeout */ 167/* USB timeout */
142static int usb_timeout = 500; 168static unsigned int usb_timeout = 500;
143module_param(usb_timeout, int, 0); 169module_param(usb_timeout, uint, 0);
144MODULE_PARM_DESC(usb_timeout, "USB timeout (ms): *500*"); 170MODULE_PARM_DESC(usb_timeout, "USB timeout (ms): *500*");
145 171
146/* Seek retries */ 172/* Tune timeout */
147static int seek_retries = 100; 173static unsigned int tune_timeout = 3000;
148module_param(seek_retries, int, 0); 174module_param(tune_timeout, uint, 0);
149MODULE_PARM_DESC(seek_retries, "Seek retries: *100*"); 175MODULE_PARM_DESC(tune_timeout, "Tune timeout: *3000*");
150 176
151/* RDS buffer blocks */ 177/* RDS buffer blocks */
152static int rds_buf = 100; 178static unsigned int rds_buf = 100;
153module_param(rds_buf, int, 0); 179module_param(rds_buf, uint, 0);
154MODULE_PARM_DESC(rds_buf, "RDS buffer entries: *100*"); 180MODULE_PARM_DESC(rds_buf, "RDS buffer entries: *100*");
155 181
156/* RDS maximum block errors */ 182/* RDS maximum block errors */
157static int max_rds_errors = 1; 183static unsigned short max_rds_errors = 1;
158/* 0 means 0 errors requiring correction */ 184/* 0 means 0 errors requiring correction */
159/* 1 means 1-2 errors requiring correction (used by original USBRadio.exe) */ 185/* 1 means 1-2 errors requiring correction (used by original USBRadio.exe) */
160/* 2 means 3-5 errors requiring correction */ 186/* 2 means 3-5 errors requiring correction */
161/* 3 means 6+ errors or errors in checkword, correction not possible */ 187/* 3 means 6+ errors or errors in checkword, correction not possible */
162module_param(max_rds_errors, int, 0); 188module_param(max_rds_errors, ushort, 0);
163MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*"); 189MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*");
164 190
165/* RDS poll frequency */ 191/* RDS poll frequency */
166static int rds_poll_time = 40; 192static unsigned int rds_poll_time = 40;
167/* 40 is used by the original USBRadio.exe */ 193/* 40 is used by the original USBRadio.exe */
168/* 50 is used by radio-cadet */ 194/* 50 is used by radio-cadet */
169/* 75 should be okay */ 195/* 75 should be okay */
170/* 80 is the usual RDS receive interval */ 196/* 80 is the usual RDS receive interval */
171module_param(rds_poll_time, int, 0); 197module_param(rds_poll_time, uint, 0);
172MODULE_PARM_DESC(rds_poll_time, "RDS poll time (ms): *40*"); 198MODULE_PARM_DESC(rds_poll_time, "RDS poll time (ms): *40*");
173 199
174 200
@@ -393,22 +419,19 @@ MODULE_PARM_DESC(rds_poll_time, "RDS poll time (ms): *40*");
393struct si470x_device { 419struct si470x_device {
394 /* reference to USB and video device */ 420 /* reference to USB and video device */
395 struct usb_device *usbdev; 421 struct usb_device *usbdev;
422 struct usb_interface *intf;
396 struct video_device *videodev; 423 struct video_device *videodev;
397 424
398 /* are these really necessary ? */ 425 /* driver management */
399 int users; 426 unsigned int users;
400
401 /* report buffer (maximum 64 bytes) */
402 unsigned char buf[64];
403 427
404 /* Silabs internal registers (0..15) */ 428 /* Silabs internal registers (0..15) */
405 unsigned short registers[RADIO_REGISTER_NUM]; 429 unsigned short registers[RADIO_REGISTER_NUM];
406 430
407 /* RDS receive buffer */ 431 /* RDS receive buffer */
408 struct work_struct work; 432 struct delayed_work work;
409 wait_queue_head_t read_queue; 433 wait_queue_head_t read_queue;
410 struct timer_list timer; 434 struct mutex lock; /* buffer locking */
411 spinlock_t lock; /* buffer locking */
412 unsigned char *buffer; /* size is always multiple of three */ 435 unsigned char *buffer; /* size is always multiple of three */
413 unsigned int buf_size; 436 unsigned int buf_size;
414 unsigned int rd_index; 437 unsigned int rd_index;
@@ -434,28 +457,46 @@ struct si470x_device {
434/* 457/*
435 * si470x_get_report - receive a HID report 458 * si470x_get_report - receive a HID report
436 */ 459 */
437static int si470x_get_report(struct si470x_device *radio, int size) 460static int si470x_get_report(struct si470x_device *radio, void *buf, int size)
438{ 461{
439 return usb_control_msg(radio->usbdev, 462 unsigned char *report = (unsigned char *) buf;
463 int retval;
464
465 retval = usb_control_msg(radio->usbdev,
440 usb_rcvctrlpipe(radio->usbdev, 0), 466 usb_rcvctrlpipe(radio->usbdev, 0),
441 HID_REQ_GET_REPORT, 467 HID_REQ_GET_REPORT,
442 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, 468 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
443 radio->buf[0], 2, 469 report[0], 2,
444 radio->buf, size, usb_timeout); 470 buf, size, usb_timeout);
471 if (retval < 0)
472 printk(KERN_WARNING DRIVER_NAME
473 ": si470x_get_report: usb_control_msg returned %d\n",
474 retval);
475
476 return retval;
445} 477}
446 478
447 479
448/* 480/*
449 * si470x_set_report - send a HID report 481 * si470x_set_report - send a HID report
450 */ 482 */
451static int si470x_set_report(struct si470x_device *radio, int size) 483static int si470x_set_report(struct si470x_device *radio, void *buf, int size)
452{ 484{
453 return usb_control_msg(radio->usbdev, 485 unsigned char *report = (unsigned char *) buf;
486 int retval;
487
488 retval = usb_control_msg(radio->usbdev,
454 usb_sndctrlpipe(radio->usbdev, 0), 489 usb_sndctrlpipe(radio->usbdev, 0),
455 HID_REQ_SET_REPORT, 490 HID_REQ_SET_REPORT,
456 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, 491 USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
457 radio->buf[0], 2, 492 report[0], 2,
458 radio->buf, size, usb_timeout); 493 buf, size, usb_timeout);
494 if (retval < 0)
495 printk(KERN_WARNING DRIVER_NAME
496 ": si470x_set_report: usb_control_msg returned %d\n",
497 retval);
498
499 return retval;
459} 500}
460 501
461 502
@@ -464,13 +505,16 @@ static int si470x_set_report(struct si470x_device *radio, int size)
464 */ 505 */
465static int si470x_get_register(struct si470x_device *radio, int regnr) 506static int si470x_get_register(struct si470x_device *radio, int regnr)
466{ 507{
508 unsigned char buf[REGISTER_REPORT_SIZE];
467 int retval; 509 int retval;
468 510
469 radio->buf[0] = REGISTER_REPORT(regnr); 511 buf[0] = REGISTER_REPORT(regnr);
512
513 retval = si470x_get_report(radio, (void *) &buf, sizeof(buf));
470 514
471 retval = si470x_get_report(radio, REGISTER_REPORT_SIZE);
472 if (retval >= 0) 515 if (retval >= 0)
473 radio->registers[regnr] = (radio->buf[1] << 8) | radio->buf[2]; 516 radio->registers[regnr] = be16_to_cpu(get_unaligned(
517 (unsigned short *) &buf[1]));
474 518
475 return (retval < 0) ? -EINVAL : 0; 519 return (retval < 0) ? -EINVAL : 0;
476} 520}
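
The report parsing above switches from open-coded shifts on a shared radio->buf to a per-call buffer plus get_unaligned()/be16_to_cpu(): the 16-bit register value sits big-endian at offset 1 of the HID report, which is not naturally aligned for a u16. A sketch of the idiom (decode_report_word() is an illustrative helper, not a driver function; later kernels also provide get_unaligned_be16() for the same thing):

#include <linux/kernel.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

static u16 decode_report_word(const unsigned char *report)
{
        /* equivalent to the old open-coded form: (report[1] << 8) | report[2] */
        return be16_to_cpu(get_unaligned((__be16 *) &report[1]));
}
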
@@ -481,13 +525,14 @@ static int si470x_get_register(struct si470x_device *radio, int regnr)
481 */ 525 */
482static int si470x_set_register(struct si470x_device *radio, int regnr) 526static int si470x_set_register(struct si470x_device *radio, int regnr)
483{ 527{
528 unsigned char buf[REGISTER_REPORT_SIZE];
484 int retval; 529 int retval;
485 530
486 radio->buf[0] = REGISTER_REPORT(regnr); 531 buf[0] = REGISTER_REPORT(regnr);
487 radio->buf[1] = (radio->registers[regnr] & 0xff00) >> 8; 532 put_unaligned(cpu_to_be16(radio->registers[regnr]),
488 radio->buf[2] = (radio->registers[regnr] & 0x00ff); 533 (unsigned short *) &buf[1]);
489 534
490 retval = si470x_set_report(radio, REGISTER_REPORT_SIZE); 535 retval = si470x_set_report(radio, (void *) &buf, sizeof(buf));
491 536
492 return (retval < 0) ? -EINVAL : 0; 537 return (retval < 0) ? -EINVAL : 0;
493} 538}
@@ -498,18 +543,19 @@ static int si470x_set_register(struct si470x_device *radio, int regnr)
498 */ 543 */
499static int si470x_get_all_registers(struct si470x_device *radio) 544static int si470x_get_all_registers(struct si470x_device *radio)
500{ 545{
546 unsigned char buf[ENTIRE_REPORT_SIZE];
501 int retval; 547 int retval;
502 int regnr; 548 unsigned char regnr;
503 549
504 radio->buf[0] = ENTIRE_REPORT; 550 buf[0] = ENTIRE_REPORT;
505 551
506 retval = si470x_get_report(radio, ENTIRE_REPORT_SIZE); 552 retval = si470x_get_report(radio, (void *) &buf, sizeof(buf));
507 553
508 if (retval >= 0) 554 if (retval >= 0)
509 for (regnr = 0; regnr < RADIO_REGISTER_NUM; regnr++) 555 for (regnr = 0; regnr < RADIO_REGISTER_NUM; regnr++)
510 radio->registers[regnr] = 556 radio->registers[regnr] = be16_to_cpu(get_unaligned(
511 (radio->buf[regnr * RADIO_REGISTER_SIZE + 1] << 8) | 557 (unsigned short *)
512 radio->buf[regnr * RADIO_REGISTER_SIZE + 2]; 558 &buf[regnr * RADIO_REGISTER_SIZE + 1]));
513 559
514 return (retval < 0) ? -EINVAL : 0; 560 return (retval < 0) ? -EINVAL : 0;
515} 561}
@@ -520,21 +566,28 @@ static int si470x_get_all_registers(struct si470x_device *radio)
520 */ 566 */
521static int si470x_get_rds_registers(struct si470x_device *radio) 567static int si470x_get_rds_registers(struct si470x_device *radio)
522{ 568{
569 unsigned char buf[RDS_REPORT_SIZE];
523 int retval; 570 int retval;
524 int regnr;
525 int size; 571 int size;
572 unsigned char regnr;
526 573
527 radio->buf[0] = RDS_REPORT; 574 buf[0] = RDS_REPORT;
528 575
529 retval = usb_interrupt_msg(radio->usbdev, 576 retval = usb_interrupt_msg(radio->usbdev,
530 usb_rcvctrlpipe(radio->usbdev, 1), 577 usb_rcvintpipe(radio->usbdev, 1),
531 radio->buf, RDS_REPORT_SIZE, &size, usb_timeout); 578 (void *) &buf, sizeof(buf), &size, usb_timeout);
579 if (size != sizeof(buf))
580 printk(KERN_WARNING DRIVER_NAME ": si470x_get_rds_register: "
581 "return size differs: %d != %zu\n", size, sizeof(buf));
582 if (retval < 0)
583 printk(KERN_WARNING DRIVER_NAME ": si470x_get_rds_registers: "
584 "usb_interrupt_msg returned %d\n", retval);
532 585
533 if (retval >= 0) 586 if (retval >= 0)
534 for (regnr = 0; regnr < RDS_REGISTER_NUM; regnr++) 587 for (regnr = 0; regnr < RDS_REGISTER_NUM; regnr++)
535 radio->registers[STATUSRSSI + regnr] = 588 radio->registers[STATUSRSSI + regnr] =
536 (radio->buf[regnr * RADIO_REGISTER_SIZE + 1] << 8) | 589 be16_to_cpu(get_unaligned((unsigned short *)
537 radio->buf[regnr * RADIO_REGISTER_SIZE + 2]; 590 &buf[regnr * RADIO_REGISTER_SIZE + 1]));
538 591
539 return (retval < 0) ? -EINVAL : 0; 592 return (retval < 0) ? -EINVAL : 0;
540} 593}
@@ -543,9 +596,11 @@ static int si470x_get_rds_registers(struct si470x_device *radio)
543/* 596/*
544 * si470x_set_chan - set the channel 597 * si470x_set_chan - set the channel
545 */ 598 */
546static int si470x_set_chan(struct si470x_device *radio, int chan) 599static int si470x_set_chan(struct si470x_device *radio, unsigned short chan)
547{ 600{
548 int retval, i; 601 int retval;
602 unsigned long timeout;
603 bool timed_out = 0;
549 604
550 /* start tuning */ 605 /* start tuning */
551 radio->registers[CHANNEL] &= ~CHANNEL_CHAN; 606 radio->registers[CHANNEL] &= ~CHANNEL_CHAN;
@@ -555,16 +610,17 @@ static int si470x_set_chan(struct si470x_device *radio, int chan)
555 return retval; 610 return retval;
556 611
557 /* wait till seek operation has completed */ 612 /* wait till seek operation has completed */
558 i = 0; 613 timeout = jiffies + msecs_to_jiffies(tune_timeout);
559 do { 614 do {
560 retval = si470x_get_register(radio, STATUSRSSI); 615 retval = si470x_get_register(radio, STATUSRSSI);
561 if (retval < 0) 616 if (retval < 0)
562 return retval; 617 return retval;
563 } while ((radio->registers[STATUSRSSI] & STATUSRSSI_STC) && 618 timed_out = time_after(jiffies, timeout);
564 (++i < seek_retries)); 619 } while (((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0) &&
565 if (i >= seek_retries) 620 (!timed_out));
621 if (timed_out)
566 printk(KERN_WARNING DRIVER_NAME 622 printk(KERN_WARNING DRIVER_NAME
567 ": seek does not finish after %d tries\n", i); 623 ": seek does not finish after %u ms\n", tune_timeout);
568 624
569 /* stop tuning */ 625 /* stop tuning */
570 radio->registers[CHANNEL] &= ~CHANNEL_TUNE; 626 radio->registers[CHANNEL] &= ~CHANNEL_TUNE;
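
Two things change in the tune loop above: the bounded wait is now expressed in wall-clock time (jiffies plus msecs_to_jiffies(tune_timeout), checked with time_after()) instead of a bare retry counter, and the loop now polls until the STC "tune complete" bit becomes set rather than while it is set. The generic shape of such a polling loop, as a sketch (poll_hw_done() and MY_TIMEOUT_MS are illustrative stand-ins):

#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/errno.h>

#define MY_TIMEOUT_MS   3000

static int wait_for_done_flag(bool (*poll_hw_done)(void))
{
        unsigned long timeout = jiffies + msecs_to_jiffies(MY_TIMEOUT_MS);

        do {
                if (poll_hw_done())
                        return 0;               /* hardware raised its "done" flag */
        } while (!time_after(jiffies, timeout));

        return -ETIMEDOUT;                      /* gave up after MY_TIMEOUT_MS */
}
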
@@ -575,9 +631,10 @@ static int si470x_set_chan(struct si470x_device *radio, int chan)
575/* 631/*
576 * si470x_get_freq - get the frequency 632 * si470x_get_freq - get the frequency
577 */ 633 */
578static int si470x_get_freq(struct si470x_device *radio) 634static unsigned int si470x_get_freq(struct si470x_device *radio)
579{ 635{
580 int spacing, band_bottom, chan, freq; 636 unsigned int spacing, band_bottom, freq;
637 unsigned short chan;
581 int retval; 638 int retval;
582 639
583 /* Spacing (kHz) */ 640 /* Spacing (kHz) */
@@ -616,9 +673,10 @@ static int si470x_get_freq(struct si470x_device *radio)
616/* 673/*
617 * si470x_set_freq - set the frequency 674 * si470x_set_freq - set the frequency
618 */ 675 */
619static int si470x_set_freq(struct si470x_device *radio, int freq) 676static int si470x_set_freq(struct si470x_device *radio, unsigned int freq)
620{ 677{
621 int spacing, band_bottom, chan; 678 unsigned int spacing, band_bottom;
679 unsigned short chan;
622 680
623 /* Spacing (kHz) */ 681 /* Spacing (kHz) */
624 switch (space) { 682 switch (space) {
@@ -709,9 +767,17 @@ static int si470x_stop(struct si470x_device *radio)
709 */ 767 */
710static int si470x_rds_on(struct si470x_device *radio) 768static int si470x_rds_on(struct si470x_device *radio)
711{ 769{
770 int retval;
771
712 /* sysconfig 1 */ 772 /* sysconfig 1 */
773 mutex_lock(&radio->lock);
713 radio->registers[SYSCONFIG1] |= SYSCONFIG1_RDS; 774 radio->registers[SYSCONFIG1] |= SYSCONFIG1_RDS;
714 return si470x_set_register(radio, SYSCONFIG1); 775 retval = si470x_set_register(radio, SYSCONFIG1);
776 if (retval < 0)
777 radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_RDS;
778 mutex_unlock(&radio->lock);
779
780 return retval;
715} 781}
716 782
717 783
@@ -725,11 +791,10 @@ static int si470x_rds_on(struct si470x_device *radio)
725 */ 791 */
726static void si470x_rds(struct si470x_device *radio) 792static void si470x_rds(struct si470x_device *radio)
727{ 793{
728 unsigned char tmpbuf[3];
729 unsigned char blocknum; 794 unsigned char blocknum;
730 unsigned char bler; /* rds block errors */ 795 unsigned short bler; /* rds block errors */
731 unsigned short rds; 796 unsigned short rds;
732 unsigned int i; 797 unsigned char tmpbuf[3];
733 798
734 /* get rds blocks */ 799 /* get rds blocks */
735 if (si470x_get_rds_registers(radio) < 0) 800 if (si470x_get_rds_registers(radio) < 0)
@@ -743,63 +808,58 @@ static void si470x_rds(struct si470x_device *radio)
743 return; 808 return;
744 } 809 }
745 810
746 /* copy four RDS blocks to internal buffer */ 811 /* copy all four RDS blocks to internal buffer */
747 if (spin_trylock(&radio->lock)) { 812 mutex_lock(&radio->lock);
748 /* process each rds block */ 813 for (blocknum = 0; blocknum < 4; blocknum++) {
749 for (blocknum = 0; blocknum < 4; blocknum++) { 814 switch (blocknum) {
750 switch (blocknum) { 815 default:
751 default: 816 bler = (radio->registers[STATUSRSSI] &
752 bler = (radio->registers[STATUSRSSI] & 817 STATUSRSSI_BLERA) >> 9;
753 STATUSRSSI_BLERA) >> 9; 818 rds = radio->registers[RDSA];
754 rds = radio->registers[RDSA]; 819 break;
755 break; 820 case 1:
756 case 1: 821 bler = (radio->registers[READCHAN] &
757 bler = (radio->registers[READCHAN] & 822 READCHAN_BLERB) >> 14;
758 READCHAN_BLERB) >> 14; 823 rds = radio->registers[RDSB];
759 rds = radio->registers[RDSB]; 824 break;
760 break; 825 case 2:
761 case 2: 826 bler = (radio->registers[READCHAN] &
762 bler = (radio->registers[READCHAN] & 827 READCHAN_BLERC) >> 12;
763 READCHAN_BLERC) >> 12; 828 rds = radio->registers[RDSC];
764 rds = radio->registers[RDSC]; 829 break;
765 break; 830 case 3:
766 case 3: 831 bler = (radio->registers[READCHAN] &
767 bler = (radio->registers[READCHAN] & 832 READCHAN_BLERD) >> 10;
768 READCHAN_BLERD) >> 10; 833 rds = radio->registers[RDSD];
769 rds = radio->registers[RDSD]; 834 break;
770 break; 835 };
771 }; 836
772 837 /* Fill the V4L2 RDS buffer */
773 /* Fill the V4L2 RDS buffer */ 838 put_unaligned(cpu_to_le16(rds), (unsigned short *) &tmpbuf);
774 tmpbuf[0] = rds & 0x00ff; /* LSB */ 839 tmpbuf[2] = blocknum; /* offset name */
775 tmpbuf[1] = (rds & 0xff00) >> 8;/* MSB */ 840 tmpbuf[2] |= blocknum << 3; /* received offset */
776 tmpbuf[2] = blocknum; /* offset name */ 841 if (bler > max_rds_errors)
777 tmpbuf[2] |= blocknum << 3; /* received offset */ 842 tmpbuf[2] |= 0x80; /* uncorrectable errors */
778 if (bler > max_rds_errors) 843 else if (bler > 0)
779 tmpbuf[2] |= 0x80; /* uncorrectable errors */ 844 tmpbuf[2] |= 0x40; /* corrected error(s) */
780 else if (bler > 0) 845
781 tmpbuf[2] |= 0x40; /* corrected error(s) */ 846 /* copy RDS block to internal buffer */
782 847 memcpy(&radio->buffer[radio->wr_index], &tmpbuf, 3);
783 /* copy RDS block to internal buffer */ 848 radio->wr_index += 3;
784 for (i = 0; i < 3; i++) { 849
785 radio->buffer[radio->wr_index] = tmpbuf[i]; 850 /* wrap write pointer */
786 radio->wr_index++; 851 if (radio->wr_index >= radio->buf_size)
787 } 852 radio->wr_index = 0;
788 853
789 /* wrap write pointer */ 854 /* check for overflow */
790 if (radio->wr_index >= radio->buf_size) 855 if (radio->wr_index == radio->rd_index) {
791 radio->wr_index = 0; 856 /* increment and wrap read pointer */
792 857 radio->rd_index += 3;
793 /* check for overflow */ 858 if (radio->rd_index >= radio->buf_size)
794 if (radio->wr_index == radio->rd_index) { 859 radio->rd_index = 0;
795 /* increment and wrap read pointer */
796 radio->rd_index += 3;
797 if (radio->rd_index >= radio->buf_size)
798 radio->rd_index = 0;
799 }
800 } 860 }
801 spin_unlock(&radio->lock);
802 } 861 }
862 mutex_unlock(&radio->lock);
803 863
804 /* wake up read queue */ 864 /* wake up read queue */
805 if (radio->wr_index != radio->rd_index) 865 if (radio->wr_index != radio->rd_index)
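
The RDS copy above now runs under the radio->lock mutex (instead of a best-effort spin_trylock that could silently drop blocks when the lock was contended), copies each 3-byte block with memcpy(), and keeps the existing overwrite-oldest ring-buffer policy. That policy in isolation, as a sketch (the struct and function names are illustrative):

#include <string.h>

#define REC_SIZE        3
#define BUF_RECS        100

struct rds_ring {
        unsigned char buf[REC_SIZE * BUF_RECS];
        unsigned int rd_index;
        unsigned int wr_index;
};

static void ring_push(struct rds_ring *r, const unsigned char rec[REC_SIZE])
{
        memcpy(&r->buf[r->wr_index], rec, REC_SIZE);
        r->wr_index += REC_SIZE;
        if (r->wr_index >= sizeof(r->buf))
                r->wr_index = 0;                /* wrap write pointer */

        if (r->wr_index == r->rd_index) {       /* overflow: drop the oldest block */
                r->rd_index += REC_SIZE;
                if (r->rd_index >= sizeof(r->buf))
                        r->rd_index = 0;
        }
}
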
@@ -808,29 +868,18 @@ static void si470x_rds(struct si470x_device *radio)
808 868
809 869
810/* 870/*
811 * si470x_timer - rds timer function
812 */
813static void si470x_timer(unsigned long data)
814{
815 struct si470x_device *radio = (struct si470x_device *) data;
816
817 schedule_work(&radio->work);
818}
819
820
821/*
822 * si470x_work - rds work function 871 * si470x_work - rds work function
823 */ 872 */
824static void si470x_work(struct work_struct *work) 873static void si470x_work(struct work_struct *work)
825{ 874{
826 struct si470x_device *radio = container_of(work, struct si470x_device, 875 struct si470x_device *radio = container_of(work, struct si470x_device,
827 work); 876 work.work);
828 877
829 if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0) 878 if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
830 return; 879 return;
831 880
832 si470x_rds(radio); 881 si470x_rds(radio);
833 mod_timer(&radio->timer, jiffies + msecs_to_jiffies(rds_poll_time)); 882 schedule_delayed_work(&radio->work, msecs_to_jiffies(rds_poll_time));
834} 883}
835 884
836 885
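
The hunk above folds the old timer-plus-workqueue pair into a single self-rescheduling delayed_work item: the work function does the poll and re-arms itself (note the container_of() now uses work.work, since delayed_work embeds a work_struct), and teardown becomes one cancel_delayed_work_sync() call instead of del_timer_sync() plus flush_scheduled_work(). The pattern in isolation (my_dev, my_poll() and POLL_MS are illustrative):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define POLL_MS 40

struct my_dev {
        struct delayed_work work;
        /* ... */
};

static void my_poll(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, work.work);

        /* ... poll the hardware owned by dev, then re-arm ... */
        schedule_delayed_work(&dev->work, msecs_to_jiffies(POLL_MS));
}

/* setup:    INIT_DELAYED_WORK(&dev->work, my_poll);
 *           schedule_delayed_work(&dev->work, msecs_to_jiffies(POLL_MS));
 * teardown: cancel_delayed_work_sync(&dev->work);
 */
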
@@ -852,44 +901,44 @@ static ssize_t si470x_fops_read(struct file *file, char __user *buf,
852 /* switch on rds reception */ 901 /* switch on rds reception */
853 if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0) { 902 if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0) {
854 si470x_rds_on(radio); 903 si470x_rds_on(radio);
855 schedule_work(&radio->work); 904 schedule_delayed_work(&radio->work,
905 msecs_to_jiffies(rds_poll_time));
856 } 906 }
857 907
858 /* block if no new data available */ 908 /* block if no new data available */
859 while (radio->wr_index == radio->rd_index) { 909 while (radio->wr_index == radio->rd_index) {
860 if (file->f_flags & O_NONBLOCK) 910 if (file->f_flags & O_NONBLOCK)
861 return -EWOULDBLOCK; 911 return -EWOULDBLOCK;
862 interruptible_sleep_on(&radio->read_queue); 912 if (wait_event_interruptible(radio->read_queue,
913 radio->wr_index != radio->rd_index) < 0)
914 return -EINTR;
863 } 915 }
864 916
865 /* calculate block count from byte count */ 917 /* calculate block count from byte count */
866 count /= 3; 918 count /= 3;
867 919
868 /* copy RDS block out of internal buffer and to user buffer */ 920 /* copy RDS block out of internal buffer and to user buffer */
869 if (spin_trylock(&radio->lock)) { 921 mutex_lock(&radio->lock);
870 while (block_count < count) { 922 while (block_count < count) {
871 if (radio->rd_index == radio->wr_index) 923 if (radio->rd_index == radio->wr_index)
872 break; 924 break;
873 925
874 /* always transfer rds complete blocks */ 926 /* always transfer rds complete blocks */
875 if (copy_to_user(buf, 927 if (copy_to_user(buf, &radio->buffer[radio->rd_index], 3))
876 &radio->buffer[radio->rd_index], 3)) 928 /* retval = -EFAULT; */
877 /* retval = -EFAULT; */ 929 break;
878 break; 930
879 931 /* increment and wrap read pointer */
880 /* increment and wrap read pointer */ 932 radio->rd_index += 3;
881 radio->rd_index += 3; 933 if (radio->rd_index >= radio->buf_size)
882 if (radio->rd_index >= radio->buf_size) 934 radio->rd_index = 0;
883 radio->rd_index = 0; 935
884 936 /* increment counters */
885 /* increment counters */ 937 block_count++;
886 block_count++; 938 buf += 3;
887 buf += 3; 939 retval += 3;
888 retval += 3;
889 }
890
891 spin_unlock(&radio->lock);
892 } 940 }
941 mutex_unlock(&radio->lock);
893 942
894 return retval; 943 return retval;
895} 944}
@@ -906,7 +955,8 @@ static unsigned int si470x_fops_poll(struct file *file,
906 /* switch on rds reception */ 955 /* switch on rds reception */
907 if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0) { 956 if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0) {
908 si470x_rds_on(radio); 957 si470x_rds_on(radio);
909 schedule_work(&radio->work); 958 schedule_delayed_work(&radio->work,
959 msecs_to_jiffies(rds_poll_time));
910 } 960 }
911 961
912 poll_wait(file, &radio->read_queue, pts); 962 poll_wait(file, &radio->read_queue, pts);
@@ -924,10 +974,22 @@ static unsigned int si470x_fops_poll(struct file *file,
924static int si470x_fops_open(struct inode *inode, struct file *file) 974static int si470x_fops_open(struct inode *inode, struct file *file)
925{ 975{
926 struct si470x_device *radio = video_get_drvdata(video_devdata(file)); 976 struct si470x_device *radio = video_get_drvdata(video_devdata(file));
977 int retval;
927 978
928 radio->users++; 979 radio->users++;
929 if (radio->users == 1) 980
930 return si470x_start(radio); 981 retval = usb_autopm_get_interface(radio->intf);
982 if (retval < 0) {
983 radio->users--;
984 return -EIO;
985 }
986
987 if (radio->users == 1) {
988 retval = si470x_start(radio);
989 if (retval < 0)
990 usb_autopm_put_interface(radio->intf);
991 return retval;
992 }
931 993
932 return 0; 994 return 0;
933} 995}
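
The open() path above pairs every successful open with usb_autopm_get_interface(), which resumes a suspended device and holds a power-management reference for as long as the node is open; the error paths drop the reference again. A sketch of that pairing under simplified assumptions (my_dev, my_open() and my_hw_start() are illustrative; the driver keeps the interface pointer it saved at probe time in radio->intf):

#include <linux/usb.h>

struct my_dev {
        struct usb_interface *intf;
};

static int my_hw_start(struct my_dev *dev);     /* stand-in for si470x_start() */

static int my_open(struct my_dev *dev)
{
        int retval;

        retval = usb_autopm_get_interface(dev->intf);   /* resume + block autosuspend */
        if (retval < 0)
                return retval;

        retval = my_hw_start(dev);
        if (retval < 0)
                usb_autopm_put_interface(dev->intf);    /* drop the PM reference on failure */
        return retval;
}

/* The matching release() ends with usb_autopm_put_interface(dev->intf);
 * the usb_driver additionally sets .supports_autosuspend = 1 and gains
 * suspend()/resume() callbacks, as the later hunks show. */
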
@@ -939,6 +1001,7 @@ static int si470x_fops_open(struct inode *inode, struct file *file)
939static int si470x_fops_release(struct inode *inode, struct file *file) 1001static int si470x_fops_release(struct inode *inode, struct file *file)
940{ 1002{
941 struct si470x_device *radio = video_get_drvdata(video_devdata(file)); 1003 struct si470x_device *radio = video_get_drvdata(video_devdata(file));
1004 int retval;
942 1005
943 if (!radio) 1006 if (!radio)
944 return -ENODEV; 1007 return -ENODEV;
@@ -946,13 +1009,14 @@ static int si470x_fops_release(struct inode *inode, struct file *file)
946 radio->users--; 1009 radio->users--;
947 if (radio->users == 0) { 1010 if (radio->users == 0) {
948 /* stop rds reception */ 1011 /* stop rds reception */
949 del_timer_sync(&radio->timer); 1012 cancel_delayed_work_sync(&radio->work);
950 flush_scheduled_work();
951 1013
952 /* cancel read processes */ 1014 /* cancel read processes */
953 wake_up_interruptible(&radio->read_queue); 1015 wake_up_interruptible(&radio->read_queue);
954 1016
955 return si470x_stop(radio); 1017 retval = si470x_stop(radio);
1018 usb_autopm_put_interface(radio->intf);
1019 return retval;
956 } 1020 }
957 1021
958 return 0; 1022 return 0;
@@ -1030,7 +1094,7 @@ static int si470x_vidioc_querycap(struct file *file, void *priv,
1030 strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver)); 1094 strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver));
1031 strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card)); 1095 strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
1032 sprintf(capability->bus_info, "USB"); 1096 sprintf(capability->bus_info, "USB");
1033 capability->version = DRIVER_VERSION; 1097 capability->version = DRIVER_KERNEL_VERSION;
1034 capability->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO; 1098 capability->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
1035 1099
1036 return 0; 1100 return 0;
@@ -1067,16 +1131,21 @@ static int si470x_vidioc_s_input(struct file *filp, void *priv, unsigned int i)
1067static int si470x_vidioc_queryctrl(struct file *file, void *priv, 1131static int si470x_vidioc_queryctrl(struct file *file, void *priv,
1068 struct v4l2_queryctrl *qc) 1132 struct v4l2_queryctrl *qc)
1069{ 1133{
1070 int i; 1134 unsigned char i;
1135 int retval = -EINVAL;
1071 1136
1072 for (i = 0; i < ARRAY_SIZE(si470x_v4l2_queryctrl); i++) { 1137 for (i = 0; i < ARRAY_SIZE(si470x_v4l2_queryctrl); i++) {
1073 if (qc->id && qc->id == si470x_v4l2_queryctrl[i].id) { 1138 if (qc->id && qc->id == si470x_v4l2_queryctrl[i].id) {
1074 memcpy(qc, &(si470x_v4l2_queryctrl[i]), sizeof(*qc)); 1139 memcpy(qc, &(si470x_v4l2_queryctrl[i]), sizeof(*qc));
1075 return 0; 1140 retval = 0;
1141 break;
1076 } 1142 }
1077 } 1143 }
1144 if (retval < 0)
1145 printk(KERN_WARNING DRIVER_NAME
1146 ": query control failed with %d\n", retval);
1078 1147
1079 return -EINVAL; 1148 return retval;
1080} 1149}
1081 1150
1082 1151
@@ -1110,21 +1179,29 @@ static int si470x_vidioc_s_ctrl(struct file *file, void *priv,
1110 struct v4l2_control *ctrl) 1179 struct v4l2_control *ctrl)
1111{ 1180{
1112 struct si470x_device *radio = video_get_drvdata(video_devdata(file)); 1181 struct si470x_device *radio = video_get_drvdata(video_devdata(file));
1182 int retval;
1113 1183
1114 switch (ctrl->id) { 1184 switch (ctrl->id) {
1115 case V4L2_CID_AUDIO_VOLUME: 1185 case V4L2_CID_AUDIO_VOLUME:
1116 radio->registers[SYSCONFIG2] &= ~SYSCONFIG2_VOLUME; 1186 radio->registers[SYSCONFIG2] &= ~SYSCONFIG2_VOLUME;
1117 radio->registers[SYSCONFIG2] |= ctrl->value; 1187 radio->registers[SYSCONFIG2] |= ctrl->value;
1118 return si470x_set_register(radio, SYSCONFIG2); 1188 retval = si470x_set_register(radio, SYSCONFIG2);
1189 break;
1119 case V4L2_CID_AUDIO_MUTE: 1190 case V4L2_CID_AUDIO_MUTE:
1120 if (ctrl->value == 1) 1191 if (ctrl->value == 1)
1121 radio->registers[POWERCFG] &= ~POWERCFG_DMUTE; 1192 radio->registers[POWERCFG] &= ~POWERCFG_DMUTE;
1122 else 1193 else
1123 radio->registers[POWERCFG] |= POWERCFG_DMUTE; 1194 radio->registers[POWERCFG] |= POWERCFG_DMUTE;
1124 return si470x_set_register(radio, POWERCFG); 1195 retval = si470x_set_register(radio, POWERCFG);
1196 break;
1197 default:
1198 retval = -EINVAL;
1125 } 1199 }
1200 if (retval < 0)
1201 printk(KERN_WARNING DRIVER_NAME
1202 ": set control failed with %d\n", retval);
1126 1203
1127 return -EINVAL; 1204 return retval;
1128} 1205}
1129 1206
1130 1207
@@ -1163,8 +1240,8 @@ static int si470x_vidioc_s_audio(struct file *file, void *priv,
1163static int si470x_vidioc_g_tuner(struct file *file, void *priv, 1240static int si470x_vidioc_g_tuner(struct file *file, void *priv,
1164 struct v4l2_tuner *tuner) 1241 struct v4l2_tuner *tuner)
1165{ 1242{
1166 int retval;
1167 struct si470x_device *radio = video_get_drvdata(video_devdata(file)); 1243 struct si470x_device *radio = video_get_drvdata(video_devdata(file));
1244 int retval;
1168 1245
1169 if (tuner->index > 0) 1246 if (tuner->index > 0)
1170 return -EINVAL; 1247 return -EINVAL;
@@ -1220,6 +1297,7 @@ static int si470x_vidioc_s_tuner(struct file *file, void *priv,
1220 struct v4l2_tuner *tuner) 1297 struct v4l2_tuner *tuner)
1221{ 1298{
1222 struct si470x_device *radio = video_get_drvdata(video_devdata(file)); 1299 struct si470x_device *radio = video_get_drvdata(video_devdata(file));
1300 int retval;
1223 1301
1224 if (tuner->index > 0) 1302 if (tuner->index > 0)
1225 return -EINVAL; 1303 return -EINVAL;
@@ -1229,7 +1307,12 @@ static int si470x_vidioc_s_tuner(struct file *file, void *priv,
1229 else 1307 else
1230 radio->registers[POWERCFG] &= ~POWERCFG_MONO; /* try stereo */ 1308 radio->registers[POWERCFG] &= ~POWERCFG_MONO; /* try stereo */
1231 1309
1232 return si470x_set_register(radio, POWERCFG); 1310 retval = si470x_set_register(radio, POWERCFG);
1311 if (retval < 0)
1312 printk(KERN_WARNING DRIVER_NAME
1313 ": set tuner failed with %d\n", retval);
1314
1315 return retval;
1233} 1316}
1234 1317
1235 1318
@@ -1255,11 +1338,17 @@ static int si470x_vidioc_s_frequency(struct file *file, void *priv,
1255 struct v4l2_frequency *freq) 1338 struct v4l2_frequency *freq)
1256{ 1339{
1257 struct si470x_device *radio = video_get_drvdata(video_devdata(file)); 1340 struct si470x_device *radio = video_get_drvdata(video_devdata(file));
1341 int retval;
1258 1342
1259 if (freq->type != V4L2_TUNER_RADIO) 1343 if (freq->type != V4L2_TUNER_RADIO)
1260 return -EINVAL; 1344 return -EINVAL;
1261 1345
1262 return si470x_set_freq(radio, freq->frequency); 1346 retval = si470x_set_freq(radio, freq->frequency);
1347 if (retval < 0)
1348 printk(KERN_WARNING DRIVER_NAME
1349 ": set frequency failed with %d\n", retval);
1350
1351 return 0;
1263} 1352}
1264 1353
1265 1354
@@ -1299,71 +1388,116 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
1299 const struct usb_device_id *id) 1388 const struct usb_device_id *id)
1300{ 1389{
1301 struct si470x_device *radio; 1390 struct si470x_device *radio;
1391 int retval = -ENOMEM;
1302 1392
1303 /* memory and interface allocations */ 1393 /* private data allocation */
1304 radio = kmalloc(sizeof(struct si470x_device), GFP_KERNEL); 1394 radio = kzalloc(sizeof(struct si470x_device), GFP_KERNEL);
1305 if (!radio) 1395 if (!radio)
1306 return -ENOMEM; 1396 goto err_initial;
1397
1398 /* video device allocation */
1307 radio->videodev = video_device_alloc(); 1399 radio->videodev = video_device_alloc();
1308 if (!radio->videodev) { 1400 if (!radio->videodev)
1309 kfree(radio); 1401 goto err_radio;
1310 return -ENOMEM; 1402
1311 } 1403 /* initial configuration */
1312 memcpy(radio->videodev, &si470x_viddev_template, 1404 memcpy(radio->videodev, &si470x_viddev_template,
1313 sizeof(si470x_viddev_template)); 1405 sizeof(si470x_viddev_template));
1314 radio->users = 0; 1406 radio->users = 0;
1315 radio->usbdev = interface_to_usbdev(intf); 1407 radio->usbdev = interface_to_usbdev(intf);
1408 radio->intf = intf;
1409 mutex_init(&radio->lock);
1316 video_set_drvdata(radio->videodev, radio); 1410 video_set_drvdata(radio->videodev, radio);
1317 if (video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr)) {
1318 printk(KERN_WARNING DRIVER_NAME
1319 ": Could not register video device\n");
1320 video_device_release(radio->videodev);
1321 kfree(radio);
1322 return -EIO;
1323 }
1324 usb_set_intfdata(intf, radio);
1325 1411
1326 /* show some infos about the specific device */ 1412 /* show some infos about the specific device */
1327 if (si470x_get_all_registers(radio) < 0) { 1413 retval = -EIO;
1328 video_device_release(radio->videodev); 1414 if (si470x_get_all_registers(radio) < 0)
1329 kfree(radio); 1415 goto err_all;
1330 return -EIO; 1416 printk(KERN_INFO DRIVER_NAME ": DeviceID=0x%4.4hx ChipID=0x%4.4hx\n",
1331 }
1332 printk(KERN_INFO DRIVER_NAME ": DeviceID=0x%4.4x ChipID=0x%4.4x\n",
1333 radio->registers[DEVICEID], radio->registers[CHIPID]); 1417 radio->registers[DEVICEID], radio->registers[CHIPID]);
1334 1418
1335 /* check if firmware is current */ 1419 /* check if firmware is current */
1336 if ((radio->registers[CHIPID] & CHIPID_FIRMWARE) 1420 if ((radio->registers[CHIPID] & CHIPID_FIRMWARE)
1337 < RADIO_SW_VERSION_CURRENT) 1421 < RADIO_SW_VERSION_CURRENT) {
1422 printk(KERN_WARNING DRIVER_NAME
1423 ": This driver is known to work with "
1424 "firmware version %hu,\n", RADIO_SW_VERSION_CURRENT);
1425 printk(KERN_WARNING DRIVER_NAME
1426 ": but the device has firmware version %hu.\n",
1427 radio->registers[CHIPID] & CHIPID_FIRMWARE);
1428 printk(KERN_WARNING DRIVER_NAME
1429 ": If you have some trouble using this driver,\n");
1338 printk(KERN_WARNING DRIVER_NAME 1430 printk(KERN_WARNING DRIVER_NAME
1339 ": This driver is known to work with chip version %d, " 1431 ": please report to V4L ML at "
1340 "but the device has firmware %d.\n" 1432 "video4linux-list@redhat.com\n");
1341 DRIVER_NAME 1433 }
1342 "If you have some trouble using this driver, please "
1343 "report to V4L ML at video4linux-list@redhat.com\n",
1344 radio->registers[CHIPID] & CHIPID_FIRMWARE,
1345 RADIO_SW_VERSION_CURRENT);
1346 1434
1347 /* set initial frequency */ 1435 /* set initial frequency */
1348 si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */ 1436 si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
1349 1437
1350 /* rds initialization */ 1438 /* rds buffer allocation */
1351 radio->buf_size = rds_buf * 3; 1439 radio->buf_size = rds_buf * 3;
1352 radio->buffer = kmalloc(radio->buf_size, GFP_KERNEL); 1440 radio->buffer = kmalloc(radio->buf_size, GFP_KERNEL);
1353 if (!radio->buffer) { 1441 if (!radio->buffer)
1354 video_device_release(radio->videodev); 1442 goto err_all;
1355 kfree(radio); 1443
1356 return -ENOMEM; 1444 /* rds buffer configuration */
1357 }
1358 radio->wr_index = 0; 1445 radio->wr_index = 0;
1359 radio->rd_index = 0; 1446 radio->rd_index = 0;
1360 init_waitqueue_head(&radio->read_queue); 1447 init_waitqueue_head(&radio->read_queue);
1361 1448
1362 /* prepare polling via eventd */ 1449 /* prepare rds work function */
1363 INIT_WORK(&radio->work, si470x_work); 1450 INIT_DELAYED_WORK(&radio->work, si470x_work);
1364 init_timer(&radio->timer); 1451
1365 radio->timer.function = si470x_timer; 1452 /* register video device */
1366 radio->timer.data = (unsigned long) radio; 1453 if (video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr)) {
1454 printk(KERN_WARNING DRIVER_NAME
1455 ": Could not register video device\n");
1456 goto err_all;
1457 }
1458 usb_set_intfdata(intf, radio);
1459
1460 return 0;
1461err_all:
1462 video_device_release(radio->videodev);
1463 kfree(radio->buffer);
1464err_radio:
1465 kfree(radio);
1466err_initial:
1467 return retval;
1468}
1469
1470
1471/*
1472 * si470x_usb_driver_suspend - suspend the device
1473 */
1474static int si470x_usb_driver_suspend(struct usb_interface *intf,
1475 pm_message_t message)
1476{
1477 struct si470x_device *radio = usb_get_intfdata(intf);
1478
1479 printk(KERN_INFO DRIVER_NAME ": suspending now...\n");
1480
1481 cancel_delayed_work_sync(&radio->work);
1482
1483 return 0;
1484}
1485
1486
1487/*
1488 * si470x_usb_driver_resume - resume the device
1489 */
1490static int si470x_usb_driver_resume(struct usb_interface *intf)
1491{
1492 struct si470x_device *radio = usb_get_intfdata(intf);
1493
1494 printk(KERN_INFO DRIVER_NAME ": resuming now...\n");
1495
1496 mutex_lock(&radio->lock);
1497 if (radio->users && radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS)
1498 schedule_delayed_work(&radio->work,
1499 msecs_to_jiffies(rds_poll_time));
1500 mutex_unlock(&radio->lock);
1367 1501
1368 return 0; 1502 return 0;
1369} 1503}
@@ -1376,15 +1510,11 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
1376{ 1510{
1377 struct si470x_device *radio = usb_get_intfdata(intf); 1511 struct si470x_device *radio = usb_get_intfdata(intf);
1378 1512
1379 del_timer_sync(&radio->timer); 1513 cancel_delayed_work_sync(&radio->work);
1380 flush_scheduled_work();
1381
1382 usb_set_intfdata(intf, NULL); 1514 usb_set_intfdata(intf, NULL);
1383 if (radio) { 1515 video_unregister_device(radio->videodev);
1384 video_unregister_device(radio->videodev); 1516 kfree(radio->buffer);
1385 kfree(radio->buffer); 1517 kfree(radio);
1386 kfree(radio);
1387 }
1388} 1518}
1389 1519
1390 1520
@@ -1392,10 +1522,13 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
1392 * si470x_usb_driver - usb driver interface 1522 * si470x_usb_driver - usb driver interface
1393 */ 1523 */
1394static struct usb_driver si470x_usb_driver = { 1524static struct usb_driver si470x_usb_driver = {
1395 .name = DRIVER_NAME, 1525 .name = DRIVER_NAME,
1396 .probe = si470x_usb_driver_probe, 1526 .probe = si470x_usb_driver_probe,
1397 .disconnect = si470x_usb_driver_disconnect, 1527 .disconnect = si470x_usb_driver_disconnect,
1398 .id_table = si470x_usb_driver_id_table, 1528 .suspend = si470x_usb_driver_suspend,
1529 .resume = si470x_usb_driver_resume,
1530 .id_table = si470x_usb_driver_id_table,
1531 .supports_autosuspend = 1,
1399}; 1532};
1400 1533
1401 1534
@@ -1409,7 +1542,7 @@ static struct usb_driver si470x_usb_driver = {
1409 */ 1542 */
1410static int __init si470x_module_init(void) 1543static int __init si470x_module_init(void)
1411{ 1544{
1412 printk(KERN_INFO DRIVER_DESC "\n"); 1545 printk(KERN_INFO DRIVER_DESC ", Version " DRIVER_VERSION "\n");
1413 return usb_register(&si470x_usb_driver); 1546 return usb_register(&si470x_usb_driver);
1414} 1547}
1415 1548
@@ -1429,4 +1562,4 @@ module_exit(si470x_module_exit);
1429MODULE_LICENSE("GPL"); 1562MODULE_LICENSE("GPL");
1430MODULE_AUTHOR(DRIVER_AUTHOR); 1563MODULE_AUTHOR(DRIVER_AUTHOR);
1431MODULE_DESCRIPTION(DRIVER_DESC); 1564MODULE_DESCRIPTION(DRIVER_DESC);
1432MODULE_VERSION("1.0.4"); 1565MODULE_VERSION(DRIVER_VERSION);
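The si470x changes above drop the timer plus work-queue pair in favour of a single self-rearming delayed work item, which suspend and disconnect cancel synchronously and resume simply reschedules. A minimal sketch of that pattern, with an illustrative structure and poll interval rather than the driver's own:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct rds_poller {
	struct delayed_work work;
};

static void rds_poll(struct work_struct *work)
{
	struct rds_poller *p = container_of(work, struct rds_poller, work.work);

	/* read the hardware here, then re-arm the poll */
	schedule_delayed_work(&p->work, msecs_to_jiffies(40));
}

/* probe/resume:       INIT_DELAYED_WORK(&p->work, rds_poll);
 *                     schedule_delayed_work(&p->work, msecs_to_jiffies(40));
 * suspend/disconnect: cancel_delayed_work_sync(&p->work);
 *                     (replaces del_timer_sync() + flush_scheduled_work()) */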
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index a2e8987a6195..37072a21d8c9 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -4,14 +4,14 @@
4 4
5menuconfig VIDEO_CAPTURE_DRIVERS 5menuconfig VIDEO_CAPTURE_DRIVERS
6 bool "Video capture adapters" 6 bool "Video capture adapters"
7 depends on VIDEO_DEV 7 depends on VIDEO_V4L2
8 default y 8 default y
9 ---help--- 9 ---help---
10 Say Y here to enable selecting the video adapters for 10 Say Y here to enable selecting the video adapters for
11 webcams, analog TV, and hybrid analog/digital TV. 11 webcams, analog TV, and hybrid analog/digital TV.
12 Some of those devices also support FM radio. 12 Some of those devices also support FM radio.
13 13
14if VIDEO_CAPTURE_DRIVERS && VIDEO_DEV 14if VIDEO_CAPTURE_DRIVERS && VIDEO_V4L2
15 15
16config VIDEO_ADV_DEBUG 16config VIDEO_ADV_DEBUG
17 bool "Enable advanced debug functionality" 17 bool "Enable advanced debug functionality"
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 850b8c6f4577..3f209b32eeac 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -10,8 +10,9 @@ msp3400-objs := msp3400-driver.o msp3400-kthreads.o
10 10
11stkwebcam-objs := stk-webcam.o stk-sensor.o 11stkwebcam-objs := stk-webcam.o stk-sensor.o
12 12
13obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o compat_ioctl32.o \ 13obj-$(CONFIG_VIDEO_DEV) += videodev.o compat_ioctl32.o v4l2-int-device.o
14 v4l2-int-device.o 14
15obj-$(CONFIG_VIDEO_V4L2_COMMON) += v4l2-common.o
15 16
16ifeq ($(CONFIG_VIDEO_V4L1_COMPAT),y) 17ifeq ($(CONFIG_VIDEO_V4L1_COMPAT),y)
17 obj-$(CONFIG_VIDEO_DEV) += v4l1-compat.o 18 obj-$(CONFIG_VIDEO_DEV) += v4l1-compat.o
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 907dc62c1783..5404fcc5276d 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -2354,8 +2354,8 @@ static int setup_window(struct bttv_fh *fh, struct bttv *btv,
2354 BUG(); 2354 BUG();
2355 } 2355 }
2356 2356
2357 mutex_lock(&fh->cap.lock); 2357 mutex_lock(&fh->cap.vb_lock);
2358 kfree(fh->ov.clips); 2358 kfree(fh->ov.clips);
2359 fh->ov.clips = clips; 2359 fh->ov.clips = clips;
2360 fh->ov.nclips = n; 2360 fh->ov.nclips = n;
2361 2361
@@ -2376,7 +2376,7 @@ static int setup_window(struct bttv_fh *fh, struct bttv *btv,
2376 bttv_overlay_risc(btv, &fh->ov, fh->ovfmt, new); 2376 bttv_overlay_risc(btv, &fh->ov, fh->ovfmt, new);
2377 retval = bttv_switch_overlay(btv,fh,new); 2377 retval = bttv_switch_overlay(btv,fh,new);
2378 } 2378 }
2379 mutex_unlock(&fh->cap.lock); 2379 mutex_unlock(&fh->cap.vb_lock);
2380 return retval; 2380 return retval;
2381} 2381}
2382 2382
@@ -2576,7 +2576,7 @@ static int bttv_s_fmt_cap(struct file *file, void *priv,
2576 fmt = format_by_fourcc(f->fmt.pix.pixelformat); 2576 fmt = format_by_fourcc(f->fmt.pix.pixelformat);
2577 2577
2578 /* update our state informations */ 2578 /* update our state informations */
2579 mutex_lock(&fh->cap.lock); 2579 mutex_lock(&fh->cap.vb_lock);
2580 fh->fmt = fmt; 2580 fh->fmt = fmt;
2581 fh->cap.field = f->fmt.pix.field; 2581 fh->cap.field = f->fmt.pix.field;
2582 fh->cap.last = V4L2_FIELD_NONE; 2582 fh->cap.last = V4L2_FIELD_NONE;
@@ -2585,7 +2585,7 @@ static int bttv_s_fmt_cap(struct file *file, void *priv,
2585 btv->init.fmt = fmt; 2585 btv->init.fmt = fmt;
2586 btv->init.width = f->fmt.pix.width; 2586 btv->init.width = f->fmt.pix.width;
2587 btv->init.height = f->fmt.pix.height; 2587 btv->init.height = f->fmt.pix.height;
2588 mutex_unlock(&fh->cap.lock); 2588 mutex_unlock(&fh->cap.vb_lock);
2589 2589
2590 return 0; 2590 return 0;
2591} 2591}
@@ -2611,11 +2611,11 @@ static int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf)
2611 unsigned int i; 2611 unsigned int i;
2612 struct bttv_fh *fh = priv; 2612 struct bttv_fh *fh = priv;
2613 2613
2614 mutex_lock(&fh->cap.lock); 2614 mutex_lock(&fh->cap.vb_lock);
2615 retval = videobuf_mmap_setup(&fh->cap, gbuffers, gbufsize, 2615 retval = videobuf_mmap_setup(&fh->cap, gbuffers, gbufsize,
2616 V4L2_MEMORY_MMAP); 2616 V4L2_MEMORY_MMAP);
2617 if (retval < 0) { 2617 if (retval < 0) {
2618 mutex_unlock(&fh->cap.lock); 2618 mutex_unlock(&fh->cap.vb_lock);
2619 return retval; 2619 return retval;
2620 } 2620 }
2621 2621
@@ -2627,7 +2627,7 @@ static int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf)
2627 for (i = 0; i < gbuffers; i++) 2627 for (i = 0; i < gbuffers; i++)
2628 mbuf->offsets[i] = i * gbufsize; 2628 mbuf->offsets[i] = i * gbufsize;
2629 2629
2630 mutex_unlock(&fh->cap.lock); 2630 mutex_unlock(&fh->cap.vb_lock);
2631 return 0; 2631 return 0;
2632} 2632}
2633#endif 2633#endif
@@ -2756,10 +2756,11 @@ static int bttv_overlay(struct file *file, void *f, unsigned int on)
2756 if (!check_alloc_btres(btv, fh, RESOURCE_OVERLAY)) 2756 if (!check_alloc_btres(btv, fh, RESOURCE_OVERLAY))
2757 return -EBUSY; 2757 return -EBUSY;
2758 2758
2759 mutex_lock(&fh->cap.lock); 2759 mutex_lock(&fh->cap.vb_lock);
2760 if (on) { 2760 if (on) {
2761 fh->ov.tvnorm = btv->tvnorm; 2761 fh->ov.tvnorm = btv->tvnorm;
2762 new = videobuf_pci_alloc(sizeof(*new)); 2762 new = videobuf_pci_alloc(sizeof(*new));
2763 new->crop = btv->crop[!!fh->do_crop].rect;
2763 bttv_overlay_risc(btv, &fh->ov, fh->ovfmt, new); 2764 bttv_overlay_risc(btv, &fh->ov, fh->ovfmt, new);
2764 } else { 2765 } else {
2765 new = NULL; 2766 new = NULL;
@@ -2767,7 +2768,7 @@ static int bttv_overlay(struct file *file, void *f, unsigned int on)
2767 2768
2768 /* switch over */ 2769 /* switch over */
2769 retval = bttv_switch_overlay(btv, fh, new); 2770 retval = bttv_switch_overlay(btv, fh, new);
2770 mutex_unlock(&fh->cap.lock); 2771 mutex_unlock(&fh->cap.vb_lock);
2771 return retval; 2772 return retval;
2772} 2773}
2773 2774
@@ -2806,7 +2807,7 @@ static int bttv_s_fbuf(struct file *file, void *f,
2806 } 2807 }
2807 2808
2808 /* ok, accept it */ 2809 /* ok, accept it */
2809 mutex_lock(&fh->cap.lock); 2810 mutex_lock(&fh->cap.vb_lock);
2810 btv->fbuf.base = fb->base; 2811 btv->fbuf.base = fb->base;
2811 btv->fbuf.fmt.width = fb->fmt.width; 2812 btv->fbuf.fmt.width = fb->fmt.width;
2812 btv->fbuf.fmt.height = fb->fmt.height; 2813 btv->fbuf.fmt.height = fb->fmt.height;
@@ -2838,7 +2839,7 @@ static int bttv_s_fbuf(struct file *file, void *f,
2838 retval = bttv_switch_overlay(btv, fh, new); 2839 retval = bttv_switch_overlay(btv, fh, new);
2839 } 2840 }
2840 } 2841 }
2841 mutex_unlock(&fh->cap.lock); 2842 mutex_unlock(&fh->cap.vb_lock);
2842 return retval; 2843 return retval;
2843} 2844}
2844 2845
@@ -3090,7 +3091,7 @@ static int bttv_s_crop(struct file *file, void *f, struct v4l2_crop *crop)
3090 3091
3091 fh->do_crop = 1; 3092 fh->do_crop = 1;
3092 3093
3093 mutex_lock(&fh->cap.lock); 3094 mutex_lock(&fh->cap.vb_lock);
3094 3095
3095 if (fh->width < c.min_scaled_width) { 3096 if (fh->width < c.min_scaled_width) {
3096 fh->width = c.min_scaled_width; 3097 fh->width = c.min_scaled_width;
@@ -3108,7 +3109,7 @@ static int bttv_s_crop(struct file *file, void *f, struct v4l2_crop *crop)
3108 btv->init.height = c.max_scaled_height; 3109 btv->init.height = c.max_scaled_height;
3109 } 3110 }
3110 3111
3111 mutex_unlock(&fh->cap.lock); 3112 mutex_unlock(&fh->cap.vb_lock);
3112 3113
3113 return 0; 3114 return 0;
3114} 3115}
@@ -3177,30 +3178,25 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait)
3177 buf = list_entry(fh->cap.stream.next,struct bttv_buffer,vb.stream); 3178 buf = list_entry(fh->cap.stream.next,struct bttv_buffer,vb.stream);
3178 } else { 3179 } else {
3179 /* read() capture */ 3180 /* read() capture */
3180 mutex_lock(&fh->cap.lock); 3181 mutex_lock(&fh->cap.vb_lock);
3181 if (NULL == fh->cap.read_buf) { 3182 if (NULL == fh->cap.read_buf) {
3182 /* need to capture a new frame */ 3183 /* need to capture a new frame */
3183 if (locked_btres(fh->btv,RESOURCE_VIDEO_STREAM)) { 3184 if (locked_btres(fh->btv,RESOURCE_VIDEO_STREAM))
3184 mutex_unlock(&fh->cap.lock); 3185 goto err;
3185 return POLLERR;
3186 }
3187 fh->cap.read_buf = videobuf_pci_alloc(fh->cap.msize); 3186 fh->cap.read_buf = videobuf_pci_alloc(fh->cap.msize);
3188 if (NULL == fh->cap.read_buf) { 3187 if (NULL == fh->cap.read_buf)
3189 mutex_unlock(&fh->cap.lock); 3188 goto err;
3190 return POLLERR;
3191 }
3192 fh->cap.read_buf->memory = V4L2_MEMORY_USERPTR; 3189 fh->cap.read_buf->memory = V4L2_MEMORY_USERPTR;
3193 field = videobuf_next_field(&fh->cap); 3190 field = videobuf_next_field(&fh->cap);
3194 if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,field)) { 3191 if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,field)) {
3195 kfree (fh->cap.read_buf); 3192 kfree (fh->cap.read_buf);
3196 fh->cap.read_buf = NULL; 3193 fh->cap.read_buf = NULL;
3197 mutex_unlock(&fh->cap.lock); 3194 goto err;
3198 return POLLERR;
3199 } 3195 }
3200 fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf); 3196 fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf);
3201 fh->cap.read_off = 0; 3197 fh->cap.read_off = 0;
3202 } 3198 }
3203 mutex_unlock(&fh->cap.lock); 3199 mutex_unlock(&fh->cap.vb_lock);
3204 buf = (struct bttv_buffer*)fh->cap.read_buf; 3200 buf = (struct bttv_buffer*)fh->cap.read_buf;
3205 } 3201 }
3206 3202
@@ -3209,6 +3205,9 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait)
3209 buf->vb.state == VIDEOBUF_ERROR) 3205 buf->vb.state == VIDEOBUF_ERROR)
3210 return POLLIN|POLLRDNORM; 3206 return POLLIN|POLLRDNORM;
3211 return 0; 3207 return 0;
3208err:
3209 mutex_unlock(&fh->cap.vb_lock);
3210 return POLLERR;
3212} 3211}
3213 3212
3214static int bttv_open(struct inode *inode, struct file *file) 3213static int bttv_open(struct inode *inode, struct file *file)
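The bttv_poll() rework above folds the three unlock-and-return-POLLERR branches into one err label, so the mutex is released in exactly one place. A minimal sketch of that single-exit shape, with the failure conditions passed in as illustrative parameters rather than bttv's own checks:

#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/types.h>

static unsigned int example_poll(struct mutex *lock, bool busy, void *read_buf)
{
	mutex_lock(lock);

	if (busy)		/* any failure path ... */
		goto err;
	if (!read_buf)		/* ... jumps to the same label */
		goto err;

	mutex_unlock(lock);
	return 0;

err:
	mutex_unlock(lock);	/* every error path unlocks here, once */
	return POLLERR;
}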
diff --git a/drivers/media/video/bt8xx/bttv-vbi.c b/drivers/media/video/bt8xx/bttv-vbi.c
index 1f0cc79e2a33..75fa82c7c735 100644
--- a/drivers/media/video/bt8xx/bttv-vbi.c
+++ b/drivers/media/video/bt8xx/bttv-vbi.c
@@ -352,13 +352,13 @@ int bttv_s_fmt_vbi(struct file *file, void *f, struct v4l2_format *frt)
352 because vbi_fmt.end counts field lines times two. */ 352 because vbi_fmt.end counts field lines times two. */
353 end = max(frt->fmt.vbi.start[0], start1) * 2 + 2; 353 end = max(frt->fmt.vbi.start[0], start1) * 2 + 2;
354 354
355 mutex_lock(&fh->vbi.lock); 355 mutex_lock(&fh->vbi.vb_lock);
356 356
357 fh->vbi_fmt.fmt = frt->fmt.vbi; 357 fh->vbi_fmt.fmt = frt->fmt.vbi;
358 fh->vbi_fmt.tvnorm = tvnorm; 358 fh->vbi_fmt.tvnorm = tvnorm;
359 fh->vbi_fmt.end = end; 359 fh->vbi_fmt.end = end;
360 360
361 mutex_unlock(&fh->vbi.lock); 361 mutex_unlock(&fh->vbi.vb_lock);
362 362
363 rc = 0; 363 rc = 0;
364 364
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index 0aedbeaf94cd..e357f415db06 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -609,13 +609,19 @@ static int cx8802_request_acquire(struct cx8802_driver *drv)
609 struct cx88_core *core = drv->core; 609 struct cx88_core *core = drv->core;
610 610
611 /* Fail a request for hardware if the device is busy. */ 611 /* Fail a request for hardware if the device is busy. */
612 if (core->active_type_id != CX88_BOARD_NONE) 612 if (core->active_type_id != CX88_BOARD_NONE &&
613 core->active_type_id != drv->type_id)
613 return -EBUSY; 614 return -EBUSY;
614 615
615 if (drv->advise_acquire) 616 if (drv->advise_acquire)
616 { 617 {
617 core->active_type_id = drv->type_id; 618 mutex_lock(&drv->core->lock);
618 drv->advise_acquire(drv); 619 core->active_ref++;
620 if (core->active_type_id == CX88_BOARD_NONE) {
621 core->active_type_id = drv->type_id;
622 drv->advise_acquire(drv);
623 }
624 mutex_unlock(&drv->core->lock);
619 625
620 mpeg_dbg(1,"%s() Post acquire GPIO=%x\n", __FUNCTION__, cx_read(MO_GP0_IO)); 626 mpeg_dbg(1,"%s() Post acquire GPIO=%x\n", __FUNCTION__, cx_read(MO_GP0_IO));
621 } 627 }
@@ -628,12 +634,14 @@ static int cx8802_request_release(struct cx8802_driver *drv)
628{ 634{
629 struct cx88_core *core = drv->core; 635 struct cx88_core *core = drv->core;
630 636
631 if (drv->advise_release) 637 mutex_lock(&drv->core->lock);
638 if (drv->advise_release && --core->active_ref == 0)
632 { 639 {
633 drv->advise_release(drv); 640 drv->advise_release(drv);
634 core->active_type_id = CX88_BOARD_NONE; 641 core->active_type_id = CX88_BOARD_NONE;
635 mpeg_dbg(1,"%s() Post release GPIO=%x\n", __FUNCTION__, cx_read(MO_GP0_IO)); 642 mpeg_dbg(1,"%s() Post release GPIO=%x\n", __FUNCTION__, cx_read(MO_GP0_IO));
636 } 643 }
644 mutex_unlock(&drv->core->lock);
637 645
638 return 0; 646 return 0;
639} 647}
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index 4e823f2a539a..37e6d2e4002f 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -336,6 +336,7 @@ struct cx88_core {
336 /* cx88-video needs to access cx8802 for hybrid tuner pll access. */ 336 /* cx88-video needs to access cx8802 for hybrid tuner pll access. */
337 struct cx8802_dev *dvbdev; 337 struct cx8802_dev *dvbdev;
338 enum cx88_board_type active_type_id; 338 enum cx88_board_type active_type_id;
339 int active_ref;
339}; 340};
340 341
341struct cx8800_dev; 342struct cx8800_dev;
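The cx88 hunks above replace the single active_type_id hand-off with a mutex-protected reference count, so several sub-drivers can share the hardware while only the first acquire and the last release actually touch the board. A rough sketch of that scheme with illustrative names:

#include <linux/mutex.h>

struct shared_core {
	struct mutex lock;
	int active_ref;
	int active_type;	/* 0 means no owner */
};

static void core_acquire(struct shared_core *c, int type)
{
	mutex_lock(&c->lock);
	c->active_ref++;
	if (c->active_type == 0) {
		c->active_type = type;
		/* first user: advise_acquire(), program GPIOs */
	}
	mutex_unlock(&c->lock);
}

static void core_release(struct shared_core *c)
{
	mutex_lock(&c->lock);
	if (--c->active_ref == 0) {
		/* last user: advise_release(), free the hardware */
		c->active_type = 0;
	}
	mutex_unlock(&c->lock);
}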
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index 941357c4f3f5..8c67f678266a 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -35,7 +35,6 @@
35#include <linux/vmalloc.h> 35#include <linux/vmalloc.h>
36#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
37#include <linux/module.h> 37#include <linux/module.h>
38#include <sound/driver.h>
39#include <sound/core.h> 38#include <sound/core.h>
40#include <sound/pcm.h> 39#include <sound/pcm.h>
41#include <sound/pcm_params.h> 40#include <sound/pcm_params.h>
@@ -270,8 +269,11 @@ static int snd_em28xx_capture_open(struct snd_pcm_substream *substream)
270 dprintk("opening device and trying to acquire exclusive lock\n"); 269 dprintk("opening device and trying to acquire exclusive lock\n");
271 270
272 /* Sets volume, mute, etc */ 271 /* Sets volume, mute, etc */
272
273 dev->mute = 0; 273 dev->mute = 0;
274 mutex_lock(&dev->lock);
274 ret = em28xx_audio_analog_set(dev); 275 ret = em28xx_audio_analog_set(dev);
276 mutex_unlock(&dev->lock);
275 if (ret < 0) 277 if (ret < 0)
276 goto err; 278 goto err;
277 279
@@ -303,7 +305,9 @@ static int snd_em28xx_pcm_close(struct snd_pcm_substream *substream)
303 dprintk("closing device\n"); 305 dprintk("closing device\n");
304 306
305 dev->mute = 1; 307 dev->mute = 1;
308 mutex_lock(&dev->lock);
306 em28xx_audio_analog_set(dev); 309 em28xx_audio_analog_set(dev);
310 mutex_unlock(&dev->lock);
307 311
308 if (dev->adev->users == 0 && dev->adev->shutdown == 1) { 312 if (dev->adev->users == 0 && dev->adev->shutdown == 1) {
309 dprintk("audio users: %d\n", dev->adev->users); 313 dprintk("audio users: %d\n", dev->adev->users);
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 2159d0160df2..aae7753fef11 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -393,15 +393,15 @@ struct em28xx_board em28xx_boards[] = {
393 .input = { { 393 .input = { {
394 .type = EM28XX_VMUX_TELEVISION, 394 .type = EM28XX_VMUX_TELEVISION,
395 .vmux = SAA7115_COMPOSITE2, 395 .vmux = SAA7115_COMPOSITE2,
396 .amux = 1, 396 .amux = EM28XX_AMUX_LINE_IN,
397 }, { 397 }, {
398 .type = EM28XX_VMUX_COMPOSITE1, 398 .type = EM28XX_VMUX_COMPOSITE1,
399 .vmux = SAA7115_COMPOSITE0, 399 .vmux = SAA7115_COMPOSITE0,
400 .amux = 1, 400 .amux = EM28XX_AMUX_LINE_IN,
401 }, { 401 }, {
402 .type = EM28XX_VMUX_SVIDEO, 402 .type = EM28XX_VMUX_SVIDEO,
403 .vmux = SAA7115_SVIDEO3, 403 .vmux = SAA7115_SVIDEO3,
404 .amux = 1, 404 .amux = EM28XX_AMUX_LINE_IN,
405 } }, 405 } },
406 }, 406 },
407}; 407};
@@ -441,6 +441,8 @@ struct usb_device_id em28xx_id_table [] = {
441 .driver_info = EM2820_BOARD_PINNACLE_DVC_90 }, 441 .driver_info = EM2820_BOARD_PINNACLE_DVC_90 },
442 { USB_DEVICE(0x2040, 0x6500), 442 { USB_DEVICE(0x2040, 0x6500),
443 .driver_info = EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900 }, 443 .driver_info = EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900 },
444 { USB_DEVICE(0x2040, 0x6502),
445 .driver_info = EM2880_BOARD_HAUPPAUGE_WINTV_HVR_900 },
444 { USB_DEVICE(0x2040, 0x6513), 446 { USB_DEVICE(0x2040, 0x6513),
445 .driver_info = EM2880_BOARD_HAUPPAUGE_WINTV_HVR_950 }, 447 .driver_info = EM2880_BOARD_HAUPPAUGE_WINTV_HVR_950 },
446 { USB_DEVICE(0x0ccd, 0x0042), 448 { USB_DEVICE(0x0ccd, 0x0042),
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index f6b78357f0e5..7d1537cab867 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -72,7 +72,8 @@ u32 em28xx_request_buffers(struct em28xx *dev, u32 count)
72 const size_t imagesize = PAGE_ALIGN(dev->frame_size); /*needs to be page aligned cause the buffers can be mapped individually! */ 72 const size_t imagesize = PAGE_ALIGN(dev->frame_size); /*needs to be page aligned cause the buffers can be mapped individually! */
73 void *buff = NULL; 73 void *buff = NULL;
74 u32 i; 74 u32 i;
75 em28xx_coredbg("requested %i buffers with size %zi", count, imagesize); 75 em28xx_coredbg("requested %i buffers with size %zi\n",
76 count, imagesize);
76 if (count > EM28XX_NUM_FRAMES) 77 if (count > EM28XX_NUM_FRAMES)
77 count = EM28XX_NUM_FRAMES; 78 count = EM28XX_NUM_FRAMES;
78 79
@@ -150,7 +151,7 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
150 if (reg_debug){ 151 if (reg_debug){
151 printk(ret < 0 ? " failed!\n" : "%02x values: ", ret); 152 printk(ret < 0 ? " failed!\n" : "%02x values: ", ret);
152 for (byte = 0; byte < len; byte++) { 153 for (byte = 0; byte < len; byte++) {
153 printk(" %02x", buf[byte]); 154 printk(" %02x", (unsigned char)buf[byte]);
154 } 155 }
155 printk("\n"); 156 printk("\n");
156 } 157 }
@@ -177,7 +178,8 @@ int em28xx_read_reg_req(struct em28xx *dev, u8 req, u16 reg)
177 0x0000, reg, &val, 1, HZ); 178 0x0000, reg, &val, 1, HZ);
178 179
179 if (reg_debug) 180 if (reg_debug)
180 printk(ret < 0 ? " failed!\n" : "%02x\n", val); 181 printk(ret < 0 ? " failed!\n" :
182 "%02x\n", (unsigned char) val);
181 183
182 if (ret < 0) 184 if (ret < 0)
183 return ret; 185 return ret;
@@ -237,7 +239,7 @@ int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len)
237 * sets only some bits (specified by bitmask) of a register, by first reading 239 * sets only some bits (specified by bitmask) of a register, by first reading
238 * the actual value 240 * the actual value
239 */ 241 */
240int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val, 242static int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
241 u8 bitmask) 243 u8 bitmask)
242{ 244{
243 int oldval; 245 int oldval;
@@ -254,26 +256,31 @@ int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
254 */ 256 */
255static int em28xx_write_ac97(struct em28xx *dev, u8 reg, u8 *val) 257static int em28xx_write_ac97(struct em28xx *dev, u8 reg, u8 *val)
256{ 258{
257 int ret; 259 int ret, i;
258 u8 addr = reg & 0x7f; 260 u8 addr = reg & 0x7f;
259 if ((ret = em28xx_write_regs(dev, AC97LSB_REG, val, 2)) < 0) 261 if ((ret = em28xx_write_regs(dev, AC97LSB_REG, val, 2)) < 0)
260 return ret; 262 return ret;
261 if ((ret = em28xx_write_regs(dev, AC97ADDR_REG, &addr, 1)) < 0) 263 if ((ret = em28xx_write_regs(dev, AC97ADDR_REG, &addr, 1)) < 0)
262 return ret; 264 return ret;
263 if ((ret = em28xx_read_reg(dev, AC97BUSY_REG)) < 0) 265
264 return ret; 266 /* Wait up to 50 ms for AC97 command to complete */
265 else if (((u8) ret) & 0x01) { 267 for (i = 0; i < 10; i++) {
266 em28xx_warn ("AC97 command still being executed: not handled properly!\n"); 268 if ((ret = em28xx_read_reg(dev, AC97BUSY_REG)) < 0)
269 return ret;
270 if (!((u8) ret) & 0x01)
271 return 0;
272 msleep(5);
267 } 273 }
274 em28xx_warn ("AC97 command still being executed: not handled properly!\n");
268 return 0; 275 return 0;
269} 276}
270 277
271int em28xx_set_audio_source(struct em28xx *dev) 278static int em28xx_set_audio_source(struct em28xx *dev)
272{ 279{
273 static char *enable = "\x08\x08"; 280 static char *enable = "\x08\x08";
274 static char *disable = "\x08\x88"; 281 static char *disable = "\x08\x88";
275 char *video = enable, *line = disable; 282 char *video = enable, *line = disable;
276 int ret, no_ac97; 283 int ret;
277 u8 input; 284 u8 input;
278 285
279 if (dev->is_em2800) { 286 if (dev->is_em2800) {
@@ -293,11 +300,9 @@ int em28xx_set_audio_source(struct em28xx *dev)
293 switch (dev->ctl_ainput) { 300 switch (dev->ctl_ainput) {
294 case EM28XX_AMUX_VIDEO: 301 case EM28XX_AMUX_VIDEO:
295 input = EM28XX_AUDIO_SRC_TUNER; 302 input = EM28XX_AUDIO_SRC_TUNER;
296 no_ac97 = 1;
297 break; 303 break;
298 case EM28XX_AMUX_LINE_IN: 304 case EM28XX_AMUX_LINE_IN:
299 input = EM28XX_AUDIO_SRC_LINE; 305 input = EM28XX_AUDIO_SRC_LINE;
300 no_ac97 = 1;
301 break; 306 break;
302 case EM28XX_AMUX_AC97_VIDEO: 307 case EM28XX_AMUX_AC97_VIDEO:
303 input = EM28XX_AUDIO_SRC_LINE; 308 input = EM28XX_AUDIO_SRC_LINE;
@@ -313,12 +318,11 @@ int em28xx_set_audio_source(struct em28xx *dev)
313 ret = em28xx_write_reg_bits(dev, AUDIOSRC_REG, input, 0xc0); 318 ret = em28xx_write_reg_bits(dev, AUDIOSRC_REG, input, 0xc0);
314 if (ret < 0) 319 if (ret < 0)
315 return ret; 320 return ret;
321 msleep(5);
316 322
317 if (no_ac97) 323 /* Sets AC97 mixer registers
318 return 0; 324 This seems to be needed, even for non-ac97 configs
319 325 */
320 /* Sets AC97 mixer registers */
321
322 ret = em28xx_write_ac97(dev, VIDEO_AC97, video); 326 ret = em28xx_write_ac97(dev, VIDEO_AC97, video);
323 if (ret < 0) 327 if (ret < 0)
324 return ret; 328 return ret;
@@ -337,9 +341,10 @@ int em28xx_audio_analog_set(struct em28xx *dev)
337 s[0] |= 0x1f - dev->volume; 341 s[0] |= 0x1f - dev->volume;
338 s[1] |= 0x1f - dev->volume; 342 s[1] |= 0x1f - dev->volume;
339 343
340 if (dev->mute) 344 /* Mute */
341 s[1] |= 0x80; 345 s[1] |= 0x80;
342 ret = em28xx_write_ac97(dev, MASTER_AC97, s); 346 ret = em28xx_write_ac97(dev, MASTER_AC97, s);
347
343 if (ret < 0) 348 if (ret < 0)
344 return ret; 349 return ret;
345 350
@@ -357,6 +362,11 @@ int em28xx_audio_analog_set(struct em28xx *dev)
357 /* Selects the proper audio input */ 362 /* Selects the proper audio input */
358 ret = em28xx_set_audio_source(dev); 363 ret = em28xx_set_audio_source(dev);
359 364
365 /* Unmute device */
366 if (!dev->mute)
367 s[1] &= ~0x80;
368 ret = em28xx_write_ac97(dev, MASTER_AC97, s);
369
360 return ret; 370 return ret;
361} 371}
362EXPORT_SYMBOL_GPL(em28xx_audio_analog_set); 372EXPORT_SYMBOL_GPL(em28xx_audio_analog_set);
@@ -667,7 +677,7 @@ static void em28xx_isocIrq(struct urb *urb)
667 continue; 677 continue;
668 } 678 }
669 if (urb->iso_frame_desc[i].actual_length > 679 if (urb->iso_frame_desc[i].actual_length >
670 dev->max_pkt_size) { 680 urb->iso_frame_desc[i].length) {
671 em28xx_isocdbg("packet bigger than packet size"); 681 em28xx_isocdbg("packet bigger than packet size");
672 continue; 682 continue;
673 } 683 }
@@ -713,8 +723,11 @@ void em28xx_uninit_isoc(struct em28xx *dev)
713 for (i = 0; i < EM28XX_NUM_BUFS; i++) { 723 for (i = 0; i < EM28XX_NUM_BUFS; i++) {
714 if (dev->urb[i]) { 724 if (dev->urb[i]) {
715 usb_kill_urb(dev->urb[i]); 725 usb_kill_urb(dev->urb[i]);
716 if (dev->transfer_buffer[i]){ 726 if (dev->transfer_buffer[i]) {
717 usb_buffer_free(dev->udev,(EM28XX_NUM_PACKETS*dev->max_pkt_size),dev->transfer_buffer[i],dev->urb[i]->transfer_dma); 727 usb_buffer_free(dev->udev,
728 dev->urb[i]->transfer_buffer_length,
729 dev->transfer_buffer[i],
730 dev->urb[i]->transfer_dma);
718 } 731 }
719 usb_free_urb(dev->urb[i]); 732 usb_free_urb(dev->urb[i]);
720 } 733 }
@@ -732,7 +745,10 @@ int em28xx_init_isoc(struct em28xx *dev)
732{ 745{
733 /* change interface to 3 which allows the biggest packet sizes */ 746 /* change interface to 3 which allows the biggest packet sizes */
734 int i, errCode; 747 int i, errCode;
735 const int sb_size = EM28XX_NUM_PACKETS * dev->max_pkt_size; 748 int sb_size;
749
750 em28xx_set_alternate(dev);
751 sb_size = EM28XX_NUM_PACKETS * dev->max_pkt_size;
736 752
737 /* reset streaming vars */ 753 /* reset streaming vars */
738 dev->frame_current = NULL; 754 dev->frame_current = NULL;
@@ -741,7 +757,7 @@ int em28xx_init_isoc(struct em28xx *dev)
741 /* allocate urbs */ 757 /* allocate urbs */
742 for (i = 0; i < EM28XX_NUM_BUFS; i++) { 758 for (i = 0; i < EM28XX_NUM_BUFS; i++) {
743 struct urb *urb; 759 struct urb *urb;
744 int j, k; 760 int j;
745 /* allocate transfer buffer */ 761 /* allocate transfer buffer */
746 urb = usb_alloc_urb(EM28XX_NUM_PACKETS, GFP_KERNEL); 762 urb = usb_alloc_urb(EM28XX_NUM_PACKETS, GFP_KERNEL);
747 if (!urb){ 763 if (!urb){
@@ -749,7 +765,9 @@ int em28xx_init_isoc(struct em28xx *dev)
749 em28xx_uninit_isoc(dev); 765 em28xx_uninit_isoc(dev);
750 return -ENOMEM; 766 return -ENOMEM;
751 } 767 }
752 dev->transfer_buffer[i] = usb_buffer_alloc(dev->udev, sb_size, GFP_KERNEL,&urb->transfer_dma); 768 dev->transfer_buffer[i] = usb_buffer_alloc(dev->udev, sb_size,
769 GFP_KERNEL,
770 &urb->transfer_dma);
753 if (!dev->transfer_buffer[i]) { 771 if (!dev->transfer_buffer[i]) {
754 em28xx_errdev 772 em28xx_errdev
755 ("unable to allocate %i bytes for transfer buffer %i\n", 773 ("unable to allocate %i bytes for transfer buffer %i\n",
@@ -762,22 +780,22 @@ int em28xx_init_isoc(struct em28xx *dev)
762 urb->dev = dev->udev; 780 urb->dev = dev->udev;
763 urb->context = dev; 781 urb->context = dev;
764 urb->pipe = usb_rcvisocpipe(dev->udev, 0x82); 782 urb->pipe = usb_rcvisocpipe(dev->udev, 0x82);
765 urb->transfer_flags = URB_ISO_ASAP; 783 urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
766 urb->interval = 1; 784 urb->interval = 1;
767 urb->transfer_buffer = dev->transfer_buffer[i]; 785 urb->transfer_buffer = dev->transfer_buffer[i];
768 urb->complete = em28xx_isocIrq; 786 urb->complete = em28xx_isocIrq;
769 urb->number_of_packets = EM28XX_NUM_PACKETS; 787 urb->number_of_packets = EM28XX_NUM_PACKETS;
770 urb->transfer_buffer_length = sb_size; 788 urb->transfer_buffer_length = sb_size;
771 for (j = k = 0; j < EM28XX_NUM_PACKETS; 789 for (j = 0; j < EM28XX_NUM_PACKETS; j++) {
772 j++, k += dev->max_pkt_size) { 790 urb->iso_frame_desc[j].offset = j * dev->max_pkt_size;
773 urb->iso_frame_desc[j].offset = k; 791 urb->iso_frame_desc[j].length = dev->max_pkt_size;
774 urb->iso_frame_desc[j].length =
775 dev->max_pkt_size;
776 } 792 }
777 dev->urb[i] = urb; 793 dev->urb[i] = urb;
778 } 794 }
779 795
780 /* submit urbs */ 796 /* submit urbs */
797 em28xx_coredbg("Submitting %d urbs of %d packets (%d each)\n",
798 EM28XX_NUM_BUFS, EM28XX_NUM_PACKETS, dev->max_pkt_size);
781 for (i = 0; i < EM28XX_NUM_BUFS; i++) { 799 for (i = 0; i < EM28XX_NUM_BUFS; i++) {
782 errCode = usb_submit_urb(dev->urb[i], GFP_KERNEL); 800 errCode = usb_submit_urb(dev->urb[i], GFP_KERNEL);
783 if (errCode) { 801 if (errCode) {
@@ -794,22 +812,31 @@ int em28xx_init_isoc(struct em28xx *dev)
794int em28xx_set_alternate(struct em28xx *dev) 812int em28xx_set_alternate(struct em28xx *dev)
795{ 813{
796 int errCode, prev_alt = dev->alt; 814 int errCode, prev_alt = dev->alt;
797 dev->alt = alt; 815 int i;
798 if (dev->alt == 0) { 816 unsigned int min_pkt_size = dev->bytesperline+4;
799 int i; 817
800 for(i=0;i< dev->num_alt; i++) 818 /* When image size is bigger than a certain value,
801 if(dev->alt_max_pkt_size[i]>dev->alt_max_pkt_size[dev->alt]) 819 the frame size should be increased; otherwise, only a
802 dev->alt=i; 820 green screen will be received.
803 } 821 */
822 if (dev->frame_size > 720*240*2)
823 min_pkt_size *= 2;
824
825 for (i = 0; i < dev->num_alt; i++)
826 if (dev->alt_max_pkt_size[i] >= min_pkt_size)
827 break;
828 dev->alt = i;
804 829
805 if (dev->alt != prev_alt) { 830 if (dev->alt != prev_alt) {
831 em28xx_coredbg("minimum isoc packet size: %u (alt=%d)\n",
832 min_pkt_size, dev->alt);
806 dev->max_pkt_size = dev->alt_max_pkt_size[dev->alt]; 833 dev->max_pkt_size = dev->alt_max_pkt_size[dev->alt];
807 em28xx_coredbg("setting alternate %d with wMaxPacketSize=%u\n", dev->alt, 834 em28xx_coredbg("setting alternate %d with wMaxPacketSize=%u\n",
808 dev->max_pkt_size); 835 dev->alt, dev->max_pkt_size);
809 errCode = usb_set_interface(dev->udev, 0, dev->alt); 836 errCode = usb_set_interface(dev->udev, 0, dev->alt);
810 if (errCode < 0) { 837 if (errCode < 0) {
811 em28xx_errdev ("cannot change alternate number to %d (error=%i)\n", 838 em28xx_errdev ("cannot change alternate number to %d (error=%i)\n",
812 dev->alt, errCode); 839 dev->alt, errCode);
813 return errCode; 840 return errCode;
814 } 841 }
815 } 842 }
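em28xx_set_alternate() above now scans the interface's alternate settings for the first one whose isochronous packet size can carry at least one video line (doubled for large frames), instead of defaulting to the largest. A sketch of that selection, with the packet-size table passed in explicitly since the device structure is not shown here, and a fallback clamp added for the sketch:

static int pick_alt(const unsigned int *alt_max_pkt_size, int num_alt,
		    unsigned int bytesperline, unsigned int frame_size)
{
	unsigned int min_pkt_size = bytesperline + 4;
	int i;

	if (frame_size > 720 * 240 * 2)		/* big frames need double bandwidth */
		min_pkt_size *= 2;

	for (i = 0; i < num_alt; i++)
		if (alt_max_pkt_size[i] >= min_pkt_size)
			break;

	if (i == num_alt)			/* nothing big enough: fall back */
		i = num_alt - 1;		/* to the last listed alternate */

	return i;	/* caller feeds this to usb_set_interface() */
}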
diff --git a/drivers/media/video/em28xx/em28xx-video.c b/drivers/media/video/em28xx/em28xx-video.c
index a0c334672488..4abe6701a770 100644
--- a/drivers/media/video/em28xx/em28xx-video.c
+++ b/drivers/media/video/em28xx/em28xx-video.c
@@ -189,7 +189,7 @@ static void video_mux(struct em28xx *dev, int index)
189 em28xx_i2c_call_clients(dev, VIDIOC_INT_S_AUDIO_ROUTING, &route); 189 em28xx_i2c_call_clients(dev, VIDIOC_INT_S_AUDIO_ROUTING, &route);
190 } 190 }
191 191
192 em28xx_set_audio_source(dev); 192 em28xx_audio_analog_set(dev);
193} 193}
194 194
195/* Usage lock check functions */ 195/* Usage lock check functions */
@@ -830,6 +830,63 @@ static int vidioc_s_frequency(struct file *file, void *priv,
830 return 0; 830 return 0;
831} 831}
832 832
833#ifdef CONFIG_VIDEO_ADV_DEBUG
834static int em28xx_reg_len(int reg)
835{
836 switch (reg) {
837 case AC97LSB_REG:
838 case HSCALELOW_REG:
839 case VSCALELOW_REG:
840 return 2;
841 default:
842 return 1;
843 }
844}
845
846static int vidioc_g_register(struct file *file, void *priv,
847 struct v4l2_register *reg)
848{
849 struct em28xx_fh *fh = priv;
850 struct em28xx *dev = fh->dev;
851 int ret;
852
853 if (!v4l2_chip_match_host(reg->match_type, reg->match_chip))
854 return -EINVAL;
855
856 if (em28xx_reg_len(reg->reg) == 1) {
857 ret = em28xx_read_reg(dev, reg->reg);
858 if (ret < 0)
859 return ret;
860
861 reg->val = ret;
862 } else {
863 u64 val = 0;
864 ret = em28xx_read_reg_req_len(dev, USB_REQ_GET_STATUS,
865 reg->reg, (char *)&val, 2);
866 if (ret < 0)
867 return ret;
868
869 reg->val = cpu_to_le64((__u64)val);
870 }
871
872 return 0;
873}
874
875static int vidioc_s_register(struct file *file, void *priv,
876 struct v4l2_register *reg)
877{
878 struct em28xx_fh *fh = priv;
879 struct em28xx *dev = fh->dev;
880 u64 buf;
881
882 buf = le64_to_cpu((__u64)reg->val);
883
884 return em28xx_write_regs(dev, reg->reg, (char *)&buf,
885 em28xx_reg_len(reg->reg));
886}
887#endif
888
889
833static int vidioc_cropcap(struct file *file, void *priv, 890static int vidioc_cropcap(struct file *file, void *priv,
834 struct v4l2_cropcap *cc) 891 struct v4l2_cropcap *cc)
835{ 892{
@@ -1295,8 +1352,6 @@ static int em28xx_v4l2_open(struct inode *inode, struct file *filp)
1295 filp->private_data = fh; 1352 filp->private_data = fh;
1296 1353
1297 if (dev->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) { 1354 if (dev->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && dev->users == 0) {
1298 em28xx_set_alternate(dev);
1299
1300 dev->width = norm_maxw(dev); 1355 dev->width = norm_maxw(dev);
1301 dev->height = norm_maxh(dev); 1356 dev->height = norm_maxh(dev);
1302 dev->frame_size = dev->width * dev->height * 2; 1357 dev->frame_size = dev->width * dev->height * 2;
@@ -1305,6 +1360,7 @@ static int em28xx_v4l2_open(struct inode *inode, struct file *filp)
1305 dev->hscale = 0; 1360 dev->hscale = 0;
1306 dev->vscale = 0; 1361 dev->vscale = 0;
1307 1362
1363 em28xx_set_alternate(dev);
1308 em28xx_capture_start(dev, 1); 1364 em28xx_capture_start(dev, 1);
1309 em28xx_resolution_set(dev); 1365 em28xx_resolution_set(dev);
1310 1366
@@ -1730,6 +1786,10 @@ static const struct video_device em28xx_video_template = {
1730 .vidioc_s_tuner = vidioc_s_tuner, 1786 .vidioc_s_tuner = vidioc_s_tuner,
1731 .vidioc_g_frequency = vidioc_g_frequency, 1787 .vidioc_g_frequency = vidioc_g_frequency,
1732 .vidioc_s_frequency = vidioc_s_frequency, 1788 .vidioc_s_frequency = vidioc_s_frequency,
1789#ifdef CONFIG_VIDEO_ADV_DEBUG
1790 .vidioc_g_register = vidioc_g_register,
1791 .vidioc_s_register = vidioc_s_register,
1792#endif
1733 1793
1734 .tvnorms = V4L2_STD_ALL, 1794 .tvnorms = V4L2_STD_ALL,
1735 .current_norm = V4L2_STD_PAL, 1795 .current_norm = V4L2_STD_PAL,
@@ -1752,6 +1812,10 @@ static struct video_device em28xx_radio_template = {
1752 .vidioc_s_ctrl = vidioc_s_ctrl, 1812 .vidioc_s_ctrl = vidioc_s_ctrl,
1753 .vidioc_g_frequency = vidioc_g_frequency, 1813 .vidioc_g_frequency = vidioc_g_frequency,
1754 .vidioc_s_frequency = vidioc_s_frequency, 1814 .vidioc_s_frequency = vidioc_s_frequency,
1815#ifdef CONFIG_VIDEO_ADV_DEBUG
1816 .vidioc_g_register = vidioc_g_register,
1817 .vidioc_s_register = vidioc_s_register,
1818#endif
1755}; 1819};
1756 1820
1757/******************************** usb interface *****************************************/ 1821/******************************** usb interface *****************************************/
@@ -1796,10 +1860,10 @@ void em28xx_unregister_extension(struct em28xx_ops *ops)
1796} 1860}
1797EXPORT_SYMBOL(em28xx_unregister_extension); 1861EXPORT_SYMBOL(em28xx_unregister_extension);
1798 1862
1799struct video_device *em28xx_vdev_init(struct em28xx *dev, 1863static struct video_device *em28xx_vdev_init(struct em28xx *dev,
1800 const struct video_device *template, 1864 const struct video_device *template,
1801 const int type, 1865 const int type,
1802 const char *type_name) 1866 const char *type_name)
1803{ 1867{
1804 struct video_device *vfd; 1868 struct video_device *vfd;
1805 1869
@@ -2064,6 +2128,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
2064 snprintf(dev->name, 29, "em28xx #%d", nr); 2128 snprintf(dev->name, 29, "em28xx #%d", nr);
2065 dev->devno = nr; 2129 dev->devno = nr;
2066 dev->model = id->driver_info; 2130 dev->model = id->driver_info;
2131 dev->alt = -1;
2067 2132
2068 /* Checks if audio is provided by some interface */ 2133 /* Checks if audio is provided by some interface */
2069 for (i = 0; i < udev->config->desc.bNumInterfaces; i++) { 2134 for (i = 0; i < udev->config->desc.bNumInterfaces; i++) {
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index f3bad0c1c517..04e0e48ecabe 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -33,7 +33,7 @@
33#define UNSET -1 33#define UNSET -1
34 34
35/* maximum number of em28xx boards */ 35/* maximum number of em28xx boards */
36#define EM28XX_MAXBOARDS 1 /*FIXME: should be bigger */ 36#define EM28XX_MAXBOARDS 4 /*FIXME: should be bigger */
37 37
38/* maximum number of frames that can be queued */ 38/* maximum number of frames that can be queued */
39#define EM28XX_NUM_FRAMES 5 39#define EM28XX_NUM_FRAMES 5
@@ -345,9 +345,6 @@ int em28xx_read_reg(struct em28xx *dev, u16 reg);
345int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf, 345int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
346 int len); 346 int len);
347int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len); 347int em28xx_write_regs(struct em28xx *dev, u16 reg, char *buf, int len);
348int em28xx_write_reg_bits(struct em28xx *dev, u16 reg, u8 val,
349 u8 bitmask);
350int em28xx_set_audio_source(struct em28xx *dev);
351int em28xx_audio_analog_set(struct em28xx *dev); 348int em28xx_audio_analog_set(struct em28xx *dev);
352 349
353int em28xx_colorlevels_set_default(struct em28xx *dev); 350int em28xx_colorlevels_set_default(struct em28xx *dev);
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 7d7f383b404f..262830da08c8 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -928,27 +928,38 @@ struct saa7134_board saa7134_boards[] = {
928 .tuner_addr = ADDR_UNSET, 928 .tuner_addr = ADDR_UNSET,
929 .radio_addr = ADDR_UNSET, 929 .radio_addr = ADDR_UNSET,
930 .tda9887_conf = TDA9887_PRESENT, 930 .tda9887_conf = TDA9887_PRESENT,
931 .gpiomask = 0x03,
931 .inputs = {{ 932 .inputs = {{
932 .name = name_tv, 933 .name = name_tv,
933 .vmux = 1, 934 .vmux = 1,
934 .amux = TV, 935 .amux = TV,
935 .tv = 1, 936 .tv = 1,
936 },{ 937 .gpio = 0x00,
938 }, {
937 .name = name_comp1, 939 .name = name_comp1,
938 .vmux = 0,
939 .amux = LINE2,
940 },{
941 .name = name_comp2,
942 .vmux = 3, 940 .vmux = 3,
943 .amux = LINE2, 941 .amux = LINE1,
944 },{ 942 .gpio = 0x02,
943 }, {
944 .name = name_comp2,
945 .vmux = 0,
946 .amux = LINE1,
947 .gpio = 0x02,
948 }, {
945 .name = name_svideo, 949 .name = name_svideo,
946 .vmux = 8, 950 .vmux = 8,
947 .amux = LINE2, 951 .amux = LINE1,
948 }}, 952 .gpio = 0x02,
953 } },
949 .radio = { 954 .radio = {
950 .name = name_radio, 955 .name = name_radio,
951 .amux = LINE2, 956 .amux = LINE1,
957 .gpio = 0x01,
958 },
959 .mute = {
960 .name = name_mute,
961 .amux = TV,
962 .gpio = 0x00,
952 }, 963 },
953 }, 964 },
954 [SAA7134_BOARD_BMK_MPEX_TUNER] = { 965 [SAA7134_BOARD_BMK_MPEX_TUNER] = {
@@ -3912,6 +3923,74 @@ struct saa7134_board saa7134_boards[] = {
3912 }, 3923 },
3913 .mpeg = SAA7134_MPEG_EMPRESS, 3924 .mpeg = SAA7134_MPEG_EMPRESS,
3914 }, 3925 },
3926 [SAA7134_BOARD_TWINHAN_DTV_DVB_3056] = {
3927 .name = "Twinhan Hybrid DTV-DVB 3056 PCI",
3928 .audio_clock = 0x00187de7,
3929 .tuner_type = TUNER_PHILIPS_TDA8290,
3930 .radio_type = UNSET,
3931 .tuner_addr = ADDR_UNSET,
3932 .radio_addr = ADDR_UNSET,
3933 .tuner_config = 2,
3934 .mpeg = SAA7134_MPEG_DVB,
3935 .gpiomask = 0x0200000,
3936 .inputs = {{
3937 .name = name_tv,
3938 .vmux = 1,
3939 .amux = TV,
3940 .tv = 1,
3941 }, {
3942 .name = name_comp1,
3943 .vmux = 3,
3944 .amux = LINE1,
3945 }, {
3946 .name = name_svideo,
3947 .vmux = 8, /* untested */
3948 .amux = LINE1,
3949 } },
3950 .radio = {
3951 .name = name_radio,
3952 .amux = TV,
3953 .gpio = 0x0200000,
3954 },
3955 },
3956 [SAA7134_BOARD_GENIUS_TVGO_A11MCE] = {
3957 /* Adrian Pardini <pardo.bsso@gmail.com> */
3958 .name = "Genius TVGO AM11MCE",
3959 .audio_clock = 0x00200000,
3960 .tuner_type = TUNER_TNF_5335MF,
3961 .radio_type = UNSET,
3962 .tuner_addr = ADDR_UNSET,
3963 .radio_addr = ADDR_UNSET,
3964 .gpiomask = 0xf000,
3965 .inputs = {{
3966 .name = name_tv_mono,
3967 .vmux = 1,
3968 .amux = LINE2,
3969 .gpio = 0x0000,
3970 .tv = 1,
3971 }, {
3972 .name = name_comp1,
3973 .vmux = 3,
3974 .amux = LINE1,
3975 .gpio = 0x2000,
3976 .tv = 1
3977 }, {
3978 .name = name_svideo,
3979 .vmux = 8,
3980 .amux = LINE1,
3981 .gpio = 0x2000,
3982 } },
3983 .radio = {
3984 .name = name_radio,
3985 .amux = LINE2,
3986 .gpio = 0x1000,
3987 },
3988 .mute = {
3989 .name = name_mute,
3990 .amux = LINE2,
3991 .gpio = 0x6000,
3992 },
3993 },
3915}; 3994};
3916 3995
3917const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards); 3996const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -4511,6 +4590,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
4511 },{ 4590 },{
4512 .vendor = PCI_VENDOR_ID_PHILIPS, 4591 .vendor = PCI_VENDOR_ID_PHILIPS,
4513 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, 4592 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
4593 .subvendor = 0x5168,
4594 .subdevice = 0x3307, /* FlyDVB-T Hybrid Mini PCI */
4595 .driver_data = SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS,
4596 }, {
4597 .vendor = PCI_VENDOR_ID_PHILIPS,
4598 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
4514 .subvendor = 0x16be, 4599 .subvendor = 0x16be,
4515 .subdevice = 0x0007, 4600 .subdevice = 0x0007,
4516 .driver_data = SAA7134_BOARD_MEDION_MD8800_QUADRO, 4601 .driver_data = SAA7134_BOARD_MEDION_MD8800_QUADRO,
@@ -4523,6 +4608,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
4523 },{ 4608 },{
4524 .vendor = PCI_VENDOR_ID_PHILIPS, 4609 .vendor = PCI_VENDOR_ID_PHILIPS,
4525 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, 4610 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
4611 .subvendor = 0x16be,
4612 .subdevice = 0x000d, /* triple CTX948_V1.1.1 */
4613 .driver_data = SAA7134_BOARD_MEDION_MD8800_QUADRO,
4614 }, {
4615 .vendor = PCI_VENDOR_ID_PHILIPS,
4616 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
4526 .subvendor = 0x1461, 4617 .subvendor = 0x1461,
4527 .subdevice = 0x2c05, 4618 .subdevice = 0x2c05,
4528 .driver_data = SAA7134_BOARD_AVERMEDIA_777, 4619 .driver_data = SAA7134_BOARD_AVERMEDIA_777,
@@ -4843,7 +4934,13 @@ struct pci_device_id saa7134_pci_tbl[] = {
4843 .device = PCI_DEVICE_ID_PHILIPS_SAA7133, 4934 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
4844 .subvendor = 0x4e42, 4935 .subvendor = 0x4e42,
4845 .subdevice = 0x3502, 4936 .subdevice = 0x3502,
4846 .driver_data = SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS 4937 .driver_data = SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS,
4938 }, {
4939 .vendor = PCI_VENDOR_ID_PHILIPS,
4940 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
4941 .subvendor = 0x1822, /*Twinhan Technology Co. Ltd*/
4942 .subdevice = 0x0022,
4943 .driver_data = SAA7134_BOARD_TWINHAN_DTV_DVB_3056,
4847 },{ 4944 },{
4848 /* --- boards without eeprom + subsystem ID --- */ 4945 /* --- boards without eeprom + subsystem ID --- */
4849 .vendor = PCI_VENDOR_ID_PHILIPS, 4946 .vendor = PCI_VENDOR_ID_PHILIPS,
@@ -4995,6 +5092,7 @@ int saa7134_board_init1(struct saa7134_dev *dev)
4995 case SAA7134_BOARD_BEHOLD_409: 5092 case SAA7134_BOARD_BEHOLD_409:
4996 case SAA7134_BOARD_BEHOLD_505FM: 5093 case SAA7134_BOARD_BEHOLD_505FM:
4997 case SAA7134_BOARD_BEHOLD_507_9FM: 5094 case SAA7134_BOARD_BEHOLD_507_9FM:
5095 case SAA7134_BOARD_GENIUS_TVGO_A11MCE:
4998 dev->has_remote = SAA7134_REMOTE_GPIO; 5096 dev->has_remote = SAA7134_REMOTE_GPIO;
4999 break; 5097 break;
5000 case SAA7134_BOARD_FLYDVBS_LR300: 5098 case SAA7134_BOARD_FLYDVBS_LR300:
@@ -5232,7 +5330,8 @@ int saa7134_board_init2(struct saa7134_dev *dev)
5232 case SAA7134_BOARD_ASUSTeK_P7131_DUAL: 5330 case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
5233 case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA: 5331 case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
5234 case SAA7134_BOARD_MEDION_MD8800_QUADRO: 5332 case SAA7134_BOARD_MEDION_MD8800_QUADRO:
5235 case SAA7134_BOARD_AVERMEDIA_SUPER_007: 5333 case SAA7134_BOARD_AVERMEDIA_SUPER_007:
5334 case SAA7134_BOARD_TWINHAN_DTV_DVB_3056:
5236 /* this is a hybrid board, initialize to analog mode 5335 /* this is a hybrid board, initialize to analog mode
5237 * and configure firmware eeprom address 5336 * and configure firmware eeprom address
5238 */ 5337 */
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index a9ca5730826f..ea2be9eceeb8 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -779,6 +779,21 @@ static struct tda1004x_config avermedia_super_007_config = {
779 .request_firmware = philips_tda1004x_request_firmware 779 .request_firmware = philips_tda1004x_request_firmware
780}; 780};
781 781
782static struct tda1004x_config twinhan_dtv_dvb_3056_config = {
783 .demod_address = 0x08,
784 .invert = 1,
785 .invert_oclk = 0,
786 .xtal_freq = TDA10046_XTAL_16M,
787 .agc_config = TDA10046_AGC_TDA827X,
788 .gpio_config = TDA10046_GP01_I,
789 .if_freq = TDA10046_FREQ_045,
790 .i2c_gate = 0x42,
791 .tuner_address = 0x61,
792 .tuner_config = 2,
793 .antenna_switch = 1,
794 .request_firmware = philips_tda1004x_request_firmware
795};
796
782/* ------------------------------------------------------------------ 797/* ------------------------------------------------------------------
783 * special case: this card uses saa713x GPIO22 for the mode switch 798 * special case: this card uses saa713x GPIO22 for the mode switch
784 */ 799 */
@@ -826,6 +841,7 @@ static struct tda1004x_config ads_tech_duo_config = {
826static struct tda10086_config flydvbs = { 841static struct tda10086_config flydvbs = {
827 .demod_address = 0x0e, 842 .demod_address = 0x0e,
828 .invert = 0, 843 .invert = 0,
844 .diseqc_tone = 0,
829}; 845};
830 846
831/* ================================================================== 847/* ==================================================================
@@ -940,9 +956,9 @@ static int dvb_init(struct saa7134_dev *dev)
940 configure_tda827x_fe(dev, &tda827x_lifeview_config); 956 configure_tda827x_fe(dev, &tda827x_lifeview_config);
941 break; 957 break;
942 case SAA7134_BOARD_FLYDVB_TRIO: 958 case SAA7134_BOARD_FLYDVB_TRIO:
943 if(! use_frontend) { //terrestrial 959 if(! use_frontend) { /* terrestrial */
944 configure_tda827x_fe(dev, &lifeview_trio_config); 960 configure_tda827x_fe(dev, &lifeview_trio_config);
945 } else { //satellite 961 } else { /* satellite */
946 dev->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs, &dev->i2c_adap); 962 dev->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs, &dev->i2c_adap);
947 if (dev->dvb.frontend) { 963 if (dev->dvb.frontend) {
948 if (dvb_attach(tda826x_attach, dev->dvb.frontend, 0x63, 964 if (dvb_attach(tda826x_attach, dev->dvb.frontend, 0x63,
@@ -1007,8 +1023,9 @@ static int dvb_init(struct saa7134_dev *dev)
1007 } 1023 }
1008 break; 1024 break;
1009 case SAA7134_BOARD_ASUS_EUROPA2_HYBRID: 1025 case SAA7134_BOARD_ASUS_EUROPA2_HYBRID:
1010 dev->dvb.frontend = tda10046_attach(&medion_cardbus, 1026 dev->dvb.frontend = dvb_attach(tda10046_attach,
1011 &dev->i2c_adap); 1027 &medion_cardbus,
1028 &dev->i2c_adap);
1012 if (dev->dvb.frontend) { 1029 if (dev->dvb.frontend) {
1013 dev->original_demod_sleep = dev->dvb.frontend->ops.sleep; 1030 dev->original_demod_sleep = dev->dvb.frontend->ops.sleep;
1014 dev->dvb.frontend->ops.sleep = philips_europa_demod_sleep; 1031 dev->dvb.frontend->ops.sleep = philips_europa_demod_sleep;
@@ -1044,6 +1061,9 @@ static int dvb_init(struct saa7134_dev *dev)
1044 case SAA7134_BOARD_AVERMEDIA_SUPER_007: 1061 case SAA7134_BOARD_AVERMEDIA_SUPER_007:
1045 configure_tda827x_fe(dev, &avermedia_super_007_config); 1062 configure_tda827x_fe(dev, &avermedia_super_007_config);
1046 break; 1063 break;
1064 case SAA7134_BOARD_TWINHAN_DTV_DVB_3056:
1065 configure_tda827x_fe(dev, &twinhan_dtv_dvb_3056_config);
1066 break;
1047 default: 1067 default:
1048 wprintk("Huh? unknown DVB card?\n"); 1068 wprintk("Huh? unknown DVB card?\n");
1049 break; 1069 break;
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index b1b01fa86720..3d2ec30de227 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -87,7 +87,7 @@ static int ts_open(struct inode *inode, struct file *file)
87 87
88 dprintk("open minor=%d\n",minor); 88 dprintk("open minor=%d\n",minor);
89 err = -EBUSY; 89 err = -EBUSY;
90 if (!mutex_trylock(&dev->empress_tsq.lock)) 90 if (!mutex_trylock(&dev->empress_tsq.vb_lock))
91 goto done; 91 goto done;
92 if (dev->empress_users) 92 if (dev->empress_users)
93 goto done_up; 93 goto done_up;
@@ -101,7 +101,7 @@ static int ts_open(struct inode *inode, struct file *file)
101 err = 0; 101 err = 0;
102 102
103done_up: 103done_up:
104 mutex_unlock(&dev->empress_tsq.lock); 104 mutex_unlock(&dev->empress_tsq.vb_lock);
105done: 105done:
106 return err; 106 return err;
107} 107}
@@ -110,7 +110,6 @@ static int ts_release(struct inode *inode, struct file *file)
110{ 110{
111 struct saa7134_dev *dev = file->private_data; 111 struct saa7134_dev *dev = file->private_data;
112 112
113 mutex_lock(&dev->empress_tsq.lock);
114 videobuf_stop(&dev->empress_tsq); 113 videobuf_stop(&dev->empress_tsq);
115 videobuf_mmap_free(&dev->empress_tsq); 114 videobuf_mmap_free(&dev->empress_tsq);
116 dev->empress_users--; 115 dev->empress_users--;
@@ -122,7 +121,6 @@ static int ts_release(struct inode *inode, struct file *file)
122 saa_writeb(SAA7134_AUDIO_MUTE_CTRL, 121 saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
123 saa_readb(SAA7134_AUDIO_MUTE_CTRL) | (1 << 6)); 122 saa_readb(SAA7134_AUDIO_MUTE_CTRL) | (1 << 6));
124 123
125 mutex_unlock(&dev->empress_tsq.lock);
126 return 0; 124 return 0;
127} 125}
128 126
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index 0db955c2d9b9..b4188819782f 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -406,6 +406,12 @@ int saa7134_input_init1(struct saa7134_dev *dev)
406 mask_keyup = 0x8000000; 406 mask_keyup = 0x8000000;
407 polling = 50; //ms 407 polling = 50; //ms
408 break; 408 break;
409 case SAA7134_BOARD_GENIUS_TVGO_A11MCE:
410 ir_codes = ir_codes_genius_tvgo_a11mce;
411 mask_keycode = 0xff;
412 mask_keydown = 0xf00000;
413 polling = 50; /* ms */
414 break;
409 } 415 }
410 if (NULL == ir_codes) { 416 if (NULL == ir_codes) {
411 printk("%s: Oops: IR config error [card=%d]\n", 417 printk("%s: Oops: IR config error [card=%d]\n",
diff --git a/drivers/media/video/saa7134/saa7134-video.c b/drivers/media/video/saa7134/saa7134-video.c
index 1184d359e848..39c41ad97d0e 100644
--- a/drivers/media/video/saa7134/saa7134-video.c
+++ b/drivers/media/video/saa7134/saa7134-video.c
@@ -1414,21 +1414,17 @@ video_poll(struct file *file, struct poll_table_struct *wait)
1414 if (!list_empty(&fh->cap.stream)) 1414 if (!list_empty(&fh->cap.stream))
1415 buf = list_entry(fh->cap.stream.next, struct videobuf_buffer, stream); 1415 buf = list_entry(fh->cap.stream.next, struct videobuf_buffer, stream);
1416 } else { 1416 } else {
1417 mutex_lock(&fh->cap.lock); 1417 mutex_lock(&fh->cap.vb_lock);
1418 if (UNSET == fh->cap.read_off) { 1418 if (UNSET == fh->cap.read_off) {
1419 /* need to capture a new frame */ 1419 /* need to capture a new frame */
1420 if (res_locked(fh->dev,RESOURCE_VIDEO)) { 1420 if (res_locked(fh->dev,RESOURCE_VIDEO))
1421 mutex_unlock(&fh->cap.lock); 1421 goto err;
1422 return POLLERR; 1422 if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,fh->cap.field))
1423 } 1423 goto err;
1424 if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,fh->cap.field)) {
1425 mutex_unlock(&fh->cap.lock);
1426 return POLLERR;
1427 }
1428 fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf); 1424 fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf);
1429 fh->cap.read_off = 0; 1425 fh->cap.read_off = 0;
1430 } 1426 }
1431 mutex_unlock(&fh->cap.lock); 1427 mutex_unlock(&fh->cap.vb_lock);
1432 buf = fh->cap.read_buf; 1428 buf = fh->cap.read_buf;
1433 } 1429 }
1434 1430
@@ -1440,6 +1436,10 @@ video_poll(struct file *file, struct poll_table_struct *wait)
1440 buf->state == VIDEOBUF_ERROR) 1436 buf->state == VIDEOBUF_ERROR)
1441 return POLLIN|POLLRDNORM; 1437 return POLLIN|POLLRDNORM;
1442 return 0; 1438 return 0;
1439
1440err:
1441 mutex_unlock(&fh->cap.vb_lock);
1442 return POLLERR;
1443} 1443}
1444 1444
1445static int video_release(struct inode *inode, struct file *file) 1445static int video_release(struct inode *inode, struct file *file)
diff --git a/drivers/media/video/saa7134/saa7134.h b/drivers/media/video/saa7134/saa7134.h
index b88ca995fafb..f940d0254798 100644
--- a/drivers/media/video/saa7134/saa7134.h
+++ b/drivers/media/video/saa7134/saa7134.h
@@ -252,6 +252,8 @@ struct saa7134_format {
252#define SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM 128 252#define SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM 128
253#define SAA7134_BOARD_BEHOLD_607_9FM 129 253#define SAA7134_BOARD_BEHOLD_607_9FM 129
254#define SAA7134_BOARD_BEHOLD_M6 130 254#define SAA7134_BOARD_BEHOLD_M6 130
255#define SAA7134_BOARD_TWINHAN_DTV_DVB_3056 131
256#define SAA7134_BOARD_GENIUS_TVGO_A11MCE 132
255 257
256#define SAA7134_MAXBOARDS 8 258#define SAA7134_MAXBOARDS 8
257#define SAA7134_INPUT_MAX 8 259#define SAA7134_INPUT_MAX 8
diff --git a/drivers/media/video/stk-sensor.c b/drivers/media/video/stk-sensor.c
index 4a9a0b62efa3..e546b014d7ad 100644
--- a/drivers/media/video/stk-sensor.c
+++ b/drivers/media/video/stk-sensor.c
@@ -225,7 +225,7 @@
225 225
226 226
227/* Returns 0 if OK */ 227/* Returns 0 if OK */
228int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val) 228static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
229{ 229{
230 int i = 0; 230 int i = 0;
231 int tmpval = 0; 231 int tmpval = 0;
@@ -250,7 +250,7 @@ int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
250 return 0; 250 return 0;
251} 251}
252 252
253int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val) 253static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
254{ 254{
255 int i = 0; 255 int i = 0;
256 int tmpval = 0; 256 int tmpval = 0;
@@ -380,7 +380,7 @@ int stk_sensor_init(struct stk_camera *dev)
380 STK_ERROR("Strange error reading sensor ID\n"); 380 STK_ERROR("Strange error reading sensor ID\n");
381 return -ENODEV; 381 return -ENODEV;
382 } 382 }
383 if (idh != 0x7F || idl != 0xA2) { 383 if (idh != 0x7f || idl != 0xa2) {
384 STK_ERROR("Huh? you don't have a sensor from ovt\n"); 384 STK_ERROR("Huh? you don't have a sensor from ovt\n");
385 return -ENODEV; 385 return -ENODEV;
386 } 386 }
@@ -409,6 +409,19 @@ static struct regval ov_fmt_uyvy[] = {
409 {REG_COM15, COM15_R00FF }, 409 {REG_COM15, COM15_R00FF },
410 {0xff, 0xff}, /* END MARKER */ 410 {0xff, 0xff}, /* END MARKER */
411}; 411};
412/* V4L2_PIX_FMT_YUYV */
413static struct regval ov_fmt_yuyv[] = {
414 {REG_TSLB, 0 },
415 { 0x4f, 0x80 }, /* "matrix coefficient 1" */
416 { 0x50, 0x80 }, /* "matrix coefficient 2" */
417 { 0x51, 0 }, /* vb */
418 { 0x52, 0x22 }, /* "matrix coefficient 4" */
419 { 0x53, 0x5e }, /* "matrix coefficient 5" */
420 { 0x54, 0x80 }, /* "matrix coefficient 6" */
421 {REG_COM13, COM13_UVSAT|COM13_CMATRIX},
422 {REG_COM15, COM15_R00FF },
423 {0xff, 0xff}, /* END MARKER */
424};
412 425
413/* V4L2_PIX_FMT_RGB565X rrrrrggg gggbbbbb */ 426/* V4L2_PIX_FMT_RGB565X rrrrrggg gggbbbbb */
414static struct regval ov_fmt_rgbr[] = { 427static struct regval ov_fmt_rgbr[] = {
@@ -519,6 +532,10 @@ int stk_sensor_configure(struct stk_camera *dev)
519 com7 |= COM7_YUV; 532 com7 |= COM7_YUV;
520 rv = ov_fmt_uyvy; 533 rv = ov_fmt_uyvy;
521 break; 534 break;
535 case V4L2_PIX_FMT_YUYV:
536 com7 |= COM7_YUV;
537 rv = ov_fmt_yuyv;
538 break;
522 case V4L2_PIX_FMT_RGB565: 539 case V4L2_PIX_FMT_RGB565:
523 com7 |= COM7_RGB; 540 com7 |= COM7_RGB;
524 rv = ov_fmt_rgbp; 541 rv = ov_fmt_rgbp;
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index d37e5e2594b4..ceba45ad0294 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -63,7 +63,7 @@ static struct usb_device_id stkwebcam_table[] = {
63}; 63};
64MODULE_DEVICE_TABLE(usb, stkwebcam_table); 64MODULE_DEVICE_TABLE(usb, stkwebcam_table);
65 65
66void stk_camera_cleanup(struct kref *kref) 66static void stk_camera_cleanup(struct kref *kref)
67{ 67{
68 struct stk_camera *dev = to_stk_camera(kref); 68 struct stk_camera *dev = to_stk_camera(kref);
69 69
@@ -682,6 +682,7 @@ static int v4l_stk_open(struct inode *inode, struct file *fp)
682 return -ENXIO; 682 return -ENXIO;
683 fp->private_data = vdev; 683 fp->private_data = vdev;
684 kref_get(&dev->kref); 684 kref_get(&dev->kref);
685 usb_autopm_get_interface(dev->interface);
685 686
686 return 0; 687 return 0;
687} 688}
@@ -703,6 +704,7 @@ static int v4l_stk_release(struct inode *inode, struct file *fp)
703 } 704 }
704 705
705 if (dev->owner != fp) { 706 if (dev->owner != fp) {
707 usb_autopm_put_interface(dev->interface);
706 kref_put(&dev->kref, stk_camera_cleanup); 708 kref_put(&dev->kref, stk_camera_cleanup);
707 return 0; 709 return 0;
708 } 710 }
@@ -713,6 +715,7 @@ static int v4l_stk_release(struct inode *inode, struct file *fp)
713 715
714 dev->owner = NULL; 716 dev->owner = NULL;
715 717
718 usb_autopm_put_interface(dev->interface);
716 kref_put(&dev->kref, stk_camera_cleanup); 719 kref_put(&dev->kref, stk_camera_cleanup);
717 720
718 return 0; 721 return 0;
@@ -993,6 +996,10 @@ static int stk_vidioc_enum_fmt_cap(struct file *filp,
993 fmtd->pixelformat = V4L2_PIX_FMT_SBGGR8; 996 fmtd->pixelformat = V4L2_PIX_FMT_SBGGR8;
994 strcpy(fmtd->description, "Raw bayer"); 997 strcpy(fmtd->description, "Raw bayer");
995 break; 998 break;
999 case 4:
1000 fmtd->pixelformat = V4L2_PIX_FMT_YUYV;
1001 strcpy(fmtd->description, "yuv4:2:2");
1002 break;
996 default: 1003 default:
997 return -EINVAL; 1004 return -EINVAL;
998 } 1005 }
@@ -1048,6 +1055,7 @@ static int stk_vidioc_try_fmt_cap(struct file *filp,
1048 case V4L2_PIX_FMT_RGB565: 1055 case V4L2_PIX_FMT_RGB565:
1049 case V4L2_PIX_FMT_RGB565X: 1056 case V4L2_PIX_FMT_RGB565X:
1050 case V4L2_PIX_FMT_UYVY: 1057 case V4L2_PIX_FMT_UYVY:
1058 case V4L2_PIX_FMT_YUYV:
1051 case V4L2_PIX_FMT_SBGGR8: 1059 case V4L2_PIX_FMT_SBGGR8:
1052 break; 1060 break;
1053 default: 1061 default:
@@ -1080,6 +1088,42 @@ static int stk_vidioc_try_fmt_cap(struct file *filp,
1080 return 0; 1088 return 0;
1081} 1089}
1082 1090
1091static int stk_setup_format(struct stk_camera *dev)
1092{
1093 int i = 0;
1094 int depth;
1095 if (dev->vsettings.palette == V4L2_PIX_FMT_SBGGR8)
1096 depth = 1;
1097 else
1098 depth = 2;
1099 while (stk_sizes[i].m != dev->vsettings.mode
1100 && i < ARRAY_SIZE(stk_sizes))
1101 i++;
1102 if (i == ARRAY_SIZE(stk_sizes)) {
1103 STK_ERROR("Something is broken in %s\n", __FUNCTION__);
1104 return -EFAULT;
1105 }
1106 /* These registers control some timings, not sure of what. */
1107 stk_camera_write_reg(dev, 0x001b, 0x0e);
1108 if (dev->vsettings.mode == MODE_SXGA)
1109 stk_camera_write_reg(dev, 0x001c, 0x0e);
1110 else
1111 stk_camera_write_reg(dev, 0x001c, 0x46);
1112 /*
1113 * Registers 0x0115 0x0114 are the size of each line (bytes),
1114 * regs 0x0117 0x0116 are the height of the image.
1115 */
1116 stk_camera_write_reg(dev, 0x0115,
1117 ((stk_sizes[i].w * depth) >> 8) & 0xff);
1118 stk_camera_write_reg(dev, 0x0114,
1119 (stk_sizes[i].w * depth) & 0xff);
1120 stk_camera_write_reg(dev, 0x0117,
1121 (stk_sizes[i].h >> 8) & 0xff);
1122 stk_camera_write_reg(dev, 0x0116,
1123 stk_sizes[i].h & 0xff);
1124 return stk_sensor_configure(dev);
1125}
1126
1083static int stk_vidioc_s_fmt_cap(struct file *filp, 1127static int stk_vidioc_s_fmt_cap(struct file *filp,
1084 void *priv, struct v4l2_format *fmtd) 1128 void *priv, struct v4l2_format *fmtd)
1085{ 1129{
@@ -1094,10 +1138,10 @@ static int stk_vidioc_s_fmt_cap(struct file *filp,
1094 return -EBUSY; 1138 return -EBUSY;
1095 if (dev->owner && dev->owner != filp) 1139 if (dev->owner && dev->owner != filp)
1096 return -EBUSY; 1140 return -EBUSY;
1097 dev->owner = filp;
1098 ret = stk_vidioc_try_fmt_cap(filp, priv, fmtd); 1141 ret = stk_vidioc_try_fmt_cap(filp, priv, fmtd);
1099 if (ret) 1142 if (ret)
1100 return ret; 1143 return ret;
1144 dev->owner = filp;
1101 1145
1102 dev->vsettings.palette = fmtd->fmt.pix.pixelformat; 1146 dev->vsettings.palette = fmtd->fmt.pix.pixelformat;
1103 stk_free_buffers(dev); 1147 stk_free_buffers(dev);
@@ -1105,25 +1149,7 @@ static int stk_vidioc_s_fmt_cap(struct file *filp,
1105 dev->vsettings.mode = stk_sizes[fmtd->fmt.pix.priv].m; 1149 dev->vsettings.mode = stk_sizes[fmtd->fmt.pix.priv].m;
1106 1150
1107 stk_initialise(dev); 1151 stk_initialise(dev);
1108 /* This registers controls some timings, not sure of what. */ 1152 return stk_setup_format(dev);
1109 stk_camera_write_reg(dev, 0x001b, 0x0e);
1110 if (dev->vsettings.mode == MODE_SXGA)
1111 stk_camera_write_reg(dev, 0x001c, 0x0e);
1112 else
1113 stk_camera_write_reg(dev, 0x001c, 0x46);
1114 /*
1115 * Registers 0x0115 0x0114 are the size of each line (bytes),
1116 * regs 0x0117 0x0116 are the heigth of the image.
1117 */
1118 stk_camera_write_reg(dev, 0x0115,
1119 (fmtd->fmt.pix.bytesperline >> 8) & 0xff);
1120 stk_camera_write_reg(dev, 0x0114,
1121 fmtd->fmt.pix.bytesperline & 0xff);
1122 stk_camera_write_reg(dev, 0x0117,
1123 (fmtd->fmt.pix.height >> 8) & 0xff);
1124 stk_camera_write_reg(dev, 0x0116,
1125 fmtd->fmt.pix.height & 0xff);
1126 return stk_sensor_configure(dev);
1127} 1153}
1128 1154
1129static int stk_vidioc_reqbufs(struct file *filp, 1155static int stk_vidioc_reqbufs(struct file *filp,
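
With the refactor above, stk_setup_format() derives the line size from the mode table (width times bytes per pixel) rather than trusting the caller-supplied bytesperline, and splits each 16-bit value across a pair of 8-bit registers. A small standalone illustration of that split, using the register addresses from the hunk; write_reg() below is a stand-in for the driver's stk_camera_write_reg().

#include <stdio.h>

/* Stand-in for stk_camera_write_reg(dev, addr, val). */
static void write_reg(unsigned int addr, unsigned char val)
{
	printf("reg 0x%04x <= 0x%02x\n", addr, val);
}

int main(void)
{
	unsigned int width = 640, height = 480;	/* VGA */
	unsigned int depth = 2;			/* bytes per pixel for YUYV/UYVY/RGB565 */
	unsigned int line = width * depth;	/* 1280 == 0x0500 */

	/* High byte then low byte, as in stk_setup_format(). */
	write_reg(0x0115, (line >> 8) & 0xff);		/* 0x05 */
	write_reg(0x0114, line & 0xff);			/* 0x00 */
	write_reg(0x0117, (height >> 8) & 0xff);	/* 480 == 0x01e0 -> 0x01 */
	write_reg(0x0116, height & 0xff);		/* 0xe0 */

	return 0;
}
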
@@ -1288,6 +1314,9 @@ static struct file_operations v4l_stk_fops = {
1288 .poll = v4l_stk_poll, 1314 .poll = v4l_stk_poll,
1289 .mmap = v4l_stk_mmap, 1315 .mmap = v4l_stk_mmap,
1290 .ioctl = video_ioctl2, 1316 .ioctl = video_ioctl2,
1317#ifdef CONFIG_COMPAT
1318 .compat_ioctl = v4l_compat_ioctl32,
1319#endif
1291 .llseek = no_llseek 1320 .llseek = no_llseek
1292}; 1321};
1293 1322
@@ -1403,7 +1432,7 @@ static int stk_camera_probe(struct usb_interface *interface,
1403 dev->vsettings.brightness = 0x7fff; 1432 dev->vsettings.brightness = 0x7fff;
1404 dev->vsettings.palette = V4L2_PIX_FMT_RGB565; 1433 dev->vsettings.palette = V4L2_PIX_FMT_RGB565;
1405 dev->vsettings.mode = MODE_VGA; 1434 dev->vsettings.mode = MODE_VGA;
1406 dev->frame_size = 640*480*2; 1435 dev->frame_size = 640 * 480 * 2;
1407 1436
1408 INIT_LIST_HEAD(&dev->sio_avail); 1437 INIT_LIST_HEAD(&dev->sio_avail);
1409 INIT_LIST_HEAD(&dev->sio_full); 1438 INIT_LIST_HEAD(&dev->sio_full);
@@ -1417,6 +1446,7 @@ static int stk_camera_probe(struct usb_interface *interface,
1417 } 1446 }
1418 1447
1419 stk_create_sysfs_files(&dev->vdev); 1448 stk_create_sysfs_files(&dev->vdev);
1449 usb_autopm_enable(dev->interface);
1420 1450
1421 return 0; 1451 return 0;
1422} 1452}
@@ -1434,11 +1464,41 @@ static void stk_camera_disconnect(struct usb_interface *interface)
1434 kref_put(&dev->kref, stk_camera_cleanup); 1464 kref_put(&dev->kref, stk_camera_cleanup);
1435} 1465}
1436 1466
1467#ifdef CONFIG_PM
1468int stk_camera_suspend(struct usb_interface *intf, pm_message_t message)
1469{
1470 struct stk_camera *dev = usb_get_intfdata(intf);
1471 if (is_streaming(dev)) {
1472 stk_stop_stream(dev);
1473 /* yes, this is ugly */
1474 set_streaming(dev);
1475 }
1476 return 0;
1477}
1478
1479int stk_camera_resume(struct usb_interface *intf)
1480{
1481 struct stk_camera *dev = usb_get_intfdata(intf);
1482 if (!is_initialised(dev))
1483 return 0;
1484 unset_initialised(dev);
1485 stk_initialise(dev);
1486 stk_setup_format(dev);
1487 if (is_streaming(dev))
1488 stk_start_stream(dev);
1489 return 0;
1490}
1491#endif
1492
1437static struct usb_driver stk_camera_driver = { 1493static struct usb_driver stk_camera_driver = {
1438 .name = "stkwebcam", 1494 .name = "stkwebcam",
1439 .probe = stk_camera_probe, 1495 .probe = stk_camera_probe,
1440 .disconnect = stk_camera_disconnect, 1496 .disconnect = stk_camera_disconnect,
1441 .id_table = stkwebcam_table, 1497 .id_table = stkwebcam_table,
1498#ifdef CONFIG_PM
1499 .suspend = stk_camera_suspend,
1500 .resume = stk_camera_resume,
1501#endif
1442}; 1502};
1443 1503
1444 1504
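
The new suspend/resume callbacks are compiled and wired into the usb_driver only under CONFIG_PM; resume clears the initialised flag so stk_initialise() and stk_setup_format() reprogram the hardware from scratch, then restarts streaming if it was active when the suspend happened. A condensed sketch of the same wiring pattern, with the device-specific work reduced to comments; the my_cam names are illustrative.

#include <linux/usb.h>

#ifdef CONFIG_PM
static int my_cam_suspend(struct usb_interface *intf, pm_message_t message)
{
	/* Stop hardware activity here, but keep the logical "was
	 * streaming" state so resume knows what to restart. */
	return 0;
}

static int my_cam_resume(struct usb_interface *intf)
{
	/* Reprogram the device from scratch (the driver above clears its
	 * initialised flag and replays init + format setup), then restart
	 * streaming if it was active at suspend time. */
	return 0;
}
#endif

static struct usb_driver my_cam_driver = {
	.name		= "my_cam",
	/* .probe, .disconnect and .id_table omitted from this sketch */
#ifdef CONFIG_PM
	.suspend	= my_cam_suspend,
	.resume		= my_cam_resume,
#endif
};
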
diff --git a/drivers/media/video/stk-webcam.h b/drivers/media/video/stk-webcam.h
index 7e989d1ac1e0..df4dfefc5327 100644
--- a/drivers/media/video/stk-webcam.h
+++ b/drivers/media/video/stk-webcam.h
@@ -79,6 +79,7 @@ enum stk_status {
79#define unset_present(dev) ((dev)->status &= \ 79#define unset_present(dev) ((dev)->status &= \
80 ~(S_PRESENT|S_INITIALISED|S_STREAMING)) 80 ~(S_PRESENT|S_INITIALISED|S_STREAMING))
81#define set_initialised(dev) ((dev)->status |= S_INITIALISED) 81#define set_initialised(dev) ((dev)->status |= S_INITIALISED)
82#define unset_initialised(dev) ((dev)->status &= ~S_INITIALISED)
82#define set_memallocd(dev) ((dev)->status |= S_MEMALLOCD) 83#define set_memallocd(dev) ((dev)->status |= S_MEMALLOCD)
83#define unset_memallocd(dev) ((dev)->status &= ~S_MEMALLOCD) 84#define unset_memallocd(dev) ((dev)->status &= ~S_MEMALLOCD)
84#define set_streaming(dev) ((dev)->status |= S_STREAMING) 85#define set_streaming(dev) ((dev)->status |= S_STREAMING)
@@ -127,8 +128,6 @@ void stk_camera_delete(struct kref *);
127int stk_camera_write_reg(struct stk_camera *, u16, u8); 128int stk_camera_write_reg(struct stk_camera *, u16, u8);
128int stk_camera_read_reg(struct stk_camera *, u16, int *); 129int stk_camera_read_reg(struct stk_camera *, u16, int *);
129 130
130int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val);
131int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val);
132int stk_sensor_init(struct stk_camera *); 131int stk_sensor_init(struct stk_camera *);
133int stk_sensor_configure(struct stk_camera *); 132int stk_sensor_configure(struct stk_camera *);
134int stk_sensor_sleep(struct stk_camera *dev); 133int stk_sensor_sleep(struct stk_camera *dev);
diff --git a/drivers/media/video/tcm825x.c b/drivers/media/video/tcm825x.c
index 41cd6a0b0485..fb895f6684a3 100644
--- a/drivers/media/video/tcm825x.c
+++ b/drivers/media/video/tcm825x.c
@@ -851,7 +851,7 @@ static int tcm825x_probe(struct i2c_client *client)
851 sensor->platform_data = client->dev.platform_data; 851 sensor->platform_data = client->dev.platform_data;
852 852
853 if (sensor->platform_data == NULL 853 if (sensor->platform_data == NULL
854 && !sensor->platform_data->is_okay()) 854 || !sensor->platform_data->is_okay())
855 return -ENODEV; 855 return -ENODEV;
856 856
857 sensor->v4l2_int_device = &tcm825x_int_device; 857 sensor->v4l2_int_device = &tcm825x_int_device;
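
The tcm825x hunk fixes an inverted NULL check: with '&&', a NULL platform_data makes the first operand true, so the second operand is evaluated and dereferences the NULL pointer, while a valid pointer short-circuits the test and the is_okay() hook is never consulted. With '||' the probe returns -ENODEV either when the data is missing or when the hook rejects the board. A standalone illustration, with the structure simplified from the driver:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-in for the driver's platform data. */
struct platform_data {
	bool (*is_okay)(void);
};

static bool board_ok(void)
{
	return true;
}

static int probe(const struct platform_data *pdata)
{
	/* Buggy form: "pdata == NULL && !pdata->is_okay()".
	 * If pdata is NULL the first operand is true, so the second is
	 * evaluated and dereferences the NULL pointer; if pdata is valid
	 * the '&&' short-circuits and is_okay() is never called. */
	if (pdata == NULL || !pdata->is_okay())
		return -1;	/* -ENODEV in the driver */
	return 0;
}

int main(void)
{
	struct platform_data good = { .is_okay = board_ok };

	printf("NULL pdata -> %d\n", probe(NULL));	/* -1, no crash */
	printf("good pdata -> %d\n", probe(&good));	/* 0 */
	return 0;
}
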
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index ba538f6fbcc3..78a09a2a4857 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -1038,7 +1038,7 @@ static int tuner_resume(struct i2c_client *c)
1038 1038
1039/* ---------------------------------------------------------------------- */ 1039/* ---------------------------------------------------------------------- */
1040 1040
1041LIST_HEAD(tuner_list); 1041static LIST_HEAD(tuner_list);
1042 1042
1043/* Search for existing radio and/or TV tuners on the given I2C adapter. 1043/* Search for existing radio and/or TV tuners on the given I2C adapter.
1044 Note that when this function is called from tuner_probe you can be 1044 Note that when this function is called from tuner_probe you can be
diff --git a/drivers/media/video/tuner-xc2028.c b/drivers/media/video/tuner-xc2028.c
index f191f6a48070..50cf876f020f 100644
--- a/drivers/media/video/tuner-xc2028.c
+++ b/drivers/media/video/tuner-xc2028.c
@@ -754,6 +754,9 @@ skip_std_specific:
754 goto check_device; 754 goto check_device;
755 } 755 }
756 756
757 if (new_fw.type & FM)
758 goto check_device;
759
757 /* Load SCODE firmware, if exists */ 760 /* Load SCODE firmware, if exists */
758 tuner_dbg("Trying to load scode %d\n", new_fw.scode_nr); 761 tuner_dbg("Trying to load scode %d\n", new_fw.scode_nr);
759 762
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index a75560540e79..01ebcec040c4 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -1571,14 +1571,14 @@ static int tvaudio_get_ctrl(struct CHIPSTATE *chip,
1571 ctrl->value=chip->muted; 1571 ctrl->value=chip->muted;
1572 return 0; 1572 return 0;
1573 case V4L2_CID_AUDIO_VOLUME: 1573 case V4L2_CID_AUDIO_VOLUME:
1574 if (!desc->flags & CHIP_HAS_VOLUME) 1574 if (!(desc->flags & CHIP_HAS_VOLUME))
1575 break; 1575 break;
1576 ctrl->value = max(chip->left,chip->right); 1576 ctrl->value = max(chip->left,chip->right);
1577 return 0; 1577 return 0;
1578 case V4L2_CID_AUDIO_BALANCE: 1578 case V4L2_CID_AUDIO_BALANCE:
1579 { 1579 {
1580 int volume; 1580 int volume;
1581 if (!desc->flags & CHIP_HAS_VOLUME) 1581 if (!(desc->flags & CHIP_HAS_VOLUME))
1582 break; 1582 break;
1583 volume = max(chip->left,chip->right); 1583 volume = max(chip->left,chip->right);
1584 if (volume) 1584 if (volume)
@@ -1621,7 +1621,7 @@ static int tvaudio_set_ctrl(struct CHIPSTATE *chip,
1621 { 1621 {
1622 int volume,balance; 1622 int volume,balance;
1623 1623
1624 if (!desc->flags & CHIP_HAS_VOLUME) 1624 if (!(desc->flags & CHIP_HAS_VOLUME))
1625 break; 1625 break;
1626 1626
1627 volume = max(chip->left,chip->right); 1627 volume = max(chip->left,chip->right);
@@ -1642,7 +1642,7 @@ static int tvaudio_set_ctrl(struct CHIPSTATE *chip,
1642 case V4L2_CID_AUDIO_BALANCE: 1642 case V4L2_CID_AUDIO_BALANCE:
1643 { 1643 {
1644 int volume, balance; 1644 int volume, balance;
1645 if (!desc->flags & CHIP_HAS_VOLUME) 1645 if (!(desc->flags & CHIP_HAS_VOLUME))
1646 break; 1646 break;
1647 1647
1648 volume = max(chip->left,chip->right); 1648 volume = max(chip->left,chip->right);
@@ -1702,7 +1702,7 @@ static int chip_command(struct i2c_client *client,
1702 break; 1702 break;
1703 case V4L2_CID_AUDIO_VOLUME: 1703 case V4L2_CID_AUDIO_VOLUME:
1704 case V4L2_CID_AUDIO_BALANCE: 1704 case V4L2_CID_AUDIO_BALANCE:
1705 if (!desc->flags & CHIP_HAS_VOLUME) 1705 if (!(desc->flags & CHIP_HAS_VOLUME))
1706 return -EINVAL; 1706 return -EINVAL;
1707 break; 1707 break;
1708 case V4L2_CID_AUDIO_BASS: 1708 case V4L2_CID_AUDIO_BASS:
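
Every tvaudio hunk fixes the same precedence bug: '!' binds tighter than '&', so '!desc->flags & CHIP_HAS_VOLUME' first collapses the flags to 0 or 1 and only then masks that with the flag bit, which tests the wrong thing; the parenthesised form tests the bit itself. A two-line demonstration follows; the CHIP_HAS_VOLUME value is made up for the example, and newer gcc/clang flag this pattern with -Wlogical-not-parentheses.

#include <stdio.h>

/* Illustrative flag value; the real CHIP_HAS_VOLUME is whatever
 * tvaudio.c defines, the precedence issue is the same either way. */
#define CHIP_HAS_VOLUME 0x04

int main(void)
{
	unsigned int flags = 0;		/* a chip WITHOUT volume support */

	/* '!' binds before '&': !flags == 1, and 1 & 0x04 == 0, so the
	 * guard never fires and the volume code path runs anyway. */
	printf("buggy guard:   %d\n", !flags & CHIP_HAS_VOLUME);	/* 0 */

	/* Parenthesised form: (flags & 0x04) == 0, negated -> 1. */
	printf("correct guard: %d\n", !(flags & CHIP_HAS_VOLUME));	/* 1 */

	return 0;
}
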
diff --git a/drivers/media/video/tveeprom.c b/drivers/media/video/tveeprom.c
index 0b8fbad3c721..dc0da44a5af6 100644
--- a/drivers/media/video/tveeprom.c
+++ b/drivers/media/video/tveeprom.c
@@ -242,7 +242,7 @@ hauppauge_tuner[] =
242 { TUNER_ABSENT, "TCL M2523_3DBH_E"}, 242 { TUNER_ABSENT, "TCL M2523_3DBH_E"},
243 { TUNER_ABSENT, "TCL M2523_3DIH_E"}, 243 { TUNER_ABSENT, "TCL M2523_3DIH_E"},
244 { TUNER_ABSENT, "TCL MFPE05_2_U"}, 244 { TUNER_ABSENT, "TCL MFPE05_2_U"},
245 { TUNER_ABSENT, "Philips FMD1216MEX"}, 245 { TUNER_PHILIPS_FMD1216ME_MK3, "Philips FMD1216MEX"},
246 { TUNER_ABSENT, "Philips FRH2036B"}, 246 { TUNER_ABSENT, "Philips FRH2036B"},
247 { TUNER_ABSENT, "Panasonic ENGF75_01GF"}, 247 { TUNER_ABSENT, "Panasonic ENGF75_01GF"},
248 { TUNER_ABSENT, "MaxLinear MXL5005"}, 248 { TUNER_ABSENT, "MaxLinear MXL5005"},
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index c056ff6d810c..34deb68ae568 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -56,7 +56,6 @@
56#include <asm/pgtable.h> 56#include <asm/pgtable.h>
57#include <asm/io.h> 57#include <asm/io.h>
58#include <asm/div64.h> 58#include <asm/div64.h>
59#include <linux/video_decoder.h>
60#define __OLD_VIDIOC_ /* To allow fixing old calls*/ 59#define __OLD_VIDIOC_ /* To allow fixing old calls*/
61#include <media/v4l2-common.h> 60#include <media/v4l2-common.h>
62#include <media/v4l2-chip-ident.h> 61#include <media/v4l2-chip-ident.h>
@@ -82,108 +81,6 @@ MODULE_LICENSE("GPL");
82 */ 81 */
83 82
84 83
85char *v4l2_norm_to_name(v4l2_std_id id)
86{
87 char *name;
88 u32 myid = id;
89
90 /* HACK: ppc32 architecture doesn't have __ucmpdi2 function to handle
91 64 bit comparations. So, on that architecture, with some gcc variants,
92 compilation fails. Currently, the max value is 30bit wide.
93 */
94 BUG_ON(myid != id);
95
96 switch (myid) {
97 case V4L2_STD_PAL:
98 name="PAL"; break;
99 case V4L2_STD_PAL_BG:
100 name="PAL-BG"; break;
101 case V4L2_STD_PAL_DK:
102 name="PAL-DK"; break;
103 case V4L2_STD_PAL_B:
104 name="PAL-B"; break;
105 case V4L2_STD_PAL_B1:
106 name="PAL-B1"; break;
107 case V4L2_STD_PAL_G:
108 name="PAL-G"; break;
109 case V4L2_STD_PAL_H:
110 name="PAL-H"; break;
111 case V4L2_STD_PAL_I:
112 name="PAL-I"; break;
113 case V4L2_STD_PAL_D:
114 name="PAL-D"; break;
115 case V4L2_STD_PAL_D1:
116 name="PAL-D1"; break;
117 case V4L2_STD_PAL_K:
118 name="PAL-K"; break;
119 case V4L2_STD_PAL_M:
120 name="PAL-M"; break;
121 case V4L2_STD_PAL_N:
122 name="PAL-N"; break;
123 case V4L2_STD_PAL_Nc:
124 name="PAL-Nc"; break;
125 case V4L2_STD_PAL_60:
126 name="PAL-60"; break;
127 case V4L2_STD_NTSC:
128 name="NTSC"; break;
129 case V4L2_STD_NTSC_M:
130 name="NTSC-M"; break;
131 case V4L2_STD_NTSC_M_JP:
132 name="NTSC-M-JP"; break;
133 case V4L2_STD_NTSC_443:
134 name="NTSC-443"; break;
135 case V4L2_STD_NTSC_M_KR:
136 name="NTSC-M-KR"; break;
137 case V4L2_STD_SECAM:
138 name="SECAM"; break;
139 case V4L2_STD_SECAM_DK:
140 name="SECAM-DK"; break;
141 case V4L2_STD_SECAM_B:
142 name="SECAM-B"; break;
143 case V4L2_STD_SECAM_D:
144 name="SECAM-D"; break;
145 case V4L2_STD_SECAM_G:
146 name="SECAM-G"; break;
147 case V4L2_STD_SECAM_H:
148 name="SECAM-H"; break;
149 case V4L2_STD_SECAM_K:
150 name="SECAM-K"; break;
151 case V4L2_STD_SECAM_K1:
152 name="SECAM-K1"; break;
153 case V4L2_STD_SECAM_L:
154 name="SECAM-L"; break;
155 case V4L2_STD_SECAM_LC:
156 name="SECAM-LC"; break;
157 default:
158 name="Unknown"; break;
159 }
160
161 return name;
162}
163
164/* Fill in the fields of a v4l2_standard structure according to the
165 'id' and 'transmission' parameters. Returns negative on error. */
166int v4l2_video_std_construct(struct v4l2_standard *vs,
167 int id, char *name)
168{
169 u32 index = vs->index;
170
171 memset(vs, 0, sizeof(struct v4l2_standard));
172 vs->index = index;
173 vs->id = id;
174 if (id & V4L2_STD_525_60) {
175 vs->frameperiod.numerator = 1001;
176 vs->frameperiod.denominator = 30000;
177 vs->framelines = 525;
178 } else {
179 vs->frameperiod.numerator = 1;
180 vs->frameperiod.denominator = 25;
181 vs->framelines = 625;
182 }
183 strlcpy(vs->name,name,sizeof(vs->name));
184 return 0;
185}
186
187/* ----------------------------------------------------------------- */ 84/* ----------------------------------------------------------------- */
188/* priority handling */ 85/* priority handling */
189 86
@@ -196,6 +93,7 @@ int v4l2_prio_init(struct v4l2_prio_state *global)
196 memset(global,0,sizeof(*global)); 93 memset(global,0,sizeof(*global));
197 return 0; 94 return 0;
198} 95}
96EXPORT_SYMBOL(v4l2_prio_init);
199 97
200int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local, 98int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local,
201 enum v4l2_priority new) 99 enum v4l2_priority new)
@@ -211,11 +109,13 @@ int v4l2_prio_change(struct v4l2_prio_state *global, enum v4l2_priority *local,
211 *local = new; 109 *local = new;
212 return 0; 110 return 0;
213} 111}
112EXPORT_SYMBOL(v4l2_prio_change);
214 113
215int v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local) 114int v4l2_prio_open(struct v4l2_prio_state *global, enum v4l2_priority *local)
216{ 115{
217 return v4l2_prio_change(global,local,V4L2_PRIORITY_DEFAULT); 116 return v4l2_prio_change(global,local,V4L2_PRIORITY_DEFAULT);
218} 117}
118EXPORT_SYMBOL(v4l2_prio_open);
219 119
220int v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority *local) 120int v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority *local)
221{ 121{
@@ -223,6 +123,7 @@ int v4l2_prio_close(struct v4l2_prio_state *global, enum v4l2_priority *local)
223 atomic_dec(&global->prios[*local]); 123 atomic_dec(&global->prios[*local]);
224 return 0; 124 return 0;
225} 125}
126EXPORT_SYMBOL(v4l2_prio_close);
226 127
227enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global) 128enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global)
228{ 129{
@@ -234,6 +135,7 @@ enum v4l2_priority v4l2_prio_max(struct v4l2_prio_state *global)
234 return V4L2_PRIORITY_BACKGROUND; 135 return V4L2_PRIORITY_BACKGROUND;
235 return V4L2_PRIORITY_UNSET; 136 return V4L2_PRIORITY_UNSET;
236} 137}
138EXPORT_SYMBOL(v4l2_prio_max);
237 139
238int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority *local) 140int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority *local)
239{ 141{
@@ -241,225 +143,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority *local)
241 return -EBUSY; 143 return -EBUSY;
242 return 0; 144 return 0;
243} 145}
244 146EXPORT_SYMBOL(v4l2_prio_check);
245
246/* ----------------------------------------------------------------- */
247/* some arrays for pretty-printing debug messages of enum types */
248
249char *v4l2_field_names[] = {
250 [V4L2_FIELD_ANY] = "any",
251 [V4L2_FIELD_NONE] = "none",
252 [V4L2_FIELD_TOP] = "top",
253 [V4L2_FIELD_BOTTOM] = "bottom",
254 [V4L2_FIELD_INTERLACED] = "interlaced",
255 [V4L2_FIELD_SEQ_TB] = "seq-tb",
256 [V4L2_FIELD_SEQ_BT] = "seq-bt",
257 [V4L2_FIELD_ALTERNATE] = "alternate",
258 [V4L2_FIELD_INTERLACED_TB] = "interlaced-tb",
259 [V4L2_FIELD_INTERLACED_BT] = "interlaced-bt",
260};
261
262char *v4l2_type_names[] = {
263 [V4L2_BUF_TYPE_VIDEO_CAPTURE] = "video-cap",
264 [V4L2_BUF_TYPE_VIDEO_OVERLAY] = "video-over",
265 [V4L2_BUF_TYPE_VIDEO_OUTPUT] = "video-out",
266 [V4L2_BUF_TYPE_VBI_CAPTURE] = "vbi-cap",
267 [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out",
268 [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-cap",
269 [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out",
270 [V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "video-out-over",
271};
272
273
274#define prt_names(a,arr) (((a)>=0)&&((a)<ARRAY_SIZE(arr)))?arr[a]:"unknown"
275
276/* ------------------------------------------------------------------ */
277/* debug help functions */
278
279#ifdef CONFIG_VIDEO_V4L1_COMPAT
280static const char *v4l1_ioctls[] = {
281 [_IOC_NR(VIDIOCGCAP)] = "VIDIOCGCAP",
282 [_IOC_NR(VIDIOCGCHAN)] = "VIDIOCGCHAN",
283 [_IOC_NR(VIDIOCSCHAN)] = "VIDIOCSCHAN",
284 [_IOC_NR(VIDIOCGTUNER)] = "VIDIOCGTUNER",
285 [_IOC_NR(VIDIOCSTUNER)] = "VIDIOCSTUNER",
286 [_IOC_NR(VIDIOCGPICT)] = "VIDIOCGPICT",
287 [_IOC_NR(VIDIOCSPICT)] = "VIDIOCSPICT",
288 [_IOC_NR(VIDIOCCAPTURE)] = "VIDIOCCAPTURE",
289 [_IOC_NR(VIDIOCGWIN)] = "VIDIOCGWIN",
290 [_IOC_NR(VIDIOCSWIN)] = "VIDIOCSWIN",
291 [_IOC_NR(VIDIOCGFBUF)] = "VIDIOCGFBUF",
292 [_IOC_NR(VIDIOCSFBUF)] = "VIDIOCSFBUF",
293 [_IOC_NR(VIDIOCKEY)] = "VIDIOCKEY",
294 [_IOC_NR(VIDIOCGFREQ)] = "VIDIOCGFREQ",
295 [_IOC_NR(VIDIOCSFREQ)] = "VIDIOCSFREQ",
296 [_IOC_NR(VIDIOCGAUDIO)] = "VIDIOCGAUDIO",
297 [_IOC_NR(VIDIOCSAUDIO)] = "VIDIOCSAUDIO",
298 [_IOC_NR(VIDIOCSYNC)] = "VIDIOCSYNC",
299 [_IOC_NR(VIDIOCMCAPTURE)] = "VIDIOCMCAPTURE",
300 [_IOC_NR(VIDIOCGMBUF)] = "VIDIOCGMBUF",
301 [_IOC_NR(VIDIOCGUNIT)] = "VIDIOCGUNIT",
302 [_IOC_NR(VIDIOCGCAPTURE)] = "VIDIOCGCAPTURE",
303 [_IOC_NR(VIDIOCSCAPTURE)] = "VIDIOCSCAPTURE",
304 [_IOC_NR(VIDIOCSPLAYMODE)] = "VIDIOCSPLAYMODE",
305 [_IOC_NR(VIDIOCSWRITEMODE)] = "VIDIOCSWRITEMODE",
306 [_IOC_NR(VIDIOCGPLAYINFO)] = "VIDIOCGPLAYINFO",
307 [_IOC_NR(VIDIOCSMICROCODE)] = "VIDIOCSMICROCODE",
308 [_IOC_NR(VIDIOCGVBIFMT)] = "VIDIOCGVBIFMT",
309 [_IOC_NR(VIDIOCSVBIFMT)] = "VIDIOCSVBIFMT"
310};
311#define V4L1_IOCTLS ARRAY_SIZE(v4l1_ioctls)
312#endif
313
314static const char *v4l2_ioctls[] = {
315 [_IOC_NR(VIDIOC_QUERYCAP)] = "VIDIOC_QUERYCAP",
316 [_IOC_NR(VIDIOC_RESERVED)] = "VIDIOC_RESERVED",
317 [_IOC_NR(VIDIOC_ENUM_FMT)] = "VIDIOC_ENUM_FMT",
318 [_IOC_NR(VIDIOC_G_FMT)] = "VIDIOC_G_FMT",
319 [_IOC_NR(VIDIOC_S_FMT)] = "VIDIOC_S_FMT",
320 [_IOC_NR(VIDIOC_REQBUFS)] = "VIDIOC_REQBUFS",
321 [_IOC_NR(VIDIOC_QUERYBUF)] = "VIDIOC_QUERYBUF",
322 [_IOC_NR(VIDIOC_G_FBUF)] = "VIDIOC_G_FBUF",
323 [_IOC_NR(VIDIOC_S_FBUF)] = "VIDIOC_S_FBUF",
324 [_IOC_NR(VIDIOC_OVERLAY)] = "VIDIOC_OVERLAY",
325 [_IOC_NR(VIDIOC_QBUF)] = "VIDIOC_QBUF",
326 [_IOC_NR(VIDIOC_DQBUF)] = "VIDIOC_DQBUF",
327 [_IOC_NR(VIDIOC_STREAMON)] = "VIDIOC_STREAMON",
328 [_IOC_NR(VIDIOC_STREAMOFF)] = "VIDIOC_STREAMOFF",
329 [_IOC_NR(VIDIOC_G_PARM)] = "VIDIOC_G_PARM",
330 [_IOC_NR(VIDIOC_S_PARM)] = "VIDIOC_S_PARM",
331 [_IOC_NR(VIDIOC_G_STD)] = "VIDIOC_G_STD",
332 [_IOC_NR(VIDIOC_S_STD)] = "VIDIOC_S_STD",
333 [_IOC_NR(VIDIOC_ENUMSTD)] = "VIDIOC_ENUMSTD",
334 [_IOC_NR(VIDIOC_ENUMINPUT)] = "VIDIOC_ENUMINPUT",
335 [_IOC_NR(VIDIOC_G_CTRL)] = "VIDIOC_G_CTRL",
336 [_IOC_NR(VIDIOC_S_CTRL)] = "VIDIOC_S_CTRL",
337 [_IOC_NR(VIDIOC_G_TUNER)] = "VIDIOC_G_TUNER",
338 [_IOC_NR(VIDIOC_S_TUNER)] = "VIDIOC_S_TUNER",
339 [_IOC_NR(VIDIOC_G_AUDIO)] = "VIDIOC_G_AUDIO",
340 [_IOC_NR(VIDIOC_S_AUDIO)] = "VIDIOC_S_AUDIO",
341 [_IOC_NR(VIDIOC_QUERYCTRL)] = "VIDIOC_QUERYCTRL",
342 [_IOC_NR(VIDIOC_QUERYMENU)] = "VIDIOC_QUERYMENU",
343 [_IOC_NR(VIDIOC_G_INPUT)] = "VIDIOC_G_INPUT",
344 [_IOC_NR(VIDIOC_S_INPUT)] = "VIDIOC_S_INPUT",
345 [_IOC_NR(VIDIOC_G_OUTPUT)] = "VIDIOC_G_OUTPUT",
346 [_IOC_NR(VIDIOC_S_OUTPUT)] = "VIDIOC_S_OUTPUT",
347 [_IOC_NR(VIDIOC_ENUMOUTPUT)] = "VIDIOC_ENUMOUTPUT",
348 [_IOC_NR(VIDIOC_G_AUDOUT)] = "VIDIOC_G_AUDOUT",
349 [_IOC_NR(VIDIOC_S_AUDOUT)] = "VIDIOC_S_AUDOUT",
350 [_IOC_NR(VIDIOC_G_MODULATOR)] = "VIDIOC_G_MODULATOR",
351 [_IOC_NR(VIDIOC_S_MODULATOR)] = "VIDIOC_S_MODULATOR",
352 [_IOC_NR(VIDIOC_G_FREQUENCY)] = "VIDIOC_G_FREQUENCY",
353 [_IOC_NR(VIDIOC_S_FREQUENCY)] = "VIDIOC_S_FREQUENCY",
354 [_IOC_NR(VIDIOC_CROPCAP)] = "VIDIOC_CROPCAP",
355 [_IOC_NR(VIDIOC_G_CROP)] = "VIDIOC_G_CROP",
356 [_IOC_NR(VIDIOC_S_CROP)] = "VIDIOC_S_CROP",
357 [_IOC_NR(VIDIOC_G_JPEGCOMP)] = "VIDIOC_G_JPEGCOMP",
358 [_IOC_NR(VIDIOC_S_JPEGCOMP)] = "VIDIOC_S_JPEGCOMP",
359 [_IOC_NR(VIDIOC_QUERYSTD)] = "VIDIOC_QUERYSTD",
360 [_IOC_NR(VIDIOC_TRY_FMT)] = "VIDIOC_TRY_FMT",
361 [_IOC_NR(VIDIOC_ENUMAUDIO)] = "VIDIOC_ENUMAUDIO",
362 [_IOC_NR(VIDIOC_ENUMAUDOUT)] = "VIDIOC_ENUMAUDOUT",
363 [_IOC_NR(VIDIOC_G_PRIORITY)] = "VIDIOC_G_PRIORITY",
364 [_IOC_NR(VIDIOC_S_PRIORITY)] = "VIDIOC_S_PRIORITY",
365 [_IOC_NR(VIDIOC_G_SLICED_VBI_CAP)] = "VIDIOC_G_SLICED_VBI_CAP",
366 [_IOC_NR(VIDIOC_LOG_STATUS)] = "VIDIOC_LOG_STATUS",
367 [_IOC_NR(VIDIOC_G_EXT_CTRLS)] = "VIDIOC_G_EXT_CTRLS",
368 [_IOC_NR(VIDIOC_S_EXT_CTRLS)] = "VIDIOC_S_EXT_CTRLS",
369 [_IOC_NR(VIDIOC_TRY_EXT_CTRLS)] = "VIDIOC_TRY_EXT_CTRLS",
370#if 1
371 [_IOC_NR(VIDIOC_ENUM_FRAMESIZES)] = "VIDIOC_ENUM_FRAMESIZES",
372 [_IOC_NR(VIDIOC_ENUM_FRAMEINTERVALS)] = "VIDIOC_ENUM_FRAMEINTERVALS",
373 [_IOC_NR(VIDIOC_G_ENC_INDEX)] = "VIDIOC_G_ENC_INDEX",
374 [_IOC_NR(VIDIOC_ENCODER_CMD)] = "VIDIOC_ENCODER_CMD",
375 [_IOC_NR(VIDIOC_TRY_ENCODER_CMD)] = "VIDIOC_TRY_ENCODER_CMD",
376
377 [_IOC_NR(VIDIOC_DBG_S_REGISTER)] = "VIDIOC_DBG_S_REGISTER",
378 [_IOC_NR(VIDIOC_DBG_G_REGISTER)] = "VIDIOC_DBG_G_REGISTER",
379
380 [_IOC_NR(VIDIOC_G_CHIP_IDENT)] = "VIDIOC_G_CHIP_IDENT",
381#endif
382};
383#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
384
385static const char *v4l2_int_ioctls[] = {
386#ifdef CONFIG_VIDEO_V4L1_COMPAT
387 [_IOC_NR(DECODER_GET_CAPABILITIES)] = "DECODER_GET_CAPABILITIES",
388 [_IOC_NR(DECODER_GET_STATUS)] = "DECODER_GET_STATUS",
389 [_IOC_NR(DECODER_SET_NORM)] = "DECODER_SET_NORM",
390 [_IOC_NR(DECODER_SET_INPUT)] = "DECODER_SET_INPUT",
391 [_IOC_NR(DECODER_SET_OUTPUT)] = "DECODER_SET_OUTPUT",
392 [_IOC_NR(DECODER_ENABLE_OUTPUT)] = "DECODER_ENABLE_OUTPUT",
393 [_IOC_NR(DECODER_SET_PICTURE)] = "DECODER_SET_PICTURE",
394 [_IOC_NR(DECODER_SET_GPIO)] = "DECODER_SET_GPIO",
395 [_IOC_NR(DECODER_INIT)] = "DECODER_INIT",
396 [_IOC_NR(DECODER_SET_VBI_BYPASS)] = "DECODER_SET_VBI_BYPASS",
397 [_IOC_NR(DECODER_DUMP)] = "DECODER_DUMP",
398#endif
399 [_IOC_NR(AUDC_SET_RADIO)] = "AUDC_SET_RADIO",
400
401 [_IOC_NR(TUNER_SET_TYPE_ADDR)] = "TUNER_SET_TYPE_ADDR",
402 [_IOC_NR(TUNER_SET_STANDBY)] = "TUNER_SET_STANDBY",
403 [_IOC_NR(TUNER_SET_CONFIG)] = "TUNER_SET_CONFIG",
404
405 [_IOC_NR(VIDIOC_INT_S_TUNER_MODE)] = "VIDIOC_INT_S_TUNER_MODE",
406 [_IOC_NR(VIDIOC_INT_RESET)] = "VIDIOC_INT_RESET",
407 [_IOC_NR(VIDIOC_INT_AUDIO_CLOCK_FREQ)] = "VIDIOC_INT_AUDIO_CLOCK_FREQ",
408 [_IOC_NR(VIDIOC_INT_DECODE_VBI_LINE)] = "VIDIOC_INT_DECODE_VBI_LINE",
409 [_IOC_NR(VIDIOC_INT_S_VBI_DATA)] = "VIDIOC_INT_S_VBI_DATA",
410 [_IOC_NR(VIDIOC_INT_G_VBI_DATA)] = "VIDIOC_INT_G_VBI_DATA",
411 [_IOC_NR(VIDIOC_INT_I2S_CLOCK_FREQ)] = "VIDIOC_INT_I2S_CLOCK_FREQ",
412 [_IOC_NR(VIDIOC_INT_S_STANDBY)] = "VIDIOC_INT_S_STANDBY",
413 [_IOC_NR(VIDIOC_INT_S_AUDIO_ROUTING)] = "VIDIOC_INT_S_AUDIO_ROUTING",
414 [_IOC_NR(VIDIOC_INT_G_AUDIO_ROUTING)] = "VIDIOC_INT_G_AUDIO_ROUTING",
415 [_IOC_NR(VIDIOC_INT_S_VIDEO_ROUTING)] = "VIDIOC_INT_S_VIDEO_ROUTING",
416 [_IOC_NR(VIDIOC_INT_G_VIDEO_ROUTING)] = "VIDIOC_INT_G_VIDEO_ROUTING",
417 [_IOC_NR(VIDIOC_INT_S_CRYSTAL_FREQ)] = "VIDIOC_INT_S_CRYSTAL_FREQ",
418 [_IOC_NR(VIDIOC_INT_INIT)] = "VIDIOC_INT_INIT",
419 [_IOC_NR(VIDIOC_INT_G_STD_OUTPUT)] = "VIDIOC_INT_G_STD_OUTPUT",
420 [_IOC_NR(VIDIOC_INT_S_STD_OUTPUT)] = "VIDIOC_INT_S_STD_OUTPUT",
421};
422#define V4L2_INT_IOCTLS ARRAY_SIZE(v4l2_int_ioctls)
423
424
425/* Common ioctl debug function. This function can be used by
426 external ioctl messages as well as internal V4L ioctl */
427void v4l_printk_ioctl(unsigned int cmd)
428{
429 char *dir;
430
431 switch (_IOC_DIR(cmd)) {
432 case _IOC_NONE: dir = "--"; break;
433 case _IOC_READ: dir = "r-"; break;
434 case _IOC_WRITE: dir = "-w"; break;
435 case _IOC_READ | _IOC_WRITE: dir = "rw"; break;
436 default: dir = "*ERR*"; break;
437 }
438 switch (_IOC_TYPE(cmd)) {
439 case 'd':
440 printk("v4l2_int ioctl %s, dir=%s (0x%08x)\n",
441 (_IOC_NR(cmd) < V4L2_INT_IOCTLS) ?
442 v4l2_int_ioctls[_IOC_NR(cmd)] : "UNKNOWN", dir, cmd);
443 break;
444#ifdef CONFIG_VIDEO_V4L1_COMPAT
445 case 'v':
446 printk("v4l1 ioctl %s, dir=%s (0x%08x)\n",
447 (_IOC_NR(cmd) < V4L1_IOCTLS) ?
448 v4l1_ioctls[_IOC_NR(cmd)] : "UNKNOWN", dir, cmd);
449 break;
450#endif
451 case 'V':
452 printk("v4l2 ioctl %s, dir=%s (0x%08x)\n",
453 (_IOC_NR(cmd) < V4L2_IOCTLS) ?
454 v4l2_ioctls[_IOC_NR(cmd)] : "UNKNOWN", dir, cmd);
455 break;
456
457 default:
458 printk("unknown ioctl '%c', dir=%s, #%d (0x%08x)\n",
459 _IOC_TYPE(cmd), dir, _IOC_NR(cmd), cmd);
460 }
461}
462
463 147
464/* ----------------------------------------------------------------- */ 148/* ----------------------------------------------------------------- */
465 149
@@ -488,6 +172,7 @@ int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
488 } 172 }
489 return 0; 173 return 0;
490} 174}
175EXPORT_SYMBOL(v4l2_ctrl_check);
491 176
492/* Returns NULL or a character pointer array containing the menu for 177/* Returns NULL or a character pointer array containing the menu for
493 the given control ID. The pointer array ends with a NULL pointer. 178 the given control ID. The pointer array ends with a NULL pointer.
@@ -648,6 +333,7 @@ const char **v4l2_ctrl_get_menu(u32 id)
648 return NULL; 333 return NULL;
649 } 334 }
650} 335}
336EXPORT_SYMBOL(v4l2_ctrl_get_menu);
651 337
652/* Fill in a struct v4l2_queryctrl */ 338/* Fill in a struct v4l2_queryctrl */
653int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def) 339int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def)
@@ -770,6 +456,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 ste
770 snprintf(qctrl->name, sizeof(qctrl->name), name); 456 snprintf(qctrl->name, sizeof(qctrl->name), name);
771 return 0; 457 return 0;
772} 458}
459EXPORT_SYMBOL(v4l2_ctrl_query_fill);
773 460
774/* Fill in a struct v4l2_queryctrl with standard values based on 461/* Fill in a struct v4l2_queryctrl with standard values based on
775 the control ID. */ 462 the control ID. */
@@ -904,6 +591,7 @@ int v4l2_ctrl_query_fill_std(struct v4l2_queryctrl *qctrl)
904 return -EINVAL; 591 return -EINVAL;
905 } 592 }
906} 593}
594EXPORT_SYMBOL(v4l2_ctrl_query_fill_std);
907 595
908/* Fill in a struct v4l2_querymenu based on the struct v4l2_queryctrl and 596/* Fill in a struct v4l2_querymenu based on the struct v4l2_queryctrl and
909 the menu. The qctrl pointer may be NULL, in which case it is ignored. */ 597 the menu. The qctrl pointer may be NULL, in which case it is ignored. */
@@ -922,6 +610,7 @@ int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, struct v4l2_queryctrl *qc
922 qmenu->reserved = 0; 610 qmenu->reserved = 0;
923 return 0; 611 return 0;
924} 612}
613EXPORT_SYMBOL(v4l2_ctrl_query_menu);
925 614
926/* ctrl_classes points to an array of u32 pointers, the last element is 615/* ctrl_classes points to an array of u32 pointers, the last element is
927 a NULL pointer. Each u32 array is a 0-terminated array of control IDs. 616 a NULL pointer. Each u32 array is a 0-terminated array of control IDs.
@@ -972,7 +661,20 @@ u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id)
972 return 0; 661 return 0;
973 return **ctrl_classes; 662 return **ctrl_classes;
974} 663}
664EXPORT_SYMBOL(v4l2_ctrl_next);
975 665
666int v4l2_chip_match_host(u32 match_type, u32 match_chip)
667{
668 switch (match_type) {
669 case V4L2_CHIP_MATCH_HOST:
670 return match_chip == 0;
671 default:
672 return 0;
673 }
674}
675EXPORT_SYMBOL(v4l2_chip_match_host);
676
677#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
976int v4l2_chip_match_i2c_client(struct i2c_client *c, u32 match_type, u32 match_chip) 678int v4l2_chip_match_i2c_client(struct i2c_client *c, u32 match_type, u32 match_chip)
977{ 679{
978 switch (match_type) { 680 switch (match_type) {
@@ -984,6 +686,7 @@ int v4l2_chip_match_i2c_client(struct i2c_client *c, u32 match_type, u32 match_c
984 return 0; 686 return 0;
985 } 687 }
986} 688}
689EXPORT_SYMBOL(v4l2_chip_match_i2c_client);
987 690
988int v4l2_chip_ident_i2c_client(struct i2c_client *c, struct v4l2_chip_ident *chip, 691int v4l2_chip_ident_i2c_client(struct i2c_client *c, struct v4l2_chip_ident *chip,
989 u32 ident, u32 revision) 692 u32 ident, u32 revision)
@@ -1000,16 +703,7 @@ int v4l2_chip_ident_i2c_client(struct i2c_client *c, struct v4l2_chip_ident *chi
1000 } 703 }
1001 return 0; 704 return 0;
1002} 705}
1003 706EXPORT_SYMBOL(v4l2_chip_ident_i2c_client);
1004int v4l2_chip_match_host(u32 match_type, u32 match_chip)
1005{
1006 switch (match_type) {
1007 case V4L2_CHIP_MATCH_HOST:
1008 return match_chip == 0;
1009 default:
1010 return 0;
1011 }
1012}
1013 707
1014/* ----------------------------------------------------------------- */ 708/* ----------------------------------------------------------------- */
1015 709
@@ -1038,38 +732,5 @@ int v4l2_i2c_attach(struct i2c_adapter *adapter, int address, struct i2c_driver
1038 } 732 }
1039 return err != -ENOMEM ? 0 : err; 733 return err != -ENOMEM ? 0 : err;
1040} 734}
1041
1042/* ----------------------------------------------------------------- */
1043
1044EXPORT_SYMBOL(v4l2_norm_to_name);
1045EXPORT_SYMBOL(v4l2_video_std_construct);
1046
1047EXPORT_SYMBOL(v4l2_prio_init);
1048EXPORT_SYMBOL(v4l2_prio_change);
1049EXPORT_SYMBOL(v4l2_prio_open);
1050EXPORT_SYMBOL(v4l2_prio_close);
1051EXPORT_SYMBOL(v4l2_prio_max);
1052EXPORT_SYMBOL(v4l2_prio_check);
1053
1054EXPORT_SYMBOL(v4l2_field_names);
1055EXPORT_SYMBOL(v4l2_type_names);
1056EXPORT_SYMBOL(v4l_printk_ioctl);
1057
1058EXPORT_SYMBOL(v4l2_ctrl_next);
1059EXPORT_SYMBOL(v4l2_ctrl_check);
1060EXPORT_SYMBOL(v4l2_ctrl_get_menu);
1061EXPORT_SYMBOL(v4l2_ctrl_query_menu);
1062EXPORT_SYMBOL(v4l2_ctrl_query_fill);
1063EXPORT_SYMBOL(v4l2_ctrl_query_fill_std);
1064
1065EXPORT_SYMBOL(v4l2_chip_match_i2c_client);
1066EXPORT_SYMBOL(v4l2_chip_ident_i2c_client);
1067EXPORT_SYMBOL(v4l2_chip_match_host);
1068
1069EXPORT_SYMBOL(v4l2_i2c_attach); 735EXPORT_SYMBOL(v4l2_i2c_attach);
1070 736#endif
1071/*
1072 * Local variables:
1073 * c-basic-offset: 8
1074 * End:
1075 */
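
The v4l2-common changes above move each EXPORT_SYMBOL() directly after the function it exports instead of keeping one block at the end of the file, and drop the exports for the code that migrated to videodev.c. Placing the export next to the definition is the usual kernel convention, since it cannot silently go stale when the function later moves. A trivial sketch of the convention, with an invented helper name:

#include <linux/module.h>

int my_helper(int x)
{
	return 2 * x;
}
/* Export immediately after the definition, not in a list at file end. */
EXPORT_SYMBOL(my_helper);
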
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index 80a14da9acef..eab79ffdf56a 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -147,7 +147,7 @@ void videobuf_queue_core_init(struct videobuf_queue *q,
147 /* Having implementations for abstract methods are mandatory */ 147 /* Having implementations for abstract methods are mandatory */
148 BUG_ON(!q->int_ops); 148 BUG_ON(!q->int_ops);
149 149
150 mutex_init(&q->lock); 150 mutex_init(&q->vb_lock);
151 INIT_LIST_HEAD(&q->stream); 151 INIT_LIST_HEAD(&q->stream);
152} 152}
153 153
@@ -189,7 +189,7 @@ int videobuf_queue_is_busy(struct videobuf_queue *q)
189 return 0; 189 return 0;
190} 190}
191 191
192/* Locking: Caller holds q->lock */ 192/* Locking: Caller holds q->vb_lock */
193void videobuf_queue_cancel(struct videobuf_queue *q) 193void videobuf_queue_cancel(struct videobuf_queue *q)
194{ 194{
195 unsigned long flags = 0; 195 unsigned long flags = 0;
@@ -220,7 +220,7 @@ void videobuf_queue_cancel(struct videobuf_queue *q)
220 220
221/* --------------------------------------------------------------------- */ 221/* --------------------------------------------------------------------- */
222 222
223/* Locking: Caller holds q->lock */ 223/* Locking: Caller holds q->vb_lock */
224enum v4l2_field videobuf_next_field(struct videobuf_queue *q) 224enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
225{ 225{
226 enum v4l2_field field = q->field; 226 enum v4l2_field field = q->field;
@@ -239,7 +239,7 @@ enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
239 return field; 239 return field;
240} 240}
241 241
242/* Locking: Caller holds q->lock */ 242/* Locking: Caller holds q->vb_lock */
243static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b, 243static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
244 struct videobuf_buffer *vb, enum v4l2_buf_type type) 244 struct videobuf_buffer *vb, enum v4l2_buf_type type)
245{ 245{
@@ -295,7 +295,7 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
295 b->sequence = vb->field_count >> 1; 295 b->sequence = vb->field_count >> 1;
296} 296}
297 297
298/* Locking: Caller holds q->lock */ 298/* Locking: Caller holds q->vb_lock */
299static int __videobuf_mmap_free(struct videobuf_queue *q) 299static int __videobuf_mmap_free(struct videobuf_queue *q)
300{ 300{
301 int i; 301 int i;
@@ -328,13 +328,13 @@ static int __videobuf_mmap_free(struct videobuf_queue *q)
328int videobuf_mmap_free(struct videobuf_queue *q) 328int videobuf_mmap_free(struct videobuf_queue *q)
329{ 329{
330 int ret; 330 int ret;
331 mutex_lock(&q->lock); 331 mutex_lock(&q->vb_lock);
332 ret = __videobuf_mmap_free(q); 332 ret = __videobuf_mmap_free(q);
333 mutex_unlock(&q->lock); 333 mutex_unlock(&q->vb_lock);
334 return ret; 334 return ret;
335} 335}
336 336
337/* Locking: Caller holds q->lock */ 337/* Locking: Caller holds q->vb_lock */
338static int __videobuf_mmap_setup(struct videobuf_queue *q, 338static int __videobuf_mmap_setup(struct videobuf_queue *q,
339 unsigned int bcount, unsigned int bsize, 339 unsigned int bcount, unsigned int bsize,
340 enum v4l2_memory memory) 340 enum v4l2_memory memory)
@@ -384,9 +384,9 @@ int videobuf_mmap_setup(struct videobuf_queue *q,
384 enum v4l2_memory memory) 384 enum v4l2_memory memory)
385{ 385{
386 int ret; 386 int ret;
387 mutex_lock(&q->lock); 387 mutex_lock(&q->vb_lock);
388 ret = __videobuf_mmap_setup(q, bcount, bsize, memory); 388 ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
389 mutex_unlock(&q->lock); 389 mutex_unlock(&q->vb_lock);
390 return ret; 390 return ret;
391} 391}
392 392
@@ -408,7 +408,7 @@ int videobuf_reqbufs(struct videobuf_queue *q,
408 return -EINVAL; 408 return -EINVAL;
409 } 409 }
410 410
411 mutex_lock(&q->lock); 411 mutex_lock(&q->vb_lock);
412 if (req->type != q->type) { 412 if (req->type != q->type) {
413 dprintk(1, "reqbufs: queue type invalid\n"); 413 dprintk(1, "reqbufs: queue type invalid\n");
414 retval = -EINVAL; 414 retval = -EINVAL;
@@ -444,7 +444,7 @@ int videobuf_reqbufs(struct videobuf_queue *q,
444 req->count = retval; 444 req->count = retval;
445 445
446 done: 446 done:
447 mutex_unlock(&q->lock); 447 mutex_unlock(&q->vb_lock);
448 return retval; 448 return retval;
449} 449}
450 450
@@ -452,7 +452,7 @@ int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
452{ 452{
453 int ret = -EINVAL; 453 int ret = -EINVAL;
454 454
455 mutex_lock(&q->lock); 455 mutex_lock(&q->vb_lock);
456 if (unlikely(b->type != q->type)) { 456 if (unlikely(b->type != q->type)) {
457 dprintk(1, "querybuf: Wrong type.\n"); 457 dprintk(1, "querybuf: Wrong type.\n");
458 goto done; 458 goto done;
@@ -470,7 +470,7 @@ int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
470 470
471 ret = 0; 471 ret = 0;
472done: 472done:
473 mutex_unlock(&q->lock); 473 mutex_unlock(&q->vb_lock);
474 return ret; 474 return ret;
475} 475}
476 476
@@ -487,7 +487,7 @@ int videobuf_qbuf(struct videobuf_queue *q,
487 if (b->memory == V4L2_MEMORY_MMAP) 487 if (b->memory == V4L2_MEMORY_MMAP)
488 down_read(&current->mm->mmap_sem); 488 down_read(&current->mm->mmap_sem);
489 489
490 mutex_lock(&q->lock); 490 mutex_lock(&q->vb_lock);
491 retval = -EBUSY; 491 retval = -EBUSY;
492 if (q->reading) { 492 if (q->reading) {
493 dprintk(1, "qbuf: Reading running...\n"); 493 dprintk(1, "qbuf: Reading running...\n");
@@ -573,7 +573,7 @@ int videobuf_qbuf(struct videobuf_queue *q,
573 retval = 0; 573 retval = 0;
574 574
575 done: 575 done:
576 mutex_unlock(&q->lock); 576 mutex_unlock(&q->vb_lock);
577 577
578 if (b->memory == V4L2_MEMORY_MMAP) 578 if (b->memory == V4L2_MEMORY_MMAP)
579 up_read(&current->mm->mmap_sem); 579 up_read(&current->mm->mmap_sem);
@@ -589,7 +589,7 @@ int videobuf_dqbuf(struct videobuf_queue *q,
589 589
590 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); 590 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
591 591
592 mutex_lock(&q->lock); 592 mutex_lock(&q->vb_lock);
593 retval = -EBUSY; 593 retval = -EBUSY;
594 if (q->reading) { 594 if (q->reading) {
595 dprintk(1, "dqbuf: Reading running...\n"); 595 dprintk(1, "dqbuf: Reading running...\n");
@@ -632,7 +632,7 @@ int videobuf_dqbuf(struct videobuf_queue *q,
632 videobuf_status(q, b, buf, q->type); 632 videobuf_status(q, b, buf, q->type);
633 633
634 done: 634 done:
635 mutex_unlock(&q->lock); 635 mutex_unlock(&q->vb_lock);
636 return retval; 636 return retval;
637} 637}
638 638
@@ -642,7 +642,7 @@ int videobuf_streamon(struct videobuf_queue *q)
642 unsigned long flags = 0; 642 unsigned long flags = 0;
643 int retval; 643 int retval;
644 644
645 mutex_lock(&q->lock); 645 mutex_lock(&q->vb_lock);
646 retval = -EBUSY; 646 retval = -EBUSY;
647 if (q->reading) 647 if (q->reading)
648 goto done; 648 goto done;
@@ -659,11 +659,11 @@ int videobuf_streamon(struct videobuf_queue *q)
659 spin_unlock_irqrestore(q->irqlock, flags); 659 spin_unlock_irqrestore(q->irqlock, flags);
660 660
661 done: 661 done:
662 mutex_unlock(&q->lock); 662 mutex_unlock(&q->vb_lock);
663 return retval; 663 return retval;
664} 664}
665 665
666/* Locking: Caller holds q->lock */ 666/* Locking: Caller holds q->vb_lock */
667static int __videobuf_streamoff(struct videobuf_queue *q) 667static int __videobuf_streamoff(struct videobuf_queue *q)
668{ 668{
669 if (!q->streaming) 669 if (!q->streaming)
@@ -679,14 +679,14 @@ int videobuf_streamoff(struct videobuf_queue *q)
679{ 679{
680 int retval; 680 int retval;
681 681
682 mutex_lock(&q->lock); 682 mutex_lock(&q->vb_lock);
683 retval = __videobuf_streamoff(q); 683 retval = __videobuf_streamoff(q);
684 mutex_unlock(&q->lock); 684 mutex_unlock(&q->vb_lock);
685 685
686 return retval; 686 return retval;
687} 687}
688 688
689/* Locking: Caller holds q->lock */ 689/* Locking: Caller holds q->vb_lock */
690static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q, 690static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
691 char __user *data, 691 char __user *data,
692 size_t count, loff_t *ppos) 692 size_t count, loff_t *ppos)
@@ -745,7 +745,7 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
745 745
746 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); 746 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
747 747
748 mutex_lock(&q->lock); 748 mutex_lock(&q->vb_lock);
749 749
750 nbufs = 1; size = 0; 750 nbufs = 1; size = 0;
751 q->ops->buf_setup(q, &nbufs, &size); 751 q->ops->buf_setup(q, &nbufs, &size);
@@ -817,11 +817,11 @@ ssize_t videobuf_read_one(struct videobuf_queue *q,
817 } 817 }
818 818
819 done: 819 done:
820 mutex_unlock(&q->lock); 820 mutex_unlock(&q->vb_lock);
821 return retval; 821 return retval;
822} 822}
823 823
824/* Locking: Caller holds q->lock */ 824/* Locking: Caller holds q->vb_lock */
825static int __videobuf_read_start(struct videobuf_queue *q) 825static int __videobuf_read_start(struct videobuf_queue *q)
826{ 826{
827 enum v4l2_field field; 827 enum v4l2_field field;
@@ -882,23 +882,23 @@ int videobuf_read_start(struct videobuf_queue *q)
882{ 882{
883 int rc; 883 int rc;
884 884
885 mutex_lock(&q->lock); 885 mutex_lock(&q->vb_lock);
886 rc = __videobuf_read_start(q); 886 rc = __videobuf_read_start(q);
887 mutex_unlock(&q->lock); 887 mutex_unlock(&q->vb_lock);
888 888
889 return rc; 889 return rc;
890} 890}
891 891
892void videobuf_read_stop(struct videobuf_queue *q) 892void videobuf_read_stop(struct videobuf_queue *q)
893{ 893{
894 mutex_lock(&q->lock); 894 mutex_lock(&q->vb_lock);
895 __videobuf_read_stop(q); 895 __videobuf_read_stop(q);
896 mutex_unlock(&q->lock); 896 mutex_unlock(&q->vb_lock);
897} 897}
898 898
899void videobuf_stop(struct videobuf_queue *q) 899void videobuf_stop(struct videobuf_queue *q)
900{ 900{
901 mutex_lock(&q->lock); 901 mutex_lock(&q->vb_lock);
902 902
903 if (q->streaming) 903 if (q->streaming)
904 __videobuf_streamoff(q); 904 __videobuf_streamoff(q);
@@ -906,7 +906,7 @@ void videobuf_stop(struct videobuf_queue *q)
906 if (q->reading) 906 if (q->reading)
907 __videobuf_read_stop(q); 907 __videobuf_read_stop(q);
908 908
909 mutex_unlock(&q->lock); 909 mutex_unlock(&q->vb_lock);
910} 910}
911 911
912 912
@@ -920,7 +920,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
920 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); 920 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
921 921
922 dprintk(2, "%s\n", __FUNCTION__); 922 dprintk(2, "%s\n", __FUNCTION__);
923 mutex_lock(&q->lock); 923 mutex_lock(&q->vb_lock);
924 retval = -EBUSY; 924 retval = -EBUSY;
925 if (q->streaming) 925 if (q->streaming)
926 goto done; 926 goto done;
@@ -980,7 +980,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
980 } 980 }
981 981
982 done: 982 done:
983 mutex_unlock(&q->lock); 983 mutex_unlock(&q->vb_lock);
984 return retval; 984 return retval;
985} 985}
986 986
@@ -991,7 +991,7 @@ unsigned int videobuf_poll_stream(struct file *file,
991 struct videobuf_buffer *buf = NULL; 991 struct videobuf_buffer *buf = NULL;
992 unsigned int rc = 0; 992 unsigned int rc = 0;
993 993
994 mutex_lock(&q->lock); 994 mutex_lock(&q->vb_lock);
995 if (q->streaming) { 995 if (q->streaming) {
996 if (!list_empty(&q->stream)) 996 if (!list_empty(&q->stream))
997 buf = list_entry(q->stream.next, 997 buf = list_entry(q->stream.next,
@@ -1019,7 +1019,7 @@ unsigned int videobuf_poll_stream(struct file *file,
1019 buf->state == VIDEOBUF_ERROR) 1019 buf->state == VIDEOBUF_ERROR)
1020 rc = POLLIN|POLLRDNORM; 1020 rc = POLLIN|POLLRDNORM;
1021 } 1021 }
1022 mutex_unlock(&q->lock); 1022 mutex_unlock(&q->vb_lock);
1023 return rc; 1023 return rc;
1024} 1024}
1025 1025
@@ -1030,10 +1030,10 @@ int videobuf_mmap_mapper(struct videobuf_queue *q,
1030 1030
1031 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS); 1031 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
1032 1032
1033 mutex_lock(&q->lock); 1033 mutex_lock(&q->vb_lock);
1034 retval = CALL(q, mmap_mapper, q, vma); 1034 retval = CALL(q, mmap_mapper, q, vma);
1035 q->is_mmapped = 1; 1035 q->is_mmapped = 1;
1036 mutex_unlock(&q->lock); 1036 mutex_unlock(&q->vb_lock);
1037 1037
1038 return retval; 1038 return retval;
1039} 1039}
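
The videobuf-core changes are a mechanical rename of the queue mutex from q->lock to the more specific q->vb_lock; every lock/unlock site and every "Caller holds" comment changes together, with no change to locking order or scope. The wrapper shape the file uses throughout stays the same, roughly as sketched here; the function pair below is hypothetical, not part of videobuf.

#include <linux/mutex.h>
#include <media/videobuf-core.h>

/* Hypothetical locked-section helper, standing in for the __-prefixed
 * videobuf functions whose comments read "Caller holds q->vb_lock". */
static int __videobuf_do_something(struct videobuf_queue *q)
{
	return 0;
}

/* Public wrapper: take q->vb_lock, call the helper, drop the lock. */
int videobuf_do_something(struct videobuf_queue *q)
{
	int ret;

	mutex_lock(&q->vb_lock);
	ret = __videobuf_do_something(q);
	mutex_unlock(&q->vb_lock);
	return ret;
}
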
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index 98efd7ab1f50..53fed4b74ce9 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -356,7 +356,7 @@ videobuf_vm_close(struct vm_area_struct *vma)
356 map->count--; 356 map->count--;
357 if (0 == map->count) { 357 if (0 == map->count) {
358 dprintk(1,"munmap %p q=%p\n",map,q); 358 dprintk(1,"munmap %p q=%p\n",map,q);
359 mutex_lock(&q->lock); 359 mutex_lock(&q->vb_lock);
360 for (i = 0; i < VIDEO_MAX_FRAME; i++) { 360 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
361 if (NULL == q->bufs[i]) 361 if (NULL == q->bufs[i])
362 continue; 362 continue;
@@ -373,7 +373,7 @@ videobuf_vm_close(struct vm_area_struct *vma)
373 q->bufs[i]->baddr = 0; 373 q->bufs[i]->baddr = 0;
374 q->ops->buf_release(q,q->bufs[i]); 374 q->ops->buf_release(q,q->bufs[i]);
375 } 375 }
376 mutex_unlock(&q->lock); 376 mutex_unlock(&q->vb_lock);
377 kfree(map); 377 kfree(map);
378 } 378 }
379 return; 379 return;
diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
index 9b3898347ca5..5266ecc91dab 100644
--- a/drivers/media/video/videobuf-vmalloc.c
+++ b/drivers/media/video/videobuf-vmalloc.c
@@ -70,7 +70,7 @@ videobuf_vm_close(struct vm_area_struct *vma)
70 map->count--; 70 map->count--;
71 if (0 == map->count) { 71 if (0 == map->count) {
72 dprintk(1,"munmap %p q=%p\n",map,q); 72 dprintk(1,"munmap %p q=%p\n",map,q);
73 mutex_lock(&q->lock); 73 mutex_lock(&q->vb_lock);
74 for (i = 0; i < VIDEO_MAX_FRAME; i++) { 74 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
75 if (NULL == q->bufs[i]) 75 if (NULL == q->bufs[i])
76 continue; 76 continue;
@@ -83,7 +83,7 @@ videobuf_vm_close(struct vm_area_struct *vma)
83 q->bufs[i]->map = NULL; 83 q->bufs[i]->map = NULL;
84 q->bufs[i]->baddr = 0; 84 q->bufs[i]->baddr = 0;
85 } 85 }
86 mutex_unlock(&q->lock); 86 mutex_unlock(&q->vb_lock);
87 kfree(map); 87 kfree(map);
88 } 88 }
89 return; 89 return;
@@ -107,7 +107,7 @@ static struct vm_operations_struct videobuf_vm_ops =
107 107
108static void *__videobuf_alloc(size_t size) 108static void *__videobuf_alloc(size_t size)
109{ 109{
110 struct videbuf_vmalloc_memory *mem; 110 struct videobuf_vmalloc_memory *mem;
111 struct videobuf_buffer *vb; 111 struct videobuf_buffer *vb;
112 112
113 vb = kzalloc(size+sizeof(*mem),GFP_KERNEL); 113 vb = kzalloc(size+sizeof(*mem),GFP_KERNEL);
@@ -127,9 +127,7 @@ static int __videobuf_iolock (struct videobuf_queue* q,
127 struct v4l2_framebuffer *fbuf) 127 struct v4l2_framebuffer *fbuf)
128{ 128{
129 int pages; 129 int pages;
130 130 struct videobuf_vmalloc_memory *mem=vb->priv;
131 struct videbuf_vmalloc_memory *mem=vb->priv;
132
133 131
134 BUG_ON(!mem); 132 BUG_ON(!mem);
135 133
@@ -195,7 +193,7 @@ static int __videobuf_mmap_free(struct videobuf_queue *q)
195static int __videobuf_mmap_mapper(struct videobuf_queue *q, 193static int __videobuf_mmap_mapper(struct videobuf_queue *q,
196 struct vm_area_struct *vma) 194 struct vm_area_struct *vma)
197{ 195{
198 struct videbuf_vmalloc_memory *mem; 196 struct videobuf_vmalloc_memory *mem;
199 struct videobuf_mapping *map; 197 struct videobuf_mapping *map;
200 unsigned int first; 198 unsigned int first;
201 int retval; 199 int retval;
@@ -267,7 +265,7 @@ static int __videobuf_copy_to_user ( struct videobuf_queue *q,
267 char __user *data, size_t count, 265 char __user *data, size_t count,
268 int nonblocking ) 266 int nonblocking )
269{ 267{
270 struct videbuf_vmalloc_memory *mem=q->read_buf->priv; 268 struct videobuf_vmalloc_memory *mem=q->read_buf->priv;
271 BUG_ON (!mem); 269 BUG_ON (!mem);
272 MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM); 270 MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM);
273 271
@@ -288,7 +286,7 @@ static int __videobuf_copy_stream ( struct videobuf_queue *q,
288 int vbihack, int nonblocking ) 286 int vbihack, int nonblocking )
289{ 287{
290 unsigned int *fc; 288 unsigned int *fc;
291 struct videbuf_vmalloc_memory *mem=q->read_buf->priv; 289 struct videobuf_vmalloc_memory *mem=q->read_buf->priv;
292 BUG_ON (!mem); 290 BUG_ON (!mem);
293 MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM); 291 MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM);
294 292
@@ -341,7 +339,7 @@ EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);
341 339
342void *videobuf_to_vmalloc (struct videobuf_buffer *buf) 340void *videobuf_to_vmalloc (struct videobuf_buffer *buf)
343{ 341{
344 struct videbuf_vmalloc_memory *mem=buf->priv; 342 struct videobuf_vmalloc_memory *mem=buf->priv;
345 BUG_ON (!mem); 343 BUG_ON (!mem);
346 MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM); 344 MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM);
347 345
@@ -351,7 +349,7 @@ EXPORT_SYMBOL_GPL(videobuf_to_vmalloc);
351 349
352void videobuf_vmalloc_free (struct videobuf_buffer *buf) 350void videobuf_vmalloc_free (struct videobuf_buffer *buf)
353{ 351{
354 struct videbuf_vmalloc_memory *mem=buf->priv; 352 struct videobuf_vmalloc_memory *mem=buf->priv;
355 BUG_ON (!mem); 353 BUG_ON (!mem);
356 354
357 MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM); 355 MAGIC_CHECK(mem->magic,MAGIC_VMAL_MEM);
diff --git a/drivers/media/video/videodev.c b/drivers/media/video/videodev.c
index 28655f8983c6..0d9b63762a48 100644
--- a/drivers/media/video/videodev.c
+++ b/drivers/media/video/videodev.c
@@ -46,10 +46,373 @@
46#include <linux/videodev.h> 46#include <linux/videodev.h>
47#endif 47#endif
48#include <media/v4l2-common.h> 48#include <media/v4l2-common.h>
49#include <linux/video_decoder.h>
49 50
50#define VIDEO_NUM_DEVICES 256 51#define VIDEO_NUM_DEVICES 256
51#define VIDEO_NAME "video4linux" 52#define VIDEO_NAME "video4linux"
52 53
54/* video4linux standard ID conversion to standard name
55 */
56char *v4l2_norm_to_name(v4l2_std_id id)
57{
58 char *name;
59 u32 myid = id;
60
61 /* HACK: ppc32 architecture doesn't have __ucmpdi2 function to handle
62 64 bit comparisons. So, on that architecture, with some gcc
63 variants, compilation fails. Currently, the max value is 30 bits wide.
64 */
65 BUG_ON(myid != id);
66
67 switch (myid) {
68 case V4L2_STD_PAL:
69 name = "PAL";
70 break;
71 case V4L2_STD_PAL_BG:
72 name = "PAL-BG";
73 break;
74 case V4L2_STD_PAL_DK:
75 name = "PAL-DK";
76 break;
77 case V4L2_STD_PAL_B:
78 name = "PAL-B";
79 break;
80 case V4L2_STD_PAL_B1:
81 name = "PAL-B1";
82 break;
83 case V4L2_STD_PAL_G:
84 name = "PAL-G";
85 break;
86 case V4L2_STD_PAL_H:
87 name = "PAL-H";
88 break;
89 case V4L2_STD_PAL_I:
90 name = "PAL-I";
91 break;
92 case V4L2_STD_PAL_D:
93 name = "PAL-D";
94 break;
95 case V4L2_STD_PAL_D1:
96 name = "PAL-D1";
97 break;
98 case V4L2_STD_PAL_K:
99 name = "PAL-K";
100 break;
101 case V4L2_STD_PAL_M:
102 name = "PAL-M";
103 break;
104 case V4L2_STD_PAL_N:
105 name = "PAL-N";
106 break;
107 case V4L2_STD_PAL_Nc:
108 name = "PAL-Nc";
109 break;
110 case V4L2_STD_PAL_60:
111 name = "PAL-60";
112 break;
113 case V4L2_STD_NTSC:
114 name = "NTSC";
115 break;
116 case V4L2_STD_NTSC_M:
117 name = "NTSC-M";
118 break;
119 case V4L2_STD_NTSC_M_JP:
120 name = "NTSC-M-JP";
121 break;
122 case V4L2_STD_NTSC_443:
123 name = "NTSC-443";
124 break;
125 case V4L2_STD_NTSC_M_KR:
126 name = "NTSC-M-KR";
127 break;
128 case V4L2_STD_SECAM:
129 name = "SECAM";
130 break;
131 case V4L2_STD_SECAM_DK:
132 name = "SECAM-DK";
133 break;
134 case V4L2_STD_SECAM_B:
135 name = "SECAM-B";
136 break;
137 case V4L2_STD_SECAM_D:
138 name = "SECAM-D";
139 break;
140 case V4L2_STD_SECAM_G:
141 name = "SECAM-G";
142 break;
143 case V4L2_STD_SECAM_H:
144 name = "SECAM-H";
145 break;
146 case V4L2_STD_SECAM_K:
147 name = "SECAM-K";
148 break;
149 case V4L2_STD_SECAM_K1:
150 name = "SECAM-K1";
151 break;
152 case V4L2_STD_SECAM_L:
153 name = "SECAM-L";
154 break;
155 case V4L2_STD_SECAM_LC:
156 name = "SECAM-LC";
157 break;
158 default:
159 name = "Unknown";
160 break;
161 }
162
163 return name;
164}
165EXPORT_SYMBOL(v4l2_norm_to_name);
166
167/* Fill in the fields of a v4l2_standard structure according to the
168 'id' and 'transmission' parameters. Returns negative on error. */
169int v4l2_video_std_construct(struct v4l2_standard *vs,
170 int id, char *name)
171{
172 u32 index = vs->index;
173
174 memset(vs, 0, sizeof(struct v4l2_standard));
175 vs->index = index;
176 vs->id = id;
177 if (id & V4L2_STD_525_60) {
178 vs->frameperiod.numerator = 1001;
179 vs->frameperiod.denominator = 30000;
180 vs->framelines = 525;
181 } else {
182 vs->frameperiod.numerator = 1;
183 vs->frameperiod.denominator = 25;
184 vs->framelines = 625;
185 }
186 strlcpy(vs->name, name, sizeof(vs->name));
187 return 0;
188}
189EXPORT_SYMBOL(v4l2_video_std_construct);
190
191/* ----------------------------------------------------------------- */
192/* some arrays for pretty-printing debug messages of enum types */
193
194char *v4l2_field_names[] = {
195 [V4L2_FIELD_ANY] = "any",
196 [V4L2_FIELD_NONE] = "none",
197 [V4L2_FIELD_TOP] = "top",
198 [V4L2_FIELD_BOTTOM] = "bottom",
199 [V4L2_FIELD_INTERLACED] = "interlaced",
200 [V4L2_FIELD_SEQ_TB] = "seq-tb",
201 [V4L2_FIELD_SEQ_BT] = "seq-bt",
202 [V4L2_FIELD_ALTERNATE] = "alternate",
203 [V4L2_FIELD_INTERLACED_TB] = "interlaced-tb",
204 [V4L2_FIELD_INTERLACED_BT] = "interlaced-bt",
205};
206EXPORT_SYMBOL(v4l2_field_names);
207
208char *v4l2_type_names[] = {
209 [V4L2_BUF_TYPE_VIDEO_CAPTURE] = "video-cap",
210 [V4L2_BUF_TYPE_VIDEO_OVERLAY] = "video-over",
211 [V4L2_BUF_TYPE_VIDEO_OUTPUT] = "video-out",
212 [V4L2_BUF_TYPE_VBI_CAPTURE] = "vbi-cap",
213 [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out",
214 [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-cap",
215 [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out",
216 [V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "video-out-over",
217};
218EXPORT_SYMBOL(v4l2_type_names);
219
220static char *v4l2_memory_names[] = {
221 [V4L2_MEMORY_MMAP] = "mmap",
222 [V4L2_MEMORY_USERPTR] = "userptr",
223 [V4L2_MEMORY_OVERLAY] = "overlay",
224};
225
226#define prt_names(a, arr) ((((a) >= 0) && ((a) < ARRAY_SIZE(arr))) ? \
227 arr[a] : "unknown")
228
229/* ------------------------------------------------------------------ */
230/* debug help functions */
231
232#ifdef CONFIG_VIDEO_V4L1_COMPAT
233static const char *v4l1_ioctls[] = {
234 [_IOC_NR(VIDIOCGCAP)] = "VIDIOCGCAP",
235 [_IOC_NR(VIDIOCGCHAN)] = "VIDIOCGCHAN",
236 [_IOC_NR(VIDIOCSCHAN)] = "VIDIOCSCHAN",
237 [_IOC_NR(VIDIOCGTUNER)] = "VIDIOCGTUNER",
238 [_IOC_NR(VIDIOCSTUNER)] = "VIDIOCSTUNER",
239 [_IOC_NR(VIDIOCGPICT)] = "VIDIOCGPICT",
240 [_IOC_NR(VIDIOCSPICT)] = "VIDIOCSPICT",
241 [_IOC_NR(VIDIOCCAPTURE)] = "VIDIOCCAPTURE",
242 [_IOC_NR(VIDIOCGWIN)] = "VIDIOCGWIN",
243 [_IOC_NR(VIDIOCSWIN)] = "VIDIOCSWIN",
244 [_IOC_NR(VIDIOCGFBUF)] = "VIDIOCGFBUF",
245 [_IOC_NR(VIDIOCSFBUF)] = "VIDIOCSFBUF",
246 [_IOC_NR(VIDIOCKEY)] = "VIDIOCKEY",
247 [_IOC_NR(VIDIOCGFREQ)] = "VIDIOCGFREQ",
248 [_IOC_NR(VIDIOCSFREQ)] = "VIDIOCSFREQ",
249 [_IOC_NR(VIDIOCGAUDIO)] = "VIDIOCGAUDIO",
250 [_IOC_NR(VIDIOCSAUDIO)] = "VIDIOCSAUDIO",
251 [_IOC_NR(VIDIOCSYNC)] = "VIDIOCSYNC",
252 [_IOC_NR(VIDIOCMCAPTURE)] = "VIDIOCMCAPTURE",
253 [_IOC_NR(VIDIOCGMBUF)] = "VIDIOCGMBUF",
254 [_IOC_NR(VIDIOCGUNIT)] = "VIDIOCGUNIT",
255 [_IOC_NR(VIDIOCGCAPTURE)] = "VIDIOCGCAPTURE",
256 [_IOC_NR(VIDIOCSCAPTURE)] = "VIDIOCSCAPTURE",
257 [_IOC_NR(VIDIOCSPLAYMODE)] = "VIDIOCSPLAYMODE",
258 [_IOC_NR(VIDIOCSWRITEMODE)] = "VIDIOCSWRITEMODE",
259 [_IOC_NR(VIDIOCGPLAYINFO)] = "VIDIOCGPLAYINFO",
260 [_IOC_NR(VIDIOCSMICROCODE)] = "VIDIOCSMICROCODE",
261 [_IOC_NR(VIDIOCGVBIFMT)] = "VIDIOCGVBIFMT",
262 [_IOC_NR(VIDIOCSVBIFMT)] = "VIDIOCSVBIFMT"
263};
264#define V4L1_IOCTLS ARRAY_SIZE(v4l1_ioctls)
265#endif
266
267static const char *v4l2_ioctls[] = {
268 [_IOC_NR(VIDIOC_QUERYCAP)] = "VIDIOC_QUERYCAP",
269 [_IOC_NR(VIDIOC_RESERVED)] = "VIDIOC_RESERVED",
270 [_IOC_NR(VIDIOC_ENUM_FMT)] = "VIDIOC_ENUM_FMT",
271 [_IOC_NR(VIDIOC_G_FMT)] = "VIDIOC_G_FMT",
272 [_IOC_NR(VIDIOC_S_FMT)] = "VIDIOC_S_FMT",
273 [_IOC_NR(VIDIOC_REQBUFS)] = "VIDIOC_REQBUFS",
274 [_IOC_NR(VIDIOC_QUERYBUF)] = "VIDIOC_QUERYBUF",
275 [_IOC_NR(VIDIOC_G_FBUF)] = "VIDIOC_G_FBUF",
276 [_IOC_NR(VIDIOC_S_FBUF)] = "VIDIOC_S_FBUF",
277 [_IOC_NR(VIDIOC_OVERLAY)] = "VIDIOC_OVERLAY",
278 [_IOC_NR(VIDIOC_QBUF)] = "VIDIOC_QBUF",
279 [_IOC_NR(VIDIOC_DQBUF)] = "VIDIOC_DQBUF",
280 [_IOC_NR(VIDIOC_STREAMON)] = "VIDIOC_STREAMON",
281 [_IOC_NR(VIDIOC_STREAMOFF)] = "VIDIOC_STREAMOFF",
282 [_IOC_NR(VIDIOC_G_PARM)] = "VIDIOC_G_PARM",
283 [_IOC_NR(VIDIOC_S_PARM)] = "VIDIOC_S_PARM",
284 [_IOC_NR(VIDIOC_G_STD)] = "VIDIOC_G_STD",
285 [_IOC_NR(VIDIOC_S_STD)] = "VIDIOC_S_STD",
286 [_IOC_NR(VIDIOC_ENUMSTD)] = "VIDIOC_ENUMSTD",
287 [_IOC_NR(VIDIOC_ENUMINPUT)] = "VIDIOC_ENUMINPUT",
288 [_IOC_NR(VIDIOC_G_CTRL)] = "VIDIOC_G_CTRL",
289 [_IOC_NR(VIDIOC_S_CTRL)] = "VIDIOC_S_CTRL",
290 [_IOC_NR(VIDIOC_G_TUNER)] = "VIDIOC_G_TUNER",
291 [_IOC_NR(VIDIOC_S_TUNER)] = "VIDIOC_S_TUNER",
292 [_IOC_NR(VIDIOC_G_AUDIO)] = "VIDIOC_G_AUDIO",
293 [_IOC_NR(VIDIOC_S_AUDIO)] = "VIDIOC_S_AUDIO",
294 [_IOC_NR(VIDIOC_QUERYCTRL)] = "VIDIOC_QUERYCTRL",
295 [_IOC_NR(VIDIOC_QUERYMENU)] = "VIDIOC_QUERYMENU",
296 [_IOC_NR(VIDIOC_G_INPUT)] = "VIDIOC_G_INPUT",
297 [_IOC_NR(VIDIOC_S_INPUT)] = "VIDIOC_S_INPUT",
298 [_IOC_NR(VIDIOC_G_OUTPUT)] = "VIDIOC_G_OUTPUT",
299 [_IOC_NR(VIDIOC_S_OUTPUT)] = "VIDIOC_S_OUTPUT",
300 [_IOC_NR(VIDIOC_ENUMOUTPUT)] = "VIDIOC_ENUMOUTPUT",
301 [_IOC_NR(VIDIOC_G_AUDOUT)] = "VIDIOC_G_AUDOUT",
302 [_IOC_NR(VIDIOC_S_AUDOUT)] = "VIDIOC_S_AUDOUT",
303 [_IOC_NR(VIDIOC_G_MODULATOR)] = "VIDIOC_G_MODULATOR",
304 [_IOC_NR(VIDIOC_S_MODULATOR)] = "VIDIOC_S_MODULATOR",
305 [_IOC_NR(VIDIOC_G_FREQUENCY)] = "VIDIOC_G_FREQUENCY",
306 [_IOC_NR(VIDIOC_S_FREQUENCY)] = "VIDIOC_S_FREQUENCY",
307 [_IOC_NR(VIDIOC_CROPCAP)] = "VIDIOC_CROPCAP",
308 [_IOC_NR(VIDIOC_G_CROP)] = "VIDIOC_G_CROP",
309 [_IOC_NR(VIDIOC_S_CROP)] = "VIDIOC_S_CROP",
310 [_IOC_NR(VIDIOC_G_JPEGCOMP)] = "VIDIOC_G_JPEGCOMP",
311 [_IOC_NR(VIDIOC_S_JPEGCOMP)] = "VIDIOC_S_JPEGCOMP",
312 [_IOC_NR(VIDIOC_QUERYSTD)] = "VIDIOC_QUERYSTD",
313 [_IOC_NR(VIDIOC_TRY_FMT)] = "VIDIOC_TRY_FMT",
314 [_IOC_NR(VIDIOC_ENUMAUDIO)] = "VIDIOC_ENUMAUDIO",
315 [_IOC_NR(VIDIOC_ENUMAUDOUT)] = "VIDIOC_ENUMAUDOUT",
316 [_IOC_NR(VIDIOC_G_PRIORITY)] = "VIDIOC_G_PRIORITY",
317 [_IOC_NR(VIDIOC_S_PRIORITY)] = "VIDIOC_S_PRIORITY",
318 [_IOC_NR(VIDIOC_G_SLICED_VBI_CAP)] = "VIDIOC_G_SLICED_VBI_CAP",
319 [_IOC_NR(VIDIOC_LOG_STATUS)] = "VIDIOC_LOG_STATUS",
320 [_IOC_NR(VIDIOC_G_EXT_CTRLS)] = "VIDIOC_G_EXT_CTRLS",
321 [_IOC_NR(VIDIOC_S_EXT_CTRLS)] = "VIDIOC_S_EXT_CTRLS",
322 [_IOC_NR(VIDIOC_TRY_EXT_CTRLS)] = "VIDIOC_TRY_EXT_CTRLS",
323#if 1
324 [_IOC_NR(VIDIOC_ENUM_FRAMESIZES)] = "VIDIOC_ENUM_FRAMESIZES",
325 [_IOC_NR(VIDIOC_ENUM_FRAMEINTERVALS)] = "VIDIOC_ENUM_FRAMEINTERVALS",
326 [_IOC_NR(VIDIOC_G_ENC_INDEX)] = "VIDIOC_G_ENC_INDEX",
327 [_IOC_NR(VIDIOC_ENCODER_CMD)] = "VIDIOC_ENCODER_CMD",
328 [_IOC_NR(VIDIOC_TRY_ENCODER_CMD)] = "VIDIOC_TRY_ENCODER_CMD",
329
330 [_IOC_NR(VIDIOC_DBG_S_REGISTER)] = "VIDIOC_DBG_S_REGISTER",
331 [_IOC_NR(VIDIOC_DBG_G_REGISTER)] = "VIDIOC_DBG_G_REGISTER",
332
333 [_IOC_NR(VIDIOC_G_CHIP_IDENT)] = "VIDIOC_G_CHIP_IDENT",
334#endif
335};
336#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
337
338static const char *v4l2_int_ioctls[] = {
339#ifdef CONFIG_VIDEO_V4L1_COMPAT
340 [_IOC_NR(DECODER_GET_CAPABILITIES)] = "DECODER_GET_CAPABILITIES",
341 [_IOC_NR(DECODER_GET_STATUS)] = "DECODER_GET_STATUS",
342 [_IOC_NR(DECODER_SET_NORM)] = "DECODER_SET_NORM",
343 [_IOC_NR(DECODER_SET_INPUT)] = "DECODER_SET_INPUT",
344 [_IOC_NR(DECODER_SET_OUTPUT)] = "DECODER_SET_OUTPUT",
345 [_IOC_NR(DECODER_ENABLE_OUTPUT)] = "DECODER_ENABLE_OUTPUT",
346 [_IOC_NR(DECODER_SET_PICTURE)] = "DECODER_SET_PICTURE",
347 [_IOC_NR(DECODER_SET_GPIO)] = "DECODER_SET_GPIO",
348 [_IOC_NR(DECODER_INIT)] = "DECODER_INIT",
349 [_IOC_NR(DECODER_SET_VBI_BYPASS)] = "DECODER_SET_VBI_BYPASS",
350 [_IOC_NR(DECODER_DUMP)] = "DECODER_DUMP",
351#endif
352 [_IOC_NR(AUDC_SET_RADIO)] = "AUDC_SET_RADIO",
353
354 [_IOC_NR(TUNER_SET_TYPE_ADDR)] = "TUNER_SET_TYPE_ADDR",
355 [_IOC_NR(TUNER_SET_STANDBY)] = "TUNER_SET_STANDBY",
356 [_IOC_NR(TUNER_SET_CONFIG)] = "TUNER_SET_CONFIG",
357
358 [_IOC_NR(VIDIOC_INT_S_TUNER_MODE)] = "VIDIOC_INT_S_TUNER_MODE",
359 [_IOC_NR(VIDIOC_INT_RESET)] = "VIDIOC_INT_RESET",
360 [_IOC_NR(VIDIOC_INT_AUDIO_CLOCK_FREQ)] = "VIDIOC_INT_AUDIO_CLOCK_FREQ",
361 [_IOC_NR(VIDIOC_INT_DECODE_VBI_LINE)] = "VIDIOC_INT_DECODE_VBI_LINE",
362 [_IOC_NR(VIDIOC_INT_S_VBI_DATA)] = "VIDIOC_INT_S_VBI_DATA",
363 [_IOC_NR(VIDIOC_INT_G_VBI_DATA)] = "VIDIOC_INT_G_VBI_DATA",
364 [_IOC_NR(VIDIOC_INT_I2S_CLOCK_FREQ)] = "VIDIOC_INT_I2S_CLOCK_FREQ",
365 [_IOC_NR(VIDIOC_INT_S_STANDBY)] = "VIDIOC_INT_S_STANDBY",
366 [_IOC_NR(VIDIOC_INT_S_AUDIO_ROUTING)] = "VIDIOC_INT_S_AUDIO_ROUTING",
367 [_IOC_NR(VIDIOC_INT_G_AUDIO_ROUTING)] = "VIDIOC_INT_G_AUDIO_ROUTING",
368 [_IOC_NR(VIDIOC_INT_S_VIDEO_ROUTING)] = "VIDIOC_INT_S_VIDEO_ROUTING",
369 [_IOC_NR(VIDIOC_INT_G_VIDEO_ROUTING)] = "VIDIOC_INT_G_VIDEO_ROUTING",
370 [_IOC_NR(VIDIOC_INT_S_CRYSTAL_FREQ)] = "VIDIOC_INT_S_CRYSTAL_FREQ",
371 [_IOC_NR(VIDIOC_INT_INIT)] = "VIDIOC_INT_INIT",
372 [_IOC_NR(VIDIOC_INT_G_STD_OUTPUT)] = "VIDIOC_INT_G_STD_OUTPUT",
373 [_IOC_NR(VIDIOC_INT_S_STD_OUTPUT)] = "VIDIOC_INT_S_STD_OUTPUT",
374};
375#define V4L2_INT_IOCTLS ARRAY_SIZE(v4l2_int_ioctls)
376
 377/* Common ioctl debug function. This function can be used by drivers
 378   to log externally issued ioctls as well as internal V4L ioctls. */
379void v4l_printk_ioctl(unsigned int cmd)
380{
381 char *dir;
382
383 switch (_IOC_DIR(cmd)) {
384 case _IOC_NONE: dir = "--"; break;
385 case _IOC_READ: dir = "r-"; break;
386 case _IOC_WRITE: dir = "-w"; break;
387 case _IOC_READ | _IOC_WRITE: dir = "rw"; break;
388 default: dir = "*ERR*"; break;
389 }
390 switch (_IOC_TYPE(cmd)) {
391 case 'd':
392 printk("v4l2_int ioctl %s, dir=%s (0x%08x)\n",
393 (_IOC_NR(cmd) < V4L2_INT_IOCTLS) ?
394 v4l2_int_ioctls[_IOC_NR(cmd)] : "UNKNOWN", dir, cmd);
395 break;
396#ifdef CONFIG_VIDEO_V4L1_COMPAT
397 case 'v':
398 printk("v4l1 ioctl %s, dir=%s (0x%08x)\n",
399 (_IOC_NR(cmd) < V4L1_IOCTLS) ?
400 v4l1_ioctls[_IOC_NR(cmd)] : "UNKNOWN", dir, cmd);
401 break;
402#endif
403 case 'V':
404 printk("v4l2 ioctl %s, dir=%s (0x%08x)\n",
405 (_IOC_NR(cmd) < V4L2_IOCTLS) ?
406 v4l2_ioctls[_IOC_NR(cmd)] : "UNKNOWN", dir, cmd);
407 break;
408
409 default:
410 printk("unknown ioctl '%c', dir=%s, #%d (0x%08x)\n",
411 _IOC_TYPE(cmd), dir, _IOC_NR(cmd), cmd);
412 }
413}
414EXPORT_SYMBOL(v4l_printk_ioctl);
415
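A minimal sketch (not in this patch) of how a driver could call the debug helper above from its ioctl entry point; mydrv_debug and mydrv_do_ioctl are hypothetical names used only for the example.

    /* Hedged example: log every ioctl before dispatching it. */
    static int mydrv_ioctl(struct inode *inode, struct file *file,
    		       unsigned int cmd, unsigned long arg)
    {
    	if (mydrv_debug)
    		v4l_printk_ioctl(cmd);	/* e.g. "v4l2 ioctl VIDIOC_QUERYCAP, dir=rw (0x...)" */
    	return mydrv_do_ioctl(inode, file, cmd, arg);
    }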
53/* 416/*
54 * sysfs stuff 417 * sysfs stuff
55 */ 418 */
@@ -69,11 +432,13 @@ struct video_device *video_device_alloc(void)
69 vfd = kzalloc(sizeof(*vfd),GFP_KERNEL); 432 vfd = kzalloc(sizeof(*vfd),GFP_KERNEL);
70 return vfd; 433 return vfd;
71} 434}
435EXPORT_SYMBOL(video_device_alloc);
72 436
73void video_device_release(struct video_device *vfd) 437void video_device_release(struct video_device *vfd)
74{ 438{
75 kfree(vfd); 439 kfree(vfd);
76} 440}
441EXPORT_SYMBOL(video_device_release);
77 442
78static void video_release(struct device *cd) 443static void video_release(struct device *cd)
79{ 444{
@@ -110,6 +475,7 @@ struct video_device* video_devdata(struct file *file)
110{ 475{
111 return video_device[iminor(file->f_path.dentry->d_inode)]; 476 return video_device[iminor(file->f_path.dentry->d_inode)];
112} 477}
478EXPORT_SYMBOL(video_devdata);
113 479
114/* 480/*
115 * Open a video device - FIXME: Obsoleted 481 * Open a video device - FIXME: Obsoleted
@@ -278,6 +644,7 @@ out:
278 kfree(mbuf); 644 kfree(mbuf);
279 return err; 645 return err;
280} 646}
647EXPORT_SYMBOL(video_usercopy);
281 648
282/* 649/*
283 * open/release helper functions -- handle exclusive opens 650 * open/release helper functions -- handle exclusive opens
@@ -297,6 +664,7 @@ int video_exclusive_open(struct inode *inode, struct file *file)
297 mutex_unlock(&vfl->lock); 664 mutex_unlock(&vfl->lock);
298 return retval; 665 return retval;
299} 666}
667EXPORT_SYMBOL(video_exclusive_open);
300 668
301int video_exclusive_release(struct inode *inode, struct file *file) 669int video_exclusive_release(struct inode *inode, struct file *file)
302{ 670{
@@ -305,41 +673,7 @@ int video_exclusive_release(struct inode *inode, struct file *file)
305 vfl->users--; 673 vfl->users--;
306 return 0; 674 return 0;
307} 675}
308 676EXPORT_SYMBOL(video_exclusive_release);
309static char *v4l2_memory_names[] = {
310 [V4L2_MEMORY_MMAP] = "mmap",
311 [V4L2_MEMORY_USERPTR] = "userptr",
312 [V4L2_MEMORY_OVERLAY] = "overlay",
313};
314
315
316/* FIXME: Those stuff are replicated also on v4l2-common.c */
317static char *v4l2_type_names_FIXME[] = {
318 [V4L2_BUF_TYPE_VIDEO_CAPTURE] = "video-cap",
319 [V4L2_BUF_TYPE_VIDEO_OVERLAY] = "video-over",
320 [V4L2_BUF_TYPE_VIDEO_OUTPUT] = "video-out",
321 [V4L2_BUF_TYPE_VBI_CAPTURE] = "vbi-cap",
322 [V4L2_BUF_TYPE_VBI_OUTPUT] = "vbi-out",
323 [V4L2_BUF_TYPE_SLICED_VBI_OUTPUT] = "sliced-vbi-out",
324 [V4L2_BUF_TYPE_SLICED_VBI_CAPTURE] = "sliced-vbi-capture",
325 [V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY] = "video-out-over",
326 [V4L2_BUF_TYPE_PRIVATE] = "private",
327};
328
329static char *v4l2_field_names_FIXME[] = {
330 [V4L2_FIELD_ANY] = "any",
331 [V4L2_FIELD_NONE] = "none",
332 [V4L2_FIELD_TOP] = "top",
333 [V4L2_FIELD_BOTTOM] = "bottom",
334 [V4L2_FIELD_INTERLACED] = "interlaced",
335 [V4L2_FIELD_SEQ_TB] = "seq-tb",
336 [V4L2_FIELD_SEQ_BT] = "seq-bt",
337 [V4L2_FIELD_ALTERNATE] = "alternate",
338 [V4L2_FIELD_INTERLACED_TB] = "interlaced-tb",
339 [V4L2_FIELD_INTERLACED_BT] = "interlaced-bt",
340};
341
342#define prt_names(a,arr) (((a)>=0)&&((a)<ARRAY_SIZE(arr)))?arr[a]:"unknown"
343 677
344static void dbgbuf(unsigned int cmd, struct video_device *vfd, 678static void dbgbuf(unsigned int cmd, struct video_device *vfd,
345 struct v4l2_buffer *p) 679 struct v4l2_buffer *p)
@@ -354,10 +688,10 @@ static void dbgbuf(unsigned int cmd, struct video_device *vfd,
354 (int)(p->timestamp.tv_sec%60), 688 (int)(p->timestamp.tv_sec%60),
355 p->timestamp.tv_usec, 689 p->timestamp.tv_usec,
356 p->index, 690 p->index,
357 prt_names(p->type,v4l2_type_names_FIXME), 691 prt_names(p->type, v4l2_type_names),
358 p->bytesused,p->flags, 692 p->bytesused, p->flags,
359 p->field,p->sequence, 693 p->field, p->sequence,
360 prt_names(p->memory,v4l2_memory_names), 694 prt_names(p->memory, v4l2_memory_names),
361 p->m.userptr, p->length); 695 p->m.userptr, p->length);
362 dbgarg2 ("timecode= %02d:%02d:%02d type=%d, " 696 dbgarg2 ("timecode= %02d:%02d:%02d type=%d, "
363 "flags=0x%08d, frames=%d, userbits=0x%08x\n", 697 "flags=0x%08d, frames=%d, userbits=0x%08x\n",
@@ -382,8 +716,8 @@ static inline void v4l_print_pix_fmt (struct video_device *vfd,
382 (fmt->pixelformat >> 8) & 0xff, 716 (fmt->pixelformat >> 8) & 0xff,
383 (fmt->pixelformat >> 16) & 0xff, 717 (fmt->pixelformat >> 16) & 0xff,
384 (fmt->pixelformat >> 24) & 0xff, 718 (fmt->pixelformat >> 24) & 0xff,
385 prt_names(fmt->field,v4l2_field_names_FIXME), 719 prt_names(fmt->field, v4l2_field_names),
386 fmt->bytesperline,fmt->sizeimage,fmt->colorspace); 720 fmt->bytesperline, fmt->sizeimage, fmt->colorspace);
387}; 721};
388 722
389 723
@@ -597,7 +931,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
597 931
598 /* FIXME: Should be one dump per type */ 932 /* FIXME: Should be one dump per type */
599 dbgarg (cmd, "type=%s\n", prt_names(type, 933 dbgarg (cmd, "type=%s\n", prt_names(type,
600 v4l2_type_names_FIXME)); 934 v4l2_type_names));
601 935
602 switch (type) { 936 switch (type) {
603 case V4L2_BUF_TYPE_VIDEO_CAPTURE: 937 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
@@ -650,7 +984,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
650 984
651 /* FIXME: Should be one dump per type */ 985 /* FIXME: Should be one dump per type */
652 dbgarg (cmd, "type=%s\n", prt_names(f->type, 986 dbgarg (cmd, "type=%s\n", prt_names(f->type,
653 v4l2_type_names_FIXME)); 987 v4l2_type_names));
654 988
655 switch (f->type) { 989 switch (f->type) {
656 case V4L2_BUF_TYPE_VIDEO_CAPTURE: 990 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
@@ -702,7 +1036,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
702 1036
703 /* FIXME: Should be one dump per type */ 1037 /* FIXME: Should be one dump per type */
704 dbgarg (cmd, "type=%s\n", prt_names(f->type, 1038 dbgarg (cmd, "type=%s\n", prt_names(f->type,
705 v4l2_type_names_FIXME)); 1039 v4l2_type_names));
706 switch (f->type) { 1040 switch (f->type) {
707 case V4L2_BUF_TYPE_VIDEO_CAPTURE: 1041 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
708 if (vfd->vidioc_try_fmt_cap) 1042 if (vfd->vidioc_try_fmt_cap)
@@ -768,8 +1102,8 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
768 ret=vfd->vidioc_reqbufs(file, fh, p); 1102 ret=vfd->vidioc_reqbufs(file, fh, p);
769 dbgarg (cmd, "count=%d, type=%s, memory=%s\n", 1103 dbgarg (cmd, "count=%d, type=%s, memory=%s\n",
770 p->count, 1104 p->count,
771 prt_names(p->type,v4l2_type_names_FIXME), 1105 prt_names(p->type, v4l2_type_names),
772 prt_names(p->memory,v4l2_memory_names)); 1106 prt_names(p->memory, v4l2_memory_names));
773 break; 1107 break;
774 } 1108 }
775 case VIDIOC_QUERYBUF: 1109 case VIDIOC_QUERYBUF:
@@ -858,7 +1192,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
858 enum v4l2_buf_type i = *(int *)arg; 1192 enum v4l2_buf_type i = *(int *)arg;
859 if (!vfd->vidioc_streamon) 1193 if (!vfd->vidioc_streamon)
860 break; 1194 break;
861 dbgarg (cmd, "type=%s\n", prt_names(i,v4l2_type_names_FIXME)); 1195 dbgarg(cmd, "type=%s\n", prt_names(i, v4l2_type_names));
862 ret=vfd->vidioc_streamon(file, fh,i); 1196 ret=vfd->vidioc_streamon(file, fh,i);
863 break; 1197 break;
864 } 1198 }
@@ -868,7 +1202,7 @@ static int __video_do_ioctl(struct inode *inode, struct file *file,
868 1202
869 if (!vfd->vidioc_streamoff) 1203 if (!vfd->vidioc_streamoff)
870 break; 1204 break;
871 dbgarg (cmd, "type=%s\n", prt_names(i,v4l2_type_names_FIXME)); 1205 dbgarg(cmd, "type=%s\n", prt_names(i, v4l2_type_names));
872 ret=vfd->vidioc_streamoff(file, fh, i); 1206 ret=vfd->vidioc_streamoff(file, fh, i);
873 break; 1207 break;
874 } 1208 }
@@ -1624,7 +1958,7 @@ out:
1624 kfree(mbuf); 1958 kfree(mbuf);
1625 return err; 1959 return err;
1626} 1960}
1627 1961EXPORT_SYMBOL(video_ioctl2);
1628 1962
1629static const struct file_operations video_fops; 1963static const struct file_operations video_fops;
1630 1964
@@ -1743,6 +2077,7 @@ fail_minor:
1743 mutex_unlock(&videodev_lock); 2077 mutex_unlock(&videodev_lock);
1744 return ret; 2078 return ret;
1745} 2079}
2080EXPORT_SYMBOL(video_register_device);
1746 2081
1747/** 2082/**
1748 * video_unregister_device - unregister a video4linux device 2083 * video_unregister_device - unregister a video4linux device
@@ -1762,6 +2097,7 @@ void video_unregister_device(struct video_device *vfd)
1762 device_unregister(&vfd->class_dev); 2097 device_unregister(&vfd->class_dev);
1763 mutex_unlock(&videodev_lock); 2098 mutex_unlock(&videodev_lock);
1764} 2099}
2100EXPORT_SYMBOL(video_unregister_device);
1765 2101
1766/* 2102/*
1767 * Video fs operations 2103 * Video fs operations
@@ -1806,16 +2142,6 @@ static void __exit videodev_exit(void)
1806module_init(videodev_init) 2142module_init(videodev_init)
1807module_exit(videodev_exit) 2143module_exit(videodev_exit)
1808 2144
1809EXPORT_SYMBOL(video_register_device);
1810EXPORT_SYMBOL(video_unregister_device);
1811EXPORT_SYMBOL(video_devdata);
1812EXPORT_SYMBOL(video_usercopy);
1813EXPORT_SYMBOL(video_exclusive_open);
1814EXPORT_SYMBOL(video_exclusive_release);
1815EXPORT_SYMBOL(video_ioctl2);
1816EXPORT_SYMBOL(video_device_alloc);
1817EXPORT_SYMBOL(video_device_release);
1818
1819MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@infradead.org>"); 2145MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@infradead.org>");
1820MODULE_DESCRIPTION("Device registrar for Video4Linux drivers v2"); 2146MODULE_DESCRIPTION("Device registrar for Video4Linux drivers v2");
1821MODULE_LICENSE("GPL"); 2147MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/zoran.h b/drivers/media/video/zoran.h
index 937c4a616c0e..498a43c1f2b1 100644
--- a/drivers/media/video/zoran.h
+++ b/drivers/media/video/zoran.h
@@ -221,15 +221,15 @@ enum zoran_map_mode {
221}; 221};
222 222
223enum gpio_type { 223enum gpio_type {
224 GPIO_JPEG_SLEEP = 0, 224 ZR_GPIO_JPEG_SLEEP = 0,
225 GPIO_JPEG_RESET, 225 ZR_GPIO_JPEG_RESET,
226 GPIO_JPEG_FRAME, 226 ZR_GPIO_JPEG_FRAME,
227 GPIO_VID_DIR, 227 ZR_GPIO_VID_DIR,
228 GPIO_VID_EN, 228 ZR_GPIO_VID_EN,
229 GPIO_VID_RESET, 229 ZR_GPIO_VID_RESET,
230 GPIO_CLK_SEL1, 230 ZR_GPIO_CLK_SEL1,
231 GPIO_CLK_SEL2, 231 ZR_GPIO_CLK_SEL2,
232 GPIO_MAX, 232 ZR_GPIO_MAX,
233}; 233};
234 234
235enum gpcs_type { 235enum gpcs_type {
@@ -378,11 +378,11 @@ struct card_info {
378 378
379 u32 jpeg_int; /* JPEG interrupt */ 379 u32 jpeg_int; /* JPEG interrupt */
380 u32 vsync_int; /* VSYNC interrupt */ 380 u32 vsync_int; /* VSYNC interrupt */
381 s8 gpio[GPIO_MAX]; 381 s8 gpio[ZR_GPIO_MAX];
382 u8 gpcs[GPCS_MAX]; 382 u8 gpcs[GPCS_MAX];
383 383
384 struct vfe_polarity vfe_pol; 384 struct vfe_polarity vfe_pol;
385 u8 gpio_pol[GPIO_MAX]; 385 u8 gpio_pol[ZR_GPIO_MAX];
386 386
 387 /* is the /GWS line connected? */ 387
388 u8 gws_not_connected; 388 u8 gws_not_connected;
diff --git a/drivers/media/video/zoran_device.c b/drivers/media/video/zoran_device.c
index 68c7c505587e..f97c20692057 100644
--- a/drivers/media/video/zoran_device.c
+++ b/drivers/media/video/zoran_device.c
@@ -250,7 +250,7 @@ void
250jpeg_codec_sleep (struct zoran *zr, 250jpeg_codec_sleep (struct zoran *zr,
251 int sleep) 251 int sleep)
252{ 252{
253 GPIO(zr, zr->card.gpio[GPIO_JPEG_SLEEP], !sleep); 253 GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_SLEEP], !sleep);
254 if (!sleep) { 254 if (!sleep) {
255 dprintk(3, 255 dprintk(3,
256 KERN_DEBUG 256 KERN_DEBUG
@@ -277,9 +277,9 @@ jpeg_codec_reset (struct zoran *zr)
277 0); 277 0);
278 udelay(2); 278 udelay(2);
279 } else { 279 } else {
280 GPIO(zr, zr->card.gpio[GPIO_JPEG_RESET], 0); 280 GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_RESET], 0);
281 udelay(2); 281 udelay(2);
282 GPIO(zr, zr->card.gpio[GPIO_JPEG_RESET], 1); 282 GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_RESET], 1);
283 udelay(2); 283 udelay(2);
284 } 284 }
285 285
@@ -688,7 +688,7 @@ static inline void
688set_frame (struct zoran *zr, 688set_frame (struct zoran *zr,
689 int val) 689 int val)
690{ 690{
691 GPIO(zr, zr->card.gpio[GPIO_JPEG_FRAME], val); 691 GPIO(zr, zr->card.gpio[ZR_GPIO_JPEG_FRAME], val);
692} 692}
693 693
694static void 694static void
@@ -704,8 +704,8 @@ set_videobus_dir (struct zoran *zr,
704 GPIO(zr, 5, 1); 704 GPIO(zr, 5, 1);
705 break; 705 break;
706 default: 706 default:
707 GPIO(zr, zr->card.gpio[GPIO_VID_DIR], 707 GPIO(zr, zr->card.gpio[ZR_GPIO_VID_DIR],
708 zr->card.gpio_pol[GPIO_VID_DIR] ? !val : val); 708 zr->card.gpio_pol[ZR_GPIO_VID_DIR] ? !val : val);
709 break; 709 break;
710 } 710 }
711} 711}
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index 1fdbb46de7f3..1b44784d0efb 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -93,6 +93,8 @@ static struct usb_device_id device_table[] = {
93 {USB_DEVICE(0x06d6, 0x0034), .driver_info = METHOD0 }, 93 {USB_DEVICE(0x06d6, 0x0034), .driver_info = METHOD0 },
94 {USB_DEVICE(0x0a17, 0x0062), .driver_info = METHOD2 }, 94 {USB_DEVICE(0x0a17, 0x0062), .driver_info = METHOD2 },
95 {USB_DEVICE(0x06d6, 0x003b), .driver_info = METHOD0 }, 95 {USB_DEVICE(0x06d6, 0x003b), .driver_info = METHOD0 },
96 {USB_DEVICE(0x0a17, 0x004e), .driver_info = METHOD2 },
97 {USB_DEVICE(0x041e, 0x405d), .driver_info = METHOD2 },
96 {} /* Terminating entry */ 98 {} /* Terminating entry */
97}; 99};
98 100
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index f55b71a4337d..4fb24215bd95 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -282,7 +282,7 @@ static int tifm_ms_issue_cmd(struct tifm_ms *host)
282 282
283 writel(TIFM_MS_SYS_LATCH 283 writel(TIFM_MS_SYS_LATCH
284 | readl(sock->addr + SOCK_MS_SYSTEM), 284 | readl(sock->addr + SOCK_MS_SYSTEM),
285 sock + SOCK_MS_SYSTEM); 285 sock->addr + SOCK_MS_SYSTEM);
286 writel(0, sock->addr + SOCK_MS_DATA); 286 writel(0, sock->addr + SOCK_MS_DATA);
287 dev_dbg(&sock->dev, "writing %x\n", 0); 287 dev_dbg(&sock->dev, "writing %x\n", 0);
288 288
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 425f60c21fdd..0c303c84b37b 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1470,9 +1470,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1470 if (mpt_debug_level) 1470 if (mpt_debug_level)
1471 printk(KERN_INFO MYNAM ": mpt_debug_level=%xh\n", mpt_debug_level); 1471 printk(KERN_INFO MYNAM ": mpt_debug_level=%xh\n", mpt_debug_level);
1472 1472
1473 if (pci_enable_device(pdev))
1474 return r;
1475
1476 ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC); 1473 ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC);
1477 if (ioc == NULL) { 1474 if (ioc == NULL) {
1478 printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n"); 1475 printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
@@ -1482,6 +1479,20 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1482 ioc->id = mpt_ids++; 1479 ioc->id = mpt_ids++;
1483 sprintf(ioc->name, "ioc%d", ioc->id); 1480 sprintf(ioc->name, "ioc%d", ioc->id);
1484 1481
1482 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
1483 if (pci_enable_device_mem(pdev)) {
1484 printk(MYIOC_s_ERR_FMT "pci_enable_device_mem() "
1485 "failed\n", ioc->name);
1486 kfree(ioc);
1487 return r;
1488 }
1489 if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) {
1490 printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with "
1491 "MEM failed\n", ioc->name);
1492 kfree(ioc);
1493 return r;
1494 }
1495
1485 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": mpt_adapter_install\n", ioc->name)); 1496 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": mpt_adapter_install\n", ioc->name));
1486 1497
1487 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { 1498 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
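The hunk above switches mpt_attach() from plain pci_enable_device() to the selective-BAR API so only the memory BARs are enabled and reserved. A generic, hedged sketch of that pattern, with the region name and error codes chosen for illustration:

    /* Hedged example: enable only the memory BARs of a PCI device and
     * reserve them, cleaning up on failure. */
    static int example_enable_mem_bars(struct pci_dev *pdev)
    {
    	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

    	if (pci_enable_device_mem(pdev))
    		return -ENODEV;
    	if (pci_request_selected_regions(pdev, bars, "example-drv")) {
    		pci_disable_device(pdev);
    		return -EBUSY;
    	}
    	return 0;	/* teardown later calls pci_release_selected_regions() */
    }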
@@ -1658,6 +1669,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1658 ioc->active = 0; 1669 ioc->active = 0;
1659 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 1670 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1660 1671
1672 /* Set IOC ptr in the pcidev's driver data. */
1673 pci_set_drvdata(ioc->pcidev, ioc);
1674
1661 /* Set lookup ptr. */ 1675 /* Set lookup ptr. */
1662 list_add_tail(&ioc->list, &ioc_list); 1676 list_add_tail(&ioc->list, &ioc_list);
1663 1677
@@ -1791,6 +1805,7 @@ mpt_suspend(struct pci_dev *pdev, pm_message_t state)
1791 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); 1805 CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
1792 1806
1793 pci_disable_device(pdev); 1807 pci_disable_device(pdev);
1808 pci_release_selected_regions(pdev, ioc->bars);
1794 pci_set_power_state(pdev, device_state); 1809 pci_set_power_state(pdev, device_state);
1795 1810
1796 return 0; 1811 return 0;
@@ -1807,7 +1822,6 @@ mpt_resume(struct pci_dev *pdev)
1807 MPT_ADAPTER *ioc = pci_get_drvdata(pdev); 1822 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1808 u32 device_state = pdev->current_state; 1823 u32 device_state = pdev->current_state;
1809 int recovery_state; 1824 int recovery_state;
1810 int err;
1811 1825
1812 printk(MYIOC_s_INFO_FMT 1826 printk(MYIOC_s_INFO_FMT
1813 "pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n", 1827 "pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n",
@@ -1815,9 +1829,18 @@ mpt_resume(struct pci_dev *pdev)
1815 1829
1816 pci_set_power_state(pdev, 0); 1830 pci_set_power_state(pdev, 0);
1817 pci_restore_state(pdev); 1831 pci_restore_state(pdev);
1818 err = pci_enable_device(pdev); 1832 if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) {
1819 if (err) 1833 ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM |
1820 return err; 1834 IORESOURCE_IO);
1835 if (pci_enable_device(pdev))
1836 return 0;
1837 } else {
1838 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
1839 if (pci_enable_device_mem(pdev))
1840 return 0;
1841 }
1842 if (pci_request_selected_regions(pdev, ioc->bars, "mpt"))
1843 return 0;
1821 1844
1822 /* enable interrupts */ 1845 /* enable interrupts */
1823 CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM); 1846 CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
@@ -1878,6 +1901,7 @@ mpt_signal_reset(u8 index, MPT_ADAPTER *ioc, int reset_phase)
1878 * -2 if READY but IOCFacts Failed 1901 * -2 if READY but IOCFacts Failed
1879 * -3 if READY but PrimeIOCFifos Failed 1902 * -3 if READY but PrimeIOCFifos Failed
1880 * -4 if READY but IOCInit Failed 1903 * -4 if READY but IOCInit Failed
1904 * -5 if failed to enable_device and/or request_selected_regions
1881 */ 1905 */
1882static int 1906static int
1883mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) 1907mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
@@ -1976,6 +2000,18 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1976 } 2000 }
1977 } 2001 }
1978 2002
2003 if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP) &&
2004 (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)) {
2005 pci_release_selected_regions(ioc->pcidev, ioc->bars);
2006 ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM |
2007 IORESOURCE_IO);
2008 if (pci_enable_device(ioc->pcidev))
2009 return -5;
2010 if (pci_request_selected_regions(ioc->pcidev, ioc->bars,
2011 "mpt"))
2012 return -5;
2013 }
2014
1979 /* 2015 /*
1980 * Device is reset now. It must have de-asserted the interrupt line 2016 * Device is reset now. It must have de-asserted the interrupt line
1981 * (if it was asserted) and it should be safe to register for the 2017 * (if it was asserted) and it should be safe to register for the
@@ -1999,7 +2035,6 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
1999 irq_allocated = 1; 2035 irq_allocated = 1;
2000 ioc->pci_irq = ioc->pcidev->irq; 2036 ioc->pci_irq = ioc->pcidev->irq;
2001 pci_set_master(ioc->pcidev); /* ?? */ 2037 pci_set_master(ioc->pcidev); /* ?? */
2002 pci_set_drvdata(ioc->pcidev, ioc);
2003 dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt " 2038 dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt "
2004 "%d\n", ioc->name, ioc->pcidev->irq)); 2039 "%d\n", ioc->name, ioc->pcidev->irq));
2005 } 2040 }
@@ -2381,6 +2416,9 @@ mpt_adapter_dispose(MPT_ADAPTER *ioc)
2381 ioc->memmap = NULL; 2416 ioc->memmap = NULL;
2382 } 2417 }
2383 2418
2419 pci_disable_device(ioc->pcidev);
2420 pci_release_selected_regions(ioc->pcidev, ioc->bars);
2421
2384#if defined(CONFIG_MTRR) && 0 2422#if defined(CONFIG_MTRR) && 0
2385 if (ioc->mtrr_reg > 0) { 2423 if (ioc->mtrr_reg > 0) {
2386 mtrr_del(ioc->mtrr_reg, 0, 0); 2424 mtrr_del(ioc->mtrr_reg, 0, 0);
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b49b706c0020..caadc68c3000 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -629,6 +629,7 @@ typedef struct _MPT_ADAPTER
629 dma_addr_t HostPageBuffer_dma; 629 dma_addr_t HostPageBuffer_dma;
630 int mtrr_reg; 630 int mtrr_reg;
631 struct pci_dev *pcidev; /* struct pci_dev pointer */ 631 struct pci_dev *pcidev; /* struct pci_dev pointer */
632 int bars; /* bitmask of BAR's that must be configured */
632 u8 __iomem *memmap; /* mmap address */ 633 u8 __iomem *memmap; /* mmap address */
633 struct Scsi_Host *sh; /* Scsi Host pointer */ 634 struct Scsi_Host *sh; /* Scsi Host pointer */
634 SpiCfgData spi_data; /* Scsi config. data */ 635 SpiCfgData spi_data; /* Scsi config. data */
@@ -922,7 +923,7 @@ extern struct proc_dir_entry *mpt_proc_root_dir;
922/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 923/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
923#endif /* } __KERNEL__ */ 924#endif /* } __KERNEL__ */
924 925
925#if defined(__alpha__) || defined(__sparc_v9__) || defined(__ia64__) || defined(__x86_64__) || defined(__powerpc__) 926#ifdef CONFIG_64BIT
926#define CAST_U32_TO_PTR(x) ((void *)(u64)x) 927#define CAST_U32_TO_PTR(x) ((void *)(u64)x)
927#define CAST_PTR_TO_U32(x) ((u32)(u64)x) 928#define CAST_PTR_TO_U32(x) ((u32)(u64)x)
928#else 929#else
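The change above replaces a hand-maintained list of 64-bit architectures with the generic CONFIG_64BIT switch. A hedged sketch of the same idiom for a hypothetical driver header:

    /* Hedged example: widen through u64 only on 64-bit builds so that a
     * 32-bit handle stored in hardware registers round-trips cleanly. */
    #ifdef CONFIG_64BIT
    #define EXAMPLE_U32_TO_PTR(x)	((void *)(u64)(x))
    #define EXAMPLE_PTR_TO_U32(x)	((u32)(u64)(x))
    #else
    #define EXAMPLE_U32_TO_PTR(x)	((void *)(x))
    #define EXAMPLE_PTR_TO_U32(x)	((u32)(x))
    #endif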
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 1abc95ca9dfa..982e27b86d10 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -258,6 +258,23 @@ config THINKPAD_ACPI_BAY
258 258
259 If you are not sure, say Y here. 259 If you are not sure, say Y here.
260 260
261config THINKPAD_ACPI_VIDEO
262 bool "Video output control support"
263 depends on THINKPAD_ACPI
264 default y
265 ---help---
266 Allows the thinkpad_acpi driver to provide an interface to control
267 the various video output ports.
268
269 This feature often won't work well, depending on ThinkPad model,
 270 display state, video output devices in use, whether there is an X
271 server running, phase of the moon, and the current mood of
272 Schroedinger's cat. If you can use X.org's RandR to control
273 your ThinkPad's video output ports instead of this feature,
274 don't think twice: do it and say N here to save some memory.
275
276 If you are not sure, say Y here.
277
261config THINKPAD_ACPI_HOTKEY_POLL 278config THINKPAD_ACPI_HOTKEY_POLL
262 bool "Suport NVRAM polling for hot keys" 279 bool "Suport NVRAM polling for hot keys"
263 depends on THINKPAD_ACPI 280 depends on THINKPAD_ACPI
diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c
index d7aea93081f2..74d12b4a3abd 100644
--- a/drivers/misc/acer-wmi.c
+++ b/drivers/misc/acer-wmi.c
@@ -273,6 +273,15 @@ static struct dmi_system_id acer_quirks[] = {
273 }, 273 },
274 { 274 {
275 .callback = dmi_matched, 275 .callback = dmi_matched,
276 .ident = "Acer TravelMate 4200",
277 .matches = {
278 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
279 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4200"),
280 },
281 .driver_data = &quirk_acer_travelmate_2490,
282 },
283 {
284 .callback = dmi_matched,
276 .ident = "Medion MD 98300", 285 .ident = "Medion MD 98300",
277 .matches = { 286 .matches = {
278 DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), 287 DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
diff --git a/drivers/misc/intel_menlow.c b/drivers/misc/intel_menlow.c
index f70984ab1e1b..de16e88eb8d3 100644
--- a/drivers/misc/intel_menlow.c
+++ b/drivers/misc/intel_menlow.c
@@ -170,10 +170,13 @@ static int intel_menlow_memory_add(struct acpi_device *device)
170 170
171 cdev = thermal_cooling_device_register("Memory controller", device, 171 cdev = thermal_cooling_device_register("Memory controller", device,
172 &memory_cooling_ops); 172 &memory_cooling_ops);
173 acpi_driver_data(device) = cdev; 173 if (IS_ERR(cdev)) {
174 if (!cdev) 174 result = PTR_ERR(cdev);
175 result = -ENODEV; 175 goto end;
176 else { 176 }
177
178 if (cdev) {
179 acpi_driver_data(device) = cdev;
177 result = sysfs_create_link(&device->dev.kobj, 180 result = sysfs_create_link(&device->dev.kobj,
178 &cdev->device.kobj, "thermal_cooling"); 181 &cdev->device.kobj, "thermal_cooling");
179 if (result) 182 if (result)
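The fix above accounts for thermal_cooling_device_register() returning an ERR_PTR-encoded error rather than NULL. A hedged sketch of that check in isolation; the function name and example_cooling_ops are placeholders:

    /* Hedged example: an ERR_PTR-returning registration must be tested
     * with IS_ERR()/PTR_ERR(), not with a NULL check. */
    static int example_register_cooling(struct acpi_device *device)
    {
    	struct thermal_cooling_device *cdev;

    	cdev = thermal_cooling_device_register("Memory controller", device,
    					       &example_cooling_ops);
    	if (IS_ERR(cdev))
    		return PTR_ERR(cdev);	/* e.g. -ENOMEM from the thermal core */

    	acpi_driver_data(device) = cdev;
    	return 0;
    }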
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
index 7ba1acad5402..bb269d0c677e 100644
--- a/drivers/misc/thinkpad_acpi.c
+++ b/drivers/misc/thinkpad_acpi.c
@@ -221,6 +221,7 @@ static struct {
221 u32 hotkey:1; 221 u32 hotkey:1;
222 u32 hotkey_mask:1; 222 u32 hotkey_mask:1;
223 u32 hotkey_wlsw:1; 223 u32 hotkey_wlsw:1;
224 u32 hotkey_tablet:1;
224 u32 light:1; 225 u32 light:1;
225 u32 light_status:1; 226 u32 light_status:1;
226 u32 bright_16levels:1; 227 u32 bright_16levels:1;
@@ -301,6 +302,13 @@ TPACPI_HANDLE(hkey, ec, "\\_SB.HKEY", /* 600e/x, 770e, 770x */
301 "HKEY", /* all others */ 302 "HKEY", /* all others */
302 ); /* 570 */ 303 ); /* 570 */
303 304
305TPACPI_HANDLE(vid, root, "\\_SB.PCI.AGP.VGA", /* 570 */
306 "\\_SB.PCI0.AGP0.VID0", /* 600e/x, 770x */
307 "\\_SB.PCI0.VID0", /* 770e */
308 "\\_SB.PCI0.VID", /* A21e, G4x, R50e, X30, X40 */
309 "\\_SB.PCI0.AGP.VID", /* all others */
310 ); /* R30, R31 */
311
304 312
305/************************************************************************* 313/*************************************************************************
306 * ACPI helpers 314 * ACPI helpers
@@ -1053,6 +1061,9 @@ static struct attribute_set *hotkey_dev_attributes;
1053#define HOTKEY_CONFIG_CRITICAL_END 1061#define HOTKEY_CONFIG_CRITICAL_END
1054#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */ 1062#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
1055 1063
1064/* HKEY.MHKG() return bits */
1065#define TP_HOTKEY_TABLET_MASK (1 << 3)
1066
1056static int hotkey_get_wlsw(int *status) 1067static int hotkey_get_wlsw(int *status)
1057{ 1068{
1058 if (!acpi_evalf(hkey_handle, status, "WLSW", "d")) 1069 if (!acpi_evalf(hkey_handle, status, "WLSW", "d"))
@@ -1060,6 +1071,16 @@ static int hotkey_get_wlsw(int *status)
1060 return 0; 1071 return 0;
1061} 1072}
1062 1073
1074static int hotkey_get_tablet_mode(int *status)
1075{
1076 int s;
1077
1078 if (!acpi_evalf(hkey_handle, &s, "MHKG", "d"))
1079 return -EIO;
1080
1081 return ((s & TP_HOTKEY_TABLET_MASK) != 0);
1082}
1083
1063/* 1084/*
1064 * Call with hotkey_mutex held 1085 * Call with hotkey_mutex held
1065 */ 1086 */
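For context, a hedged sketch of what the new tablet-mode bit means in practice; acpi_evalf() is thinkpad_acpi's local ACPI helper and the wrapper function name here is hypothetical.

    /* Hedged example: HKEY.MHKG bit 3 reports the swivel position on
     * X41t/X60t/X61t tablets; set means "tablet", clear means "laptop". */
    static void example_log_tablet_mode(void)
    {
    	int status;

    	if (acpi_evalf(hkey_handle, &status, "MHKG", "d"))
    		printk(TPACPI_INFO "ThinkPad is in %s mode\n",
    		       (status & TP_HOTKEY_TABLET_MASK) ? "tablet" : "laptop");
    }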
@@ -1154,15 +1175,31 @@ static void tpacpi_input_send_radiosw(void)
1154{ 1175{
1155 int wlsw; 1176 int wlsw;
1156 1177
1157 mutex_lock(&tpacpi_inputdev_send_mutex);
1158
1159 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw)) { 1178 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw)) {
1179 mutex_lock(&tpacpi_inputdev_send_mutex);
1180
1160 input_report_switch(tpacpi_inputdev, 1181 input_report_switch(tpacpi_inputdev,
1161 SW_RADIO, !!wlsw); 1182 SW_RADIO, !!wlsw);
1162 input_sync(tpacpi_inputdev); 1183 input_sync(tpacpi_inputdev);
1184
1185 mutex_unlock(&tpacpi_inputdev_send_mutex);
1163 } 1186 }
1187}
1188
1189static void tpacpi_input_send_tabletsw(void)
1190{
1191 int state;
1192
1193 if (tp_features.hotkey_tablet &&
1194 !hotkey_get_tablet_mode(&state)) {
1195 mutex_lock(&tpacpi_inputdev_send_mutex);
1164 1196
1165 mutex_unlock(&tpacpi_inputdev_send_mutex); 1197 input_report_switch(tpacpi_inputdev,
1198 SW_TABLET_MODE, !!state);
1199 input_sync(tpacpi_inputdev);
1200
1201 mutex_unlock(&tpacpi_inputdev_send_mutex);
1202 }
1166} 1203}
1167 1204
1168static void tpacpi_input_send_key(unsigned int scancode) 1205static void tpacpi_input_send_key(unsigned int scancode)
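A hedged sketch of the input-subsystem pattern used above, reduced to its essentials; the function name and device pointer are placeholders, and EV_SW/SW_TABLET_MODE must have been declared in the device's capability bits before registration (as the later hotkey_init() hunk does).

    /* Hedged example: report a tablet-mode switch state change. */
    static void example_report_tablet_switch(struct input_dev *idev, int in_tablet)
    {
    	input_report_switch(idev, SW_TABLET_MODE, !!in_tablet);
    	input_sync(idev);
    }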
@@ -1417,6 +1454,14 @@ static void hotkey_poll_setup_safe(int may_warn)
1417 mutex_unlock(&hotkey_mutex); 1454 mutex_unlock(&hotkey_mutex);
1418} 1455}
1419 1456
1457#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
1458
1459static void hotkey_poll_setup_safe(int __unused)
1460{
1461}
1462
1463#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
1464
1420static int hotkey_inputdev_open(struct input_dev *dev) 1465static int hotkey_inputdev_open(struct input_dev *dev)
1421{ 1466{
1422 switch (tpacpi_lifecycle) { 1467 switch (tpacpi_lifecycle) {
@@ -1444,7 +1489,6 @@ static void hotkey_inputdev_close(struct input_dev *dev)
1444 if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING) 1489 if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING)
1445 hotkey_poll_setup_safe(0); 1490 hotkey_poll_setup_safe(0);
1446} 1491}
1447#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
1448 1492
1449/* sysfs hotkey enable ------------------------------------------------- */ 1493/* sysfs hotkey enable ------------------------------------------------- */
1450static ssize_t hotkey_enable_show(struct device *dev, 1494static ssize_t hotkey_enable_show(struct device *dev,
@@ -1666,6 +1710,29 @@ static void hotkey_radio_sw_notify_change(void)
1666 "hotkey_radio_sw"); 1710 "hotkey_radio_sw");
1667} 1711}
1668 1712
1713/* sysfs hotkey tablet mode (pollable) --------------------------------- */
1714static ssize_t hotkey_tablet_mode_show(struct device *dev,
1715 struct device_attribute *attr,
1716 char *buf)
1717{
1718 int res, s;
1719 res = hotkey_get_tablet_mode(&s);
1720 if (res < 0)
1721 return res;
1722
1723 return snprintf(buf, PAGE_SIZE, "%d\n", !!s);
1724}
1725
1726static struct device_attribute dev_attr_hotkey_tablet_mode =
1727 __ATTR(hotkey_tablet_mode, S_IRUGO, hotkey_tablet_mode_show, NULL);
1728
1729static void hotkey_tablet_mode_notify_change(void)
1730{
1731 if (tp_features.hotkey_tablet)
1732 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
1733 "hotkey_tablet_mode");
1734}
1735
1669/* sysfs hotkey report_mode -------------------------------------------- */ 1736/* sysfs hotkey report_mode -------------------------------------------- */
1670static ssize_t hotkey_report_mode_show(struct device *dev, 1737static ssize_t hotkey_report_mode_show(struct device *dev,
1671 struct device_attribute *attr, 1738 struct device_attribute *attr,
@@ -1689,7 +1756,7 @@ static ssize_t hotkey_wakeup_reason_show(struct device *dev,
1689static struct device_attribute dev_attr_hotkey_wakeup_reason = 1756static struct device_attribute dev_attr_hotkey_wakeup_reason =
1690 __ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL); 1757 __ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL);
1691 1758
1692void hotkey_wakeup_reason_notify_change(void) 1759static void hotkey_wakeup_reason_notify_change(void)
1693{ 1760{
1694 if (tp_features.hotkey_mask) 1761 if (tp_features.hotkey_mask)
1695 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, 1762 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
@@ -1708,7 +1775,7 @@ static struct device_attribute dev_attr_hotkey_wakeup_hotunplug_complete =
1708 __ATTR(wakeup_hotunplug_complete, S_IRUGO, 1775 __ATTR(wakeup_hotunplug_complete, S_IRUGO,
1709 hotkey_wakeup_hotunplug_complete_show, NULL); 1776 hotkey_wakeup_hotunplug_complete_show, NULL);
1710 1777
1711void hotkey_wakeup_hotunplug_complete_notify_change(void) 1778static void hotkey_wakeup_hotunplug_complete_notify_change(void)
1712{ 1779{
1713 if (tp_features.hotkey_mask) 1780 if (tp_features.hotkey_mask)
1714 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, 1781 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
@@ -1878,7 +1945,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
1878 str_supported(tp_features.hotkey)); 1945 str_supported(tp_features.hotkey));
1879 1946
1880 if (tp_features.hotkey) { 1947 if (tp_features.hotkey) {
1881 hotkey_dev_attributes = create_attr_set(12, NULL); 1948 hotkey_dev_attributes = create_attr_set(13, NULL);
1882 if (!hotkey_dev_attributes) 1949 if (!hotkey_dev_attributes)
1883 return -ENOMEM; 1950 return -ENOMEM;
1884 res = add_many_to_attr_set(hotkey_dev_attributes, 1951 res = add_many_to_attr_set(hotkey_dev_attributes,
@@ -1957,6 +2024,18 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
1957 &dev_attr_hotkey_radio_sw.attr); 2024 &dev_attr_hotkey_radio_sw.attr);
1958 } 2025 }
1959 2026
2027 /* For X41t, X60t, X61t Tablets... */
2028 if (!res && acpi_evalf(hkey_handle, &status, "MHKG", "qd")) {
2029 tp_features.hotkey_tablet = 1;
2030 printk(TPACPI_INFO
2031 "possible tablet mode switch found; "
2032 "ThinkPad in %s mode\n",
2033 (status & TP_HOTKEY_TABLET_MASK)?
2034 "tablet" : "laptop");
2035 res = add_to_attr_set(hotkey_dev_attributes,
2036 &dev_attr_hotkey_tablet_mode.attr);
2037 }
2038
1960 if (!res) 2039 if (!res)
1961 res = register_attr_set_with_sysfs( 2040 res = register_attr_set_with_sysfs(
1962 hotkey_dev_attributes, 2041 hotkey_dev_attributes,
@@ -2006,6 +2085,10 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
2006 set_bit(EV_SW, tpacpi_inputdev->evbit); 2085 set_bit(EV_SW, tpacpi_inputdev->evbit);
2007 set_bit(SW_RADIO, tpacpi_inputdev->swbit); 2086 set_bit(SW_RADIO, tpacpi_inputdev->swbit);
2008 } 2087 }
2088 if (tp_features.hotkey_tablet) {
2089 set_bit(EV_SW, tpacpi_inputdev->evbit);
2090 set_bit(SW_TABLET_MODE, tpacpi_inputdev->swbit);
2091 }
2009 2092
2010 dbg_printk(TPACPI_DBG_INIT, 2093 dbg_printk(TPACPI_DBG_INIT,
2011 "enabling hot key handling\n"); 2094 "enabling hot key handling\n");
@@ -2023,12 +2106,12 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
2023 (hotkey_report_mode < 2) ? 2106 (hotkey_report_mode < 2) ?
2024 "enabled" : "disabled"); 2107 "enabled" : "disabled");
2025 2108
2026#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2027 tpacpi_inputdev->open = &hotkey_inputdev_open; 2109 tpacpi_inputdev->open = &hotkey_inputdev_open;
2028 tpacpi_inputdev->close = &hotkey_inputdev_close; 2110 tpacpi_inputdev->close = &hotkey_inputdev_close;
2029 2111
2030 hotkey_poll_setup_safe(1); 2112 hotkey_poll_setup_safe(1);
2031#endif 2113 tpacpi_input_send_radiosw();
2114 tpacpi_input_send_tabletsw();
2032 } 2115 }
2033 2116
2034 return (tp_features.hotkey)? 0 : 1; 2117 return (tp_features.hotkey)? 0 : 1;
@@ -2156,11 +2239,15 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
2156 /* 0x5000-0x5FFF: human interface helpers */ 2239 /* 0x5000-0x5FFF: human interface helpers */
2157 switch (hkey) { 2240 switch (hkey) {
2158 case 0x5010: /* Lenovo new BIOS: brightness changed */ 2241 case 0x5010: /* Lenovo new BIOS: brightness changed */
2159 case 0x5009: /* X61t: swivel up (tablet mode) */
2160 case 0x500a: /* X61t: swivel down (normal mode) */
2161 case 0x500b: /* X61t: tablet pen inserted into bay */ 2242 case 0x500b: /* X61t: tablet pen inserted into bay */
2162 case 0x500c: /* X61t: tablet pen removed from bay */ 2243 case 0x500c: /* X61t: tablet pen removed from bay */
2163 break; 2244 break;
2245 case 0x5009: /* X41t-X61t: swivel up (tablet mode) */
2246 case 0x500a: /* X41t-X61t: swivel down (normal mode) */
2247 tpacpi_input_send_tabletsw();
2248 hotkey_tablet_mode_notify_change();
2249 send_acpi_ev = 0;
2250 break;
2164 case 0x5001: 2251 case 0x5001:
2165 case 0x5002: 2252 case 0x5002:
2166 /* LID switch events. Do not propagate */ 2253 /* LID switch events. Do not propagate */
@@ -2219,11 +2306,10 @@ static void hotkey_resume(void)
2219 "from firmware\n"); 2306 "from firmware\n");
2220 tpacpi_input_send_radiosw(); 2307 tpacpi_input_send_radiosw();
2221 hotkey_radio_sw_notify_change(); 2308 hotkey_radio_sw_notify_change();
2309 hotkey_tablet_mode_notify_change();
2222 hotkey_wakeup_reason_notify_change(); 2310 hotkey_wakeup_reason_notify_change();
2223 hotkey_wakeup_hotunplug_complete_notify_change(); 2311 hotkey_wakeup_hotunplug_complete_notify_change();
2224#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2225 hotkey_poll_setup_safe(0); 2312 hotkey_poll_setup_safe(0);
2226#endif
2227} 2313}
2228 2314
2229/* procfs -------------------------------------------------------------- */ 2315/* procfs -------------------------------------------------------------- */
@@ -2676,6 +2762,8 @@ static struct ibm_struct wan_driver_data = {
2676 * Video subdriver 2762 * Video subdriver
2677 */ 2763 */
2678 2764
2765#ifdef CONFIG_THINKPAD_ACPI_VIDEO
2766
2679enum video_access_mode { 2767enum video_access_mode {
2680 TPACPI_VIDEO_NONE = 0, 2768 TPACPI_VIDEO_NONE = 0,
2681 TPACPI_VIDEO_570, /* 570 */ 2769 TPACPI_VIDEO_570, /* 570 */
@@ -2703,13 +2791,6 @@ static int video_orig_autosw;
2703static int video_autosw_get(void); 2791static int video_autosw_get(void);
2704static int video_autosw_set(int enable); 2792static int video_autosw_set(int enable);
2705 2793
2706TPACPI_HANDLE(vid, root, "\\_SB.PCI.AGP.VGA", /* 570 */
2707 "\\_SB.PCI0.AGP0.VID0", /* 600e/x, 770x */
2708 "\\_SB.PCI0.VID0", /* 770e */
2709 "\\_SB.PCI0.VID", /* A21e, G4x, R50e, X30, X40 */
2710 "\\_SB.PCI0.AGP.VID", /* all others */
2711 ); /* R30, R31 */
2712
2713TPACPI_HANDLE(vid2, root, "\\_SB.PCI0.AGPB.VID"); /* G41 */ 2794TPACPI_HANDLE(vid2, root, "\\_SB.PCI0.AGPB.VID"); /* G41 */
2714 2795
2715static int __init video_init(struct ibm_init_struct *iibm) 2796static int __init video_init(struct ibm_init_struct *iibm)
@@ -3019,6 +3100,8 @@ static struct ibm_struct video_driver_data = {
3019 .exit = video_exit, 3100 .exit = video_exit,
3020}; 3101};
3021 3102
3103#endif /* CONFIG_THINKPAD_ACPI_VIDEO */
3104
3022/************************************************************************* 3105/*************************************************************************
3023 * Light (thinklight) subdriver 3106 * Light (thinklight) subdriver
3024 */ 3107 */
@@ -5803,10 +5886,12 @@ static struct ibm_init_struct ibms_init[] __initdata = {
5803 .init = wan_init, 5886 .init = wan_init,
5804 .data = &wan_driver_data, 5887 .data = &wan_driver_data,
5805 }, 5888 },
5889#ifdef CONFIG_THINKPAD_ACPI_VIDEO
5806 { 5890 {
5807 .init = video_init, 5891 .init = video_init,
5808 .data = &video_driver_data, 5892 .data = &video_driver_data,
5809 }, 5893 },
5894#endif
5810 { 5895 {
5811 .init = light_init, 5896 .init = light_init,
5812 .data = &light_driver_data, 5897 .data = &light_driver_data,
@@ -5918,7 +6003,7 @@ MODULE_PARM_DESC(hotkey_report_mode,
5918 6003
5919#define TPACPI_PARAM(feature) \ 6004#define TPACPI_PARAM(feature) \
5920 module_param_call(feature, set_ibm_param, NULL, NULL, 0); \ 6005 module_param_call(feature, set_ibm_param, NULL, NULL, 0); \
5921 MODULE_PARM_DESC(feature, "Simulates thinkpad-aci procfs command " \ 6006 MODULE_PARM_DESC(feature, "Simulates thinkpad-acpi procfs command " \
5922 "at module load, see documentation") 6007 "at module load, see documentation")
5923 6008
5924TPACPI_PARAM(hotkey); 6009TPACPI_PARAM(hotkey);
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 9b430f20b640..28cc6787a800 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -184,26 +184,26 @@ int get_sb_mtd(struct file_system_type *fs_type, int flags,
184 ret = path_lookup(dev_name, LOOKUP_FOLLOW, &nd); 184 ret = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
185 185
186 DEBUG(1, "MTDSB: path_lookup() returned %d, inode %p\n", 186 DEBUG(1, "MTDSB: path_lookup() returned %d, inode %p\n",
187 ret, nd.dentry ? nd.dentry->d_inode : NULL); 187 ret, nd.path.dentry ? nd.path.dentry->d_inode : NULL);
188 188
189 if (ret) 189 if (ret)
190 return ret; 190 return ret;
191 191
192 ret = -EINVAL; 192 ret = -EINVAL;
193 193
194 if (!S_ISBLK(nd.dentry->d_inode->i_mode)) 194 if (!S_ISBLK(nd.path.dentry->d_inode->i_mode))
195 goto out; 195 goto out;
196 196
197 if (nd.mnt->mnt_flags & MNT_NODEV) { 197 if (nd.path.mnt->mnt_flags & MNT_NODEV) {
198 ret = -EACCES; 198 ret = -EACCES;
199 goto out; 199 goto out;
200 } 200 }
201 201
202 if (imajor(nd.dentry->d_inode) != MTD_BLOCK_MAJOR) 202 if (imajor(nd.path.dentry->d_inode) != MTD_BLOCK_MAJOR)
203 goto not_an_MTD_device; 203 goto not_an_MTD_device;
204 204
205 mtdnr = iminor(nd.dentry->d_inode); 205 mtdnr = iminor(nd.path.dentry->d_inode);
206 path_release(&nd); 206 path_put(&nd.path);
207 207
208 return get_sb_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super, 208 return get_sb_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super,
209 mnt); 209 mnt);
@@ -214,7 +214,7 @@ not_an_MTD_device:
214 "MTD: Attempt to mount non-MTD device \"%s\"\n", 214 "MTD: Attempt to mount non-MTD device \"%s\"\n",
215 dev_name); 215 dev_name);
216out: 216out:
217 path_release(&nd); 217 path_put(&nd.path);
218 return ret; 218 return ret;
219 219
220} 220}
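The hunk above tracks the VFS change that embedded a struct path inside struct nameidata: nd.dentry and nd.mnt became nd.path.dentry and nd.path.mnt, and path_release() became path_put(). A hedged sketch of the updated lookup pattern; the function name is a placeholder:

    /* Hedged example: look up a block-device path and drop the reference
     * through the embedded struct path. */
    static int example_lookup_blkdev(const char *dev_name)
    {
    	struct nameidata nd;
    	int err;

    	err = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
    	if (err)
    		return err;

    	if (!S_ISBLK(nd.path.dentry->d_inode->i_mode))
    		err = -EINVAL;

    	path_put(&nd.path);	/* replaces the old path_release(&nd) */
    	return err;
    }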
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index eef6fecfff2a..be6e918456d9 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -168,7 +168,7 @@ static int debug = -1;
168 * Warning: 64K ring has hardware issues and may lock up. 168 * Warning: 64K ring has hardware issues and may lock up.
169 */ 169 */
170#if defined(CONFIG_SH_DREAMCAST) 170#if defined(CONFIG_SH_DREAMCAST)
171#define RX_BUF_IDX 1 /* 16K ring */ 171#define RX_BUF_IDX 0 /* 8K ring */
172#else 172#else
173#define RX_BUF_IDX 2 /* 32K ring */ 173#define RX_BUF_IDX 2 /* 32K ring */
174#endif 174#endif
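For reference on why the index change above halves the Dreamcast DMA ring: in this driver the receive-buffer length is derived by shifting 8 KiB left by the index, so the new comment matches the new value. A hedged restatement of that arithmetic under an illustrative macro name (the driver's own length macro may be spelled differently):

    /* Hedged example: index 0 -> 8 KiB, 1 -> 16 KiB, 2 -> 32 KiB, 3 -> 64 KiB. */
    #define EXAMPLE_RX_BUF_LEN	(8192 << RX_BUF_IDX)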
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 50c2b60e1fee..f337800076c0 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -931,6 +931,14 @@ config ENC28J60_WRITEVERIFY
931 Enable verification after each buffer write; useful for debugging. 931
932 If unsure, say N. 932 If unsure, say N.
933 933
934config DM9000_DEBUGLEVEL
935 int "DM9000 maximum debug level"
936 depends on DM9000
937 default 4
938 help
939 The maximum level of debugging code compiled into the DM9000
940 driver.
941
934config SMC911X 942config SMC911X
935 tristate "SMSC LAN911[5678] support" 943 tristate "SMSC LAN911[5678] support"
936 select CRC32 944 select CRC32
@@ -2352,6 +2360,16 @@ config GELIC_NET
2352 To compile this driver as a module, choose M here: the 2360 To compile this driver as a module, choose M here: the
2353 module will be called ps3_gelic. 2361 module will be called ps3_gelic.
2354 2362
2363config GELIC_WIRELESS
2364 bool "PS3 Wireless support"
2365 depends on GELIC_NET
2366 help
 2367 This option adds support for the wireless feature of the PS3.
 2368 If you have a model without wireless, or have no plan to use
 2369 the wireless feature, disabling this option saves memory. As
 2370 the driver automatically distinguishes between the models, you
 2371 can safely enable this option even on a model without wireless.
2372
2355config GIANFAR 2373config GIANFAR
2356 tristate "Gianfar Ethernet" 2374 tristate "Gianfar Ethernet"
2357 depends on FSL_SOC 2375 depends on FSL_SOC
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 9fc7794e88ea..3b1ea321dc05 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -70,7 +70,8 @@ obj-$(CONFIG_BNX2X) += bnx2x.o
70spidernet-y += spider_net.o spider_net_ethtool.o 70spidernet-y += spider_net.o spider_net_ethtool.o
71obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o 71obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o
72obj-$(CONFIG_GELIC_NET) += ps3_gelic.o 72obj-$(CONFIG_GELIC_NET) += ps3_gelic.o
73ps3_gelic-objs += ps3_gelic_net.o 73gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o
74ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
74obj-$(CONFIG_TC35815) += tc35815.o 75obj-$(CONFIG_TC35815) += tc35815.o
75obj-$(CONFIG_SKGE) += skge.o 76obj-$(CONFIG_SKGE) += skge.o
76obj-$(CONFIG_SKY2) += sky2.o 77obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 471c7f3e8a4a..15853be4680a 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
56 56
57#define DRV_MODULE_NAME "bnx2" 57#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": " 58#define PFX DRV_MODULE_NAME ": "
59#define DRV_MODULE_VERSION "1.7.3" 59#define DRV_MODULE_VERSION "1.7.4"
60#define DRV_MODULE_RELDATE "January 29, 2008" 60#define DRV_MODULE_RELDATE "February 18, 2008"
61 61
62#define RUN_AT(x) (jiffies + (x)) 62#define RUN_AT(x) (jiffies + (x))
63 63
@@ -1273,14 +1273,20 @@ bnx2_set_link(struct bnx2 *bp)
1273 1273
1274 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && 1274 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1275 (CHIP_NUM(bp) == CHIP_NUM_5706)) { 1275 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1276 u32 val; 1276 u32 val, an_dbg;
1277 1277
1278 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) { 1278 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1279 bnx2_5706s_force_link_dn(bp, 0); 1279 bnx2_5706s_force_link_dn(bp, 0);
1280 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN; 1280 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1281 } 1281 }
1282 val = REG_RD(bp, BNX2_EMAC_STATUS); 1282 val = REG_RD(bp, BNX2_EMAC_STATUS);
1283 if (val & BNX2_EMAC_STATUS_LINK) 1283
1284 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1285 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1286 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1287
1288 if ((val & BNX2_EMAC_STATUS_LINK) &&
1289 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1284 bmsr |= BMSR_LSTATUS; 1290 bmsr |= BMSR_LSTATUS;
1285 else 1291 else
1286 bmsr &= ~BMSR_LSTATUS; 1292 bmsr &= ~BMSR_LSTATUS;
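The added lines above use the BCM5706S "misc shadow" access sequence: select the shadow bank by writing MII_BNX2_MISC_SHADOW, then read it twice and trust the second value. A hedged sketch of that sequence in isolation; the wrapper function is hypothetical, while the register helpers and constants follow the driver:

    /* Hedged example: read the autoneg-debug shadow register and report
     * whether the SerDes link still has sync. */
    static int example_serdes_in_sync(struct bnx2 *bp)
    {
    	u32 an_dbg;

    	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
    	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);	/* discard first read */
    	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);	/* use second read */

    	return !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC);
    }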
@@ -5356,11 +5362,15 @@ bnx2_test_intr(struct bnx2 *bp)
5356 return -ENODEV; 5362 return -ENODEV;
5357} 5363}
5358 5364
5365/* Determining link for parallel detection. */
5359static int 5366static int
5360bnx2_5706_serdes_has_link(struct bnx2 *bp) 5367bnx2_5706_serdes_has_link(struct bnx2 *bp)
5361{ 5368{
5362 u32 mode_ctl, an_dbg, exp; 5369 u32 mode_ctl, an_dbg, exp;
5363 5370
5371 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5372 return 0;
5373
5364 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL); 5374 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5365 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl); 5375 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5366 5376
@@ -5390,13 +5400,6 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
5390 int check_link = 1; 5400 int check_link = 1;
5391 5401
5392 spin_lock(&bp->phy_lock); 5402 spin_lock(&bp->phy_lock);
5393 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
5394 bnx2_5706s_force_link_dn(bp, 0);
5395 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
5396 spin_unlock(&bp->phy_lock);
5397 return;
5398 }
5399
5400 if (bp->serdes_an_pending) { 5403 if (bp->serdes_an_pending) {
5401 bp->serdes_an_pending--; 5404 bp->serdes_an_pending--;
5402 check_link = 0; 5405 check_link = 0;
@@ -5420,7 +5423,6 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
5420 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) { 5423 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
5421 u32 phy2; 5424 u32 phy2;
5422 5425
5423 check_link = 0;
5424 bnx2_write_phy(bp, 0x17, 0x0f01); 5426 bnx2_write_phy(bp, 0x17, 0x0f01);
5425 bnx2_read_phy(bp, 0x15, &phy2); 5427 bnx2_read_phy(bp, 0x15, &phy2);
5426 if (phy2 & 0x20) { 5428 if (phy2 & 0x20) {
@@ -5435,17 +5437,21 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
5435 } else 5437 } else
5436 bp->current_interval = bp->timer_interval; 5438 bp->current_interval = bp->timer_interval;
5437 5439
5438 if (bp->link_up && (bp->autoneg & AUTONEG_SPEED) && check_link) { 5440 if (check_link) {
5439 u32 val; 5441 u32 val;
5440 5442
5441 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); 5443 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5442 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val); 5444 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5443 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val); 5445 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5444 5446
5445 if (val & MISC_SHDW_AN_DBG_NOSYNC) { 5447 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5446 bnx2_5706s_force_link_dn(bp, 1); 5448 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5447 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN; 5449 bnx2_5706s_force_link_dn(bp, 1);
5448 } 5450 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5451 } else
5452 bnx2_set_link(bp);
5453 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5454 bnx2_set_link(bp);
5449 } 5455 }
5450 spin_unlock(&bp->phy_lock); 5456 spin_unlock(&bp->phy_lock);
5451} 5457}
@@ -7326,7 +7332,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7326 bp->flags |= BNX2_FLAG_NO_WOL; 7332 bp->flags |= BNX2_FLAG_NO_WOL;
7327 bp->wol = 0; 7333 bp->wol = 0;
7328 } 7334 }
7329 if (CHIP_NUM(bp) != CHIP_NUM_5706) { 7335 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7336 /* Don't do parallel detect on this board because of
7337 * some board problems. The link will not go down
7338 * if we do parallel detect.
7339 */
7340 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7341 pdev->subsystem_device == 0x310c)
7342 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7343 } else {
7330 bp->phy_addr = 2; 7344 bp->phy_addr = 2;
7331 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) 7345 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7332 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE; 7346 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 3aa0364942e2..1eaf5bb3d9c2 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6673,6 +6673,7 @@ struct bnx2 {
6673#define BNX2_PHY_FLAG_DIS_EARLY_DAC 0x00000400 6673#define BNX2_PHY_FLAG_DIS_EARLY_DAC 0x00000400
6674#define BNX2_PHY_FLAG_REMOTE_PHY_CAP 0x00000800 6674#define BNX2_PHY_FLAG_REMOTE_PHY_CAP 0x00000800
6675#define BNX2_PHY_FLAG_FORCED_DOWN 0x00001000 6675#define BNX2_PHY_FLAG_FORCED_DOWN 0x00001000
6676#define BNX2_PHY_FLAG_NO_PARALLEL 0x00002000
6676 6677
6677 u32 mii_bmcr; 6678 u32 mii_bmcr;
6678 u32 mii_bmsr; 6679 u32 mii_bmsr;
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 17ed4c3527b7..865faee53e17 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -404,7 +404,7 @@ found:
404 if (neigh->nud_state & NUD_FAILED) { 404 if (neigh->nud_state & NUD_FAILED) {
405 arpq = e->arpq_head; 405 arpq = e->arpq_head;
406 e->arpq_head = e->arpq_tail = NULL; 406 e->arpq_head = e->arpq_tail = NULL;
407 } else if (neigh_is_connected(neigh)) 407 } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
408 setup_l2e_send_pending(dev, NULL, e); 408 setup_l2e_send_pending(dev, NULL, e);
409 } else { 409 } else {
410 e->state = neigh_is_connected(neigh) ? 410 e->state = neigh_is_connected(neigh) ?
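
The single functional change to l2t.c widens the neighbour-state test used when packets are still queued on an L2T entry: the pending arpq used to be pushed out only for a fully connected neighbour, and is now also pushed when the entry has merely gone stale, since a stale entry still holds a usable hardware address. For reference, a sketch of the two predicates side by side (the helper names are mine; neigh_is_connected() in kernels of this era is, to my understanding, a mask test against NUD_CONNECTED, i.e. NUD_PERMANENT | NUD_NOARP | NUD_REACHABLE):

    #include <net/neighbour.h>

    /* Old test: only NUD_CONNECTED states flush the pending queue. */
    static inline int l2t_neigh_usable_old(struct neighbour *n)
    {
            return neigh_is_connected(n);
    }

    /* New test: a stale entry also qualifies. */
    static inline int l2t_neigh_usable_new(struct neighbour *n)
    {
            return n->nud_state & (NUD_CONNECTED | NUD_STALE);
    }
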
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 9ca8c66abd16..979f3fc5e765 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1059,6 +1059,14 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1059 htonl(V_WR_TID(q->token))); 1059 htonl(V_WR_TID(q->token)));
1060} 1060}
1061 1061
1062static inline void t3_stop_queue(struct net_device *dev, struct sge_qset *qs,
1063 struct sge_txq *q)
1064{
1065 netif_stop_queue(dev);
1066 set_bit(TXQ_ETH, &qs->txq_stopped);
1067 q->stops++;
1068}
1069
1062/** 1070/**
1063 * eth_xmit - add a packet to the Ethernet Tx queue 1071 * eth_xmit - add a packet to the Ethernet Tx queue
1064 * @skb: the packet 1072 * @skb: the packet
@@ -1090,31 +1098,18 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1090 ndesc = calc_tx_descs(skb); 1098 ndesc = calc_tx_descs(skb);
1091 1099
1092 if (unlikely(credits < ndesc)) { 1100 if (unlikely(credits < ndesc)) {
1093 if (!netif_queue_stopped(dev)) { 1101 t3_stop_queue(dev, qs, q);
1094 netif_stop_queue(dev); 1102 dev_err(&adap->pdev->dev,
1095 set_bit(TXQ_ETH, &qs->txq_stopped); 1103 "%s: Tx ring %u full while queue awake!\n",
1096 q->stops++; 1104 dev->name, q->cntxt_id & 7);
1097 dev_err(&adap->pdev->dev,
1098 "%s: Tx ring %u full while queue awake!\n",
1099 dev->name, q->cntxt_id & 7);
1100 }
1101 spin_unlock(&q->lock); 1105 spin_unlock(&q->lock);
1102 return NETDEV_TX_BUSY; 1106 return NETDEV_TX_BUSY;
1103 } 1107 }
1104 1108
1105 q->in_use += ndesc; 1109 q->in_use += ndesc;
1106 if (unlikely(credits - ndesc < q->stop_thres)) { 1110 if (unlikely(credits - ndesc < q->stop_thres))
1107 q->stops++; 1111 if (USE_GTS || !should_restart_tx(q))
1108 netif_stop_queue(dev); 1112 t3_stop_queue(dev, qs, q);
1109 set_bit(TXQ_ETH, &qs->txq_stopped);
1110#if !USE_GTS
1111 if (should_restart_tx(q) &&
1112 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1113 q->restarts++;
1114 netif_wake_queue(dev);
1115 }
1116#endif
1117 }
1118 1113
1119 gen = q->gen; 1114 gen = q->gen;
1120 q->unacked += ndesc; 1115 q->unacked += ndesc;
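
The sge.c hunk is mostly a consolidation: the three-step "stop this Tx queue" bookkeeping that was open-coded in two places in eth_xmit() now lives in the new t3_stop_queue() helper, and the low-water-mark path no longer stops the queue only to wake it straight back up when, without GTS, should_restart_tx() reports enough free descriptors. A before/after sketch of that low-water-mark branch (the wrapper function names are mine; everything inside them is taken from the hunk):

    /* Before: always stop, then possibly undo the stop immediately. */
    static void tx_low_water_old(struct net_device *dev,
                                 struct sge_qset *qs, struct sge_txq *q)
    {
            q->stops++;
            netif_stop_queue(dev);
            set_bit(TXQ_ETH, &qs->txq_stopped);
    #if !USE_GTS
            if (should_restart_tx(q) &&
                test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
                    q->restarts++;
                    netif_wake_queue(dev);
            }
    #endif
    }

    /* After: stop only when the queue genuinely cannot keep running. */
    static void tx_low_water_new(struct net_device *dev,
                                 struct sge_qset *qs, struct sge_txq *q)
    {
            if (USE_GTS || !should_restart_tx(q))
                    t3_stop_queue(dev, qs, q);
    }
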
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 6a20a5491a96..1fe305ca2cf0 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1,7 +1,5 @@
1/* 1/*
2 * dm9000.c: Version 1.2 03/18/2003 2 * Davicom DM9000 Fast Ethernet driver for Linux.
3 *
4 * A Davicom DM9000 ISA NIC fast Ethernet driver for Linux.
5 * Copyright (C) 1997 Sten Wang 3 * Copyright (C) 1997 Sten Wang
6 * 4 *
7 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -14,44 +12,11 @@
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 13 * GNU General Public License for more details.
16 * 14 *
17 * (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved. 15 * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
18 *
19 * V0.11 06/20/2001 REG_0A bit3=1, default enable BP with DA match
20 * 06/22/2001 Support DM9801 progrmming
21 * E3: R25 = ((R24 + NF) & 0x00ff) | 0xf000
22 * E4: R25 = ((R24 + NF) & 0x00ff) | 0xc200
23 * R17 = (R17 & 0xfff0) | NF + 3
24 * E5: R25 = ((R24 + NF - 3) & 0x00ff) | 0xc200
25 * R17 = (R17 & 0xfff0) | NF
26 *
27 * v1.00 modify by simon 2001.9.5
28 * change for kernel 2.4.x
29 *
30 * v1.1 11/09/2001 fix force mode bug
31 *
32 * v1.2 03/18/2003 Weilun Huang <weilun_huang@davicom.com.tw>:
33 * Fixed phy reset.
34 * Added tx/rx 32 bit mode.
35 * Cleaned up for kernel merge.
36 *
37 * 03/03/2004 Sascha Hauer <s.hauer@pengutronix.de>
38 * Port to 2.6 kernel
39 *
40 * 24-Sep-2004 Ben Dooks <ben@simtec.co.uk>
41 * Cleanup of code to remove ifdefs
42 * Allowed platform device data to influence access width
43 * Reformatting areas of code
44 *
45 * 17-Mar-2005 Sascha Hauer <s.hauer@pengutronix.de>
46 * * removed 2.4 style module parameters
47 * * removed removed unused stat counter and fixed
48 * net_device_stats
49 * * introduced tx_timeout function
50 * * reworked locking
51 * 16 *
52 * 01-Jul-2005 Ben Dooks <ben@simtec.co.uk> 17 * Additional updates, Copyright:
53 * * fixed spinlock call without pointer 18 * Ben Dooks <ben@simtec.co.uk>
54 * * ensure spinlock is initialised 19 * Sascha Hauer <s.hauer@pengutronix.de>
55 */ 20 */
56 21
57#include <linux/module.h> 22#include <linux/module.h>
@@ -63,6 +28,7 @@
63#include <linux/spinlock.h> 28#include <linux/spinlock.h>
64#include <linux/crc32.h> 29#include <linux/crc32.h>
65#include <linux/mii.h> 30#include <linux/mii.h>
31#include <linux/ethtool.h>
66#include <linux/dm9000.h> 32#include <linux/dm9000.h>
67#include <linux/delay.h> 33#include <linux/delay.h>
68#include <linux/platform_device.h> 34#include <linux/platform_device.h>
@@ -80,30 +46,7 @@
80 46
81#define CARDNAME "dm9000" 47#define CARDNAME "dm9000"
82#define PFX CARDNAME ": " 48#define PFX CARDNAME ": "
83 49#define DRV_VERSION "1.30"
84#define DM9000_TIMER_WUT jiffies+(HZ*2) /* timer wakeup time : 2 second */
85
86#define DM9000_DEBUG 0
87
88#if DM9000_DEBUG > 2
89#define PRINTK3(args...) printk(CARDNAME ": " args)
90#else
91#define PRINTK3(args...) do { } while(0)
92#endif
93
94#if DM9000_DEBUG > 1
95#define PRINTK2(args...) printk(CARDNAME ": " args)
96#else
97#define PRINTK2(args...) do { } while(0)
98#endif
99
100#if DM9000_DEBUG > 0
101#define PRINTK1(args...) printk(CARDNAME ": " args)
102#define PRINTK(args...) printk(CARDNAME ": " args)
103#else
104#define PRINTK1(args...) do { } while(0)
105#define PRINTK(args...) printk(KERN_DEBUG args)
106#endif
107 50
108#ifdef CONFIG_BLACKFIN 51#ifdef CONFIG_BLACKFIN
109#define readsb insb 52#define readsb insb
@@ -112,9 +55,9 @@
112#define writesb outsb 55#define writesb outsb
113#define writesw outsw 56#define writesw outsw
114#define writesl outsl 57#define writesl outsl
115#define DM9000_IRQ_FLAGS (IRQF_SHARED | IRQF_TRIGGER_HIGH) 58#define DEFAULT_TRIGGER IRQF_TRIGGER_HIGH
116#else 59#else
117#define DM9000_IRQ_FLAGS (IRQF_SHARED | IRQT_RISING) 60#define DEFAULT_TRIGGER (0)
118#endif 61#endif
119 62
120/* 63/*
@@ -124,6 +67,24 @@ static int watchdog = 5000;
124module_param(watchdog, int, 0400); 67module_param(watchdog, int, 0400);
125MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); 68MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
126 69
70/* DM9000 register address locking.
71 *
72 * The DM9000 uses an address register to control where data written
73 * to the data register goes. This means that the address register
74 * must be preserved over interrupts or similar calls.
75 *
76 * During interrupt and other critical calls, a spinlock is used to
 77 * protect the system, but the calls themselves save the contents
 78 * of the address register in case they are interrupting another
79 * access to the device.
80 *
81 * For general accesses a lock is provided so that calls which are
82 * allowed to sleep are serialised so that the address register does
83 * not need to be saved. This lock also serves to serialise access
84 * to the EEPROM and PHY access registers which are shared between
85 * these two devices.
86 */
87
127/* Structure/enum declaration ------------------------------- */ 88/* Structure/enum declaration ------------------------------- */
128typedef struct board_info { 89typedef struct board_info {
129 90
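
The new comment block above describes a two-level locking scheme that the rest of this patch relies on: db->lock (a spinlock) protects individual index/data register accesses against the interrupt handler, which saves and restores the address register around its own work, while the new db->addr_lock mutex serialises the sleeping EEPROM and PHY paths against each other. A minimal sketch of the resulting shape of a sleeping accessor, assuming the board_info_t fields added above (the function name is illustrative; compare dm9000_read_eeprom() later in the patch):

    #include <linux/delay.h>
    #include <linux/mutex.h>
    #include <linux/spinlock.h>

    static void dm9000_slow_access_sketch(board_info_t *db)
    {
            unsigned long flags;

            mutex_lock(&db->addr_lock);     /* one EEPROM/PHY user at a time */

            spin_lock_irqsave(&db->lock, flags);
            /* program the address register and start the operation; the
             * interrupt handler takes the same spinlock and saves/restores
             * the address register itself, so no damage is done here. */
            spin_unlock_irqrestore(&db->lock, flags);

            msleep(1);                      /* safe: no spinlock held */

            spin_lock_irqsave(&db->lock, flags);
            /* collect the result */
            spin_unlock_irqrestore(&db->lock, flags);

            mutex_unlock(&db->addr_lock);
    }
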
@@ -137,33 +98,52 @@ typedef struct board_info {
137 u16 dbug_cnt; 98 u16 dbug_cnt;
138 u8 io_mode; /* 0:word, 2:byte */ 99 u8 io_mode; /* 0:word, 2:byte */
139 u8 phy_addr; 100 u8 phy_addr;
101 unsigned int flags;
102 unsigned int in_suspend :1;
103
104 int debug_level;
140 105
141 void (*inblk)(void __iomem *port, void *data, int length); 106 void (*inblk)(void __iomem *port, void *data, int length);
142 void (*outblk)(void __iomem *port, void *data, int length); 107 void (*outblk)(void __iomem *port, void *data, int length);
143 void (*dumpblk)(void __iomem *port, int length); 108 void (*dumpblk)(void __iomem *port, int length);
144 109
110 struct device *dev; /* parent device */
111
145 struct resource *addr_res; /* resources found */ 112 struct resource *addr_res; /* resources found */
146 struct resource *data_res; 113 struct resource *data_res;
147 struct resource *addr_req; /* resources requested */ 114 struct resource *addr_req; /* resources requested */
148 struct resource *data_req; 115 struct resource *data_req;
149 struct resource *irq_res; 116 struct resource *irq_res;
150 117
151 struct timer_list timer; 118 struct mutex addr_lock; /* phy and eeprom access lock */
152 unsigned char srom[128]; 119
153 spinlock_t lock; 120 spinlock_t lock;
154 121
155 struct mii_if_info mii; 122 struct mii_if_info mii;
156 u32 msg_enable; 123 u32 msg_enable;
157} board_info_t; 124} board_info_t;
158 125
126/* debug code */
127
128#define dm9000_dbg(db, lev, msg...) do { \
129 if ((lev) < CONFIG_DM9000_DEBUGLEVEL && \
130 (lev) < db->debug_level) { \
131 dev_dbg(db->dev, msg); \
132 } \
133} while (0)
134
135static inline board_info_t *to_dm9000_board(struct net_device *dev)
136{
137 return dev->priv;
138}
139
159/* function declaration ------------------------------------- */ 140/* function declaration ------------------------------------- */
160static int dm9000_probe(struct platform_device *); 141static int dm9000_probe(struct platform_device *);
161static int dm9000_open(struct net_device *); 142static int dm9000_open(struct net_device *);
162static int dm9000_start_xmit(struct sk_buff *, struct net_device *); 143static int dm9000_start_xmit(struct sk_buff *, struct net_device *);
163static int dm9000_stop(struct net_device *); 144static int dm9000_stop(struct net_device *);
145static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd);
164 146
165
166static void dm9000_timer(unsigned long);
167static void dm9000_init_dm9000(struct net_device *); 147static void dm9000_init_dm9000(struct net_device *);
168 148
169static irqreturn_t dm9000_interrupt(int, void *); 149static irqreturn_t dm9000_interrupt(int, void *);
@@ -171,20 +151,19 @@ static irqreturn_t dm9000_interrupt(int, void *);
171static int dm9000_phy_read(struct net_device *dev, int phyaddr_unsused, int reg); 151static int dm9000_phy_read(struct net_device *dev, int phyaddr_unsused, int reg);
172static void dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, 152static void dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg,
173 int value); 153 int value);
174static u16 read_srom_word(board_info_t *, int); 154
155static void dm9000_read_eeprom(board_info_t *, int addr, u8 *to);
156static void dm9000_write_eeprom(board_info_t *, int addr, u8 *dp);
175static void dm9000_rx(struct net_device *); 157static void dm9000_rx(struct net_device *);
176static void dm9000_hash_table(struct net_device *); 158static void dm9000_hash_table(struct net_device *);
177 159
178//#define DM9000_PROGRAM_EEPROM
179#ifdef DM9000_PROGRAM_EEPROM
180static void program_eeprom(board_info_t * db);
181#endif
182/* DM9000 network board routine ---------------------------- */ 160/* DM9000 network board routine ---------------------------- */
183 161
184static void 162static void
185dm9000_reset(board_info_t * db) 163dm9000_reset(board_info_t * db)
186{ 164{
187 PRINTK1("dm9000x: resetting\n"); 165 dev_dbg(db->dev, "resetting device\n");
166
188 /* RESET device */ 167 /* RESET device */
189 writeb(DM9000_NCR, db->io_addr); 168 writeb(DM9000_NCR, db->io_addr);
190 udelay(200); 169 udelay(200);
@@ -300,14 +279,10 @@ static void dm9000_set_io(struct board_info *db, int byte_width)
300 db->inblk = dm9000_inblk_8bit; 279 db->inblk = dm9000_inblk_8bit;
301 break; 280 break;
302 281
303 case 2:
304 db->dumpblk = dm9000_dumpblk_16bit;
305 db->outblk = dm9000_outblk_16bit;
306 db->inblk = dm9000_inblk_16bit;
307 break;
308 282
309 case 3: 283 case 3:
 310 printk(KERN_ERR PFX ": 3 byte IO, falling back to 16bit\n"); 284 dev_dbg(db->dev, "3 byte IO, falling back to 16bit\n");
285 case 2:
311 db->dumpblk = dm9000_dumpblk_16bit; 286 db->dumpblk = dm9000_dumpblk_16bit;
312 db->outblk = dm9000_outblk_16bit; 287 db->outblk = dm9000_outblk_16bit;
313 db->inblk = dm9000_inblk_16bit; 288 db->inblk = dm9000_inblk_16bit;
@@ -358,6 +333,139 @@ static void dm9000_poll_controller(struct net_device *dev)
358} 333}
359#endif 334#endif
360 335
336static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
337{
338 board_info_t *dm = to_dm9000_board(dev);
339
340 if (!netif_running(dev))
341 return -EINVAL;
342
343 return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL);
344}
345
346/* ethtool ops */
347
348static void dm9000_get_drvinfo(struct net_device *dev,
349 struct ethtool_drvinfo *info)
350{
351 board_info_t *dm = to_dm9000_board(dev);
352
353 strcpy(info->driver, CARDNAME);
354 strcpy(info->version, DRV_VERSION);
355 strcpy(info->bus_info, to_platform_device(dm->dev)->name);
356}
357
358static u32 dm9000_get_msglevel(struct net_device *dev)
359{
360 board_info_t *dm = to_dm9000_board(dev);
361
362 return dm->msg_enable;
363}
364
365static void dm9000_set_msglevel(struct net_device *dev, u32 value)
366{
367 board_info_t *dm = to_dm9000_board(dev);
368
369 dm->msg_enable = value;
370}
371
372static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
373{
374 board_info_t *dm = to_dm9000_board(dev);
375
376 mii_ethtool_gset(&dm->mii, cmd);
377 return 0;
378}
379
380static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
381{
382 board_info_t *dm = to_dm9000_board(dev);
383
384 return mii_ethtool_sset(&dm->mii, cmd);
385}
386
387static int dm9000_nway_reset(struct net_device *dev)
388{
389 board_info_t *dm = to_dm9000_board(dev);
390 return mii_nway_restart(&dm->mii);
391}
392
393static u32 dm9000_get_link(struct net_device *dev)
394{
395 board_info_t *dm = to_dm9000_board(dev);
396 return mii_link_ok(&dm->mii);
397}
398
399#define DM_EEPROM_MAGIC (0x444D394B)
400
401static int dm9000_get_eeprom_len(struct net_device *dev)
402{
403 return 128;
404}
405
406static int dm9000_get_eeprom(struct net_device *dev,
407 struct ethtool_eeprom *ee, u8 *data)
408{
409 board_info_t *dm = to_dm9000_board(dev);
410 int offset = ee->offset;
411 int len = ee->len;
412 int i;
413
414 /* EEPROM access is aligned to two bytes */
415
416 if ((len & 1) != 0 || (offset & 1) != 0)
417 return -EINVAL;
418
419 if (dm->flags & DM9000_PLATF_NO_EEPROM)
420 return -ENOENT;
421
422 ee->magic = DM_EEPROM_MAGIC;
423
424 for (i = 0; i < len; i += 2)
425 dm9000_read_eeprom(dm, (offset + i) / 2, data + i);
426
427 return 0;
428}
429
430static int dm9000_set_eeprom(struct net_device *dev,
431 struct ethtool_eeprom *ee, u8 *data)
432{
433 board_info_t *dm = to_dm9000_board(dev);
434 int offset = ee->offset;
435 int len = ee->len;
436 int i;
437
438 /* EEPROM access is aligned to two bytes */
439
440 if ((len & 1) != 0 || (offset & 1) != 0)
441 return -EINVAL;
442
443 if (dm->flags & DM9000_PLATF_NO_EEPROM)
444 return -ENOENT;
445
446 if (ee->magic != DM_EEPROM_MAGIC)
447 return -EINVAL;
448
449 for (i = 0; i < len; i += 2)
450 dm9000_write_eeprom(dm, (offset + i) / 2, data + i);
451
452 return 0;
453}
454
455static const struct ethtool_ops dm9000_ethtool_ops = {
456 .get_drvinfo = dm9000_get_drvinfo,
457 .get_settings = dm9000_get_settings,
458 .set_settings = dm9000_set_settings,
459 .get_msglevel = dm9000_get_msglevel,
460 .set_msglevel = dm9000_set_msglevel,
461 .nway_reset = dm9000_nway_reset,
462 .get_link = dm9000_get_link,
463 .get_eeprom_len = dm9000_get_eeprom_len,
464 .get_eeprom = dm9000_get_eeprom,
465 .set_eeprom = dm9000_set_eeprom,
466};
467
468
361/* dm9000_release_board 469/* dm9000_release_board
362 * 470 *
363 * release a board, and any mapped resources 471 * release a board, and any mapped resources
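
Most of the new ethtool and ioctl code above defers to the generic MII helpers: dm9000_ioctl() hands the SIOC*MII* requests to generic_mii_ioctl(), and get/set_settings, nway_reset and get_link all go through the mii_if_info that probe points at dm9000_phy_read()/dm9000_phy_write(). The EEPROM callbacks add two guards of their own: accesses must be two-byte aligned, and writes must present DM_EEPROM_MAGIC. Condensed from the probe hunk further down, the wiring looks roughly like this (the trailing comment about userspace tools is my note, not driver code):

    /* From dm9000_probe(): expose the new ops and point the generic MII
     * layer at the driver's PHY accessors. */
    ndev->ethtool_ops    = &dm9000_ethtool_ops;
    ndev->do_ioctl       = &dm9000_ioctl;

    db->msg_enable       = NETIF_MSG_LINK;
    db->mii.phy_id_mask  = 0x1f;
    db->mii.reg_num_mask = 0x1f;
    /* ... a few more mii_if_info fields are set in between ... */
    db->mii.mdio_read    = dm9000_phy_read;
    db->mii.mdio_write   = dm9000_phy_write;

    /* After this, ethtool link/EEPROM queries and the MII ioctls used by
     * tools such as mii-tool land in the callbacks added above. */
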
@@ -401,6 +509,7 @@ dm9000_probe(struct platform_device *pdev)
401 struct dm9000_plat_data *pdata = pdev->dev.platform_data; 509 struct dm9000_plat_data *pdata = pdev->dev.platform_data;
402 struct board_info *db; /* Point a board information structure */ 510 struct board_info *db; /* Point a board information structure */
403 struct net_device *ndev; 511 struct net_device *ndev;
512 const unsigned char *mac_src;
404 unsigned long base; 513 unsigned long base;
405 int ret = 0; 514 int ret = 0;
406 int iosize; 515 int iosize;
@@ -410,19 +519,22 @@ dm9000_probe(struct platform_device *pdev)
410 /* Init network device */ 519 /* Init network device */
411 ndev = alloc_etherdev(sizeof (struct board_info)); 520 ndev = alloc_etherdev(sizeof (struct board_info));
412 if (!ndev) { 521 if (!ndev) {
413 printk("%s: could not allocate device.\n", CARDNAME); 522 dev_err(&pdev->dev, "could not allocate device.\n");
414 return -ENOMEM; 523 return -ENOMEM;
415 } 524 }
416 525
417 SET_NETDEV_DEV(ndev, &pdev->dev); 526 SET_NETDEV_DEV(ndev, &pdev->dev);
418 527
419 PRINTK2("dm9000_probe()"); 528 dev_dbg(&pdev->dev, "dm9000_probe()");
420 529
421 /* setup board info structure */ 530 /* setup board info structure */
422 db = (struct board_info *) ndev->priv; 531 db = (struct board_info *) ndev->priv;
423 memset(db, 0, sizeof (*db)); 532 memset(db, 0, sizeof (*db));
424 533
534 db->dev = &pdev->dev;
535
425 spin_lock_init(&db->lock); 536 spin_lock_init(&db->lock);
537 mutex_init(&db->addr_lock);
426 538
427 if (pdev->num_resources < 2) { 539 if (pdev->num_resources < 2) {
428 ret = -ENODEV; 540 ret = -ENODEV;
@@ -450,7 +562,7 @@ dm9000_probe(struct platform_device *pdev)
450 562
451 if (db->addr_res == NULL || db->data_res == NULL || 563 if (db->addr_res == NULL || db->data_res == NULL ||
452 db->irq_res == NULL) { 564 db->irq_res == NULL) {
453 printk(KERN_ERR PFX "insufficient resources\n"); 565 dev_err(db->dev, "insufficient resources\n");
454 ret = -ENOENT; 566 ret = -ENOENT;
455 goto out; 567 goto out;
456 } 568 }
@@ -460,7 +572,7 @@ dm9000_probe(struct platform_device *pdev)
460 pdev->name); 572 pdev->name);
461 573
462 if (db->addr_req == NULL) { 574 if (db->addr_req == NULL) {
463 printk(KERN_ERR PFX "cannot claim address reg area\n"); 575 dev_err(db->dev, "cannot claim address reg area\n");
464 ret = -EIO; 576 ret = -EIO;
465 goto out; 577 goto out;
466 } 578 }
@@ -468,7 +580,7 @@ dm9000_probe(struct platform_device *pdev)
468 db->io_addr = ioremap(db->addr_res->start, i); 580 db->io_addr = ioremap(db->addr_res->start, i);
469 581
470 if (db->io_addr == NULL) { 582 if (db->io_addr == NULL) {
471 printk(KERN_ERR "failed to ioremap address reg\n"); 583 dev_err(db->dev, "failed to ioremap address reg\n");
472 ret = -EINVAL; 584 ret = -EINVAL;
473 goto out; 585 goto out;
474 } 586 }
@@ -478,7 +590,7 @@ dm9000_probe(struct platform_device *pdev)
478 pdev->name); 590 pdev->name);
479 591
480 if (db->data_req == NULL) { 592 if (db->data_req == NULL) {
481 printk(KERN_ERR PFX "cannot claim data reg area\n"); 593 dev_err(db->dev, "cannot claim data reg area\n");
482 ret = -EIO; 594 ret = -EIO;
483 goto out; 595 goto out;
484 } 596 }
@@ -486,7 +598,7 @@ dm9000_probe(struct platform_device *pdev)
486 db->io_data = ioremap(db->data_res->start, iosize); 598 db->io_data = ioremap(db->data_res->start, iosize);
487 599
488 if (db->io_data == NULL) { 600 if (db->io_data == NULL) {
 489 printk(KERN_ERR "failed to ioremap data reg\n"); 601 dev_err(db->dev, "failed to ioremap data reg\n");
490 ret = -EINVAL; 602 ret = -EINVAL;
491 goto out; 603 goto out;
492 } 604 }
@@ -525,12 +637,14 @@ dm9000_probe(struct platform_device *pdev)
525 637
526 if (pdata->dumpblk != NULL) 638 if (pdata->dumpblk != NULL)
527 db->dumpblk = pdata->dumpblk; 639 db->dumpblk = pdata->dumpblk;
640
641 db->flags = pdata->flags;
528 } 642 }
529 643
530 dm9000_reset(db); 644 dm9000_reset(db);
531 645
532 /* try two times, DM9000 sometimes gets the first read wrong */ 646 /* try two times, DM9000 sometimes gets the first read wrong */
533 for (i = 0; i < 2; i++) { 647 for (i = 0; i < 8; i++) {
534 id_val = ior(db, DM9000_VIDL); 648 id_val = ior(db, DM9000_VIDL);
535 id_val |= (u32)ior(db, DM9000_VIDH) << 8; 649 id_val |= (u32)ior(db, DM9000_VIDH) << 8;
536 id_val |= (u32)ior(db, DM9000_PIDL) << 16; 650 id_val |= (u32)ior(db, DM9000_PIDL) << 16;
@@ -538,11 +652,11 @@ dm9000_probe(struct platform_device *pdev)
538 652
539 if (id_val == DM9000_ID) 653 if (id_val == DM9000_ID)
540 break; 654 break;
541 printk("%s: read wrong id 0x%08x\n", CARDNAME, id_val); 655 dev_err(db->dev, "read wrong id 0x%08x\n", id_val);
542 } 656 }
543 657
544 if (id_val != DM9000_ID) { 658 if (id_val != DM9000_ID) {
545 printk("%s: wrong id: 0x%08x\n", CARDNAME, id_val); 659 dev_err(db->dev, "wrong id: 0x%08x\n", id_val);
546 ret = -ENODEV; 660 ret = -ENODEV;
547 goto out; 661 goto out;
548 } 662 }
@@ -558,13 +672,13 @@ dm9000_probe(struct platform_device *pdev)
558 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 672 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
559 ndev->stop = &dm9000_stop; 673 ndev->stop = &dm9000_stop;
560 ndev->set_multicast_list = &dm9000_hash_table; 674 ndev->set_multicast_list = &dm9000_hash_table;
675 ndev->ethtool_ops = &dm9000_ethtool_ops;
676 ndev->do_ioctl = &dm9000_ioctl;
677
561#ifdef CONFIG_NET_POLL_CONTROLLER 678#ifdef CONFIG_NET_POLL_CONTROLLER
562 ndev->poll_controller = &dm9000_poll_controller; 679 ndev->poll_controller = &dm9000_poll_controller;
563#endif 680#endif
564 681
565#ifdef DM9000_PROGRAM_EEPROM
566 program_eeprom(db);
567#endif
568 db->msg_enable = NETIF_MSG_LINK; 682 db->msg_enable = NETIF_MSG_LINK;
569 db->mii.phy_id_mask = 0x1f; 683 db->mii.phy_id_mask = 0x1f;
570 db->mii.reg_num_mask = 0x1f; 684 db->mii.reg_num_mask = 0x1f;
@@ -574,38 +688,37 @@ dm9000_probe(struct platform_device *pdev)
574 db->mii.mdio_read = dm9000_phy_read; 688 db->mii.mdio_read = dm9000_phy_read;
575 db->mii.mdio_write = dm9000_phy_write; 689 db->mii.mdio_write = dm9000_phy_write;
576 690
577 /* Read SROM content */ 691 mac_src = "eeprom";
578 for (i = 0; i < 64; i++)
579 ((u16 *) db->srom)[i] = read_srom_word(db, i);
580 692
581 /* Set Node Address */ 693 /* try reading the node address from the attached EEPROM */
582 for (i = 0; i < 6; i++) 694 for (i = 0; i < 6; i += 2)
583 ndev->dev_addr[i] = db->srom[i]; 695 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
584 696
585 if (!is_valid_ether_addr(ndev->dev_addr)) { 697 if (!is_valid_ether_addr(ndev->dev_addr)) {
586 /* try reading from mac */ 698 /* try reading from mac */
587 699
700 mac_src = "chip";
588 for (i = 0; i < 6; i++) 701 for (i = 0; i < 6; i++)
589 ndev->dev_addr[i] = ior(db, i+DM9000_PAR); 702 ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
590 } 703 }
591 704
592 if (!is_valid_ether_addr(ndev->dev_addr)) 705 if (!is_valid_ether_addr(ndev->dev_addr))
593 printk("%s: Invalid ethernet MAC address. Please " 706 dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
594 "set using ifconfig\n", ndev->name); 707 "set using ifconfig\n", ndev->name);
595 708
596 platform_set_drvdata(pdev, ndev); 709 platform_set_drvdata(pdev, ndev);
597 ret = register_netdev(ndev); 710 ret = register_netdev(ndev);
598 711
599 if (ret == 0) { 712 if (ret == 0) {
600 DECLARE_MAC_BUF(mac); 713 DECLARE_MAC_BUF(mac);
601 printk("%s: dm9000 at %p,%p IRQ %d MAC: %s\n", 714 printk("%s: dm9000 at %p,%p IRQ %d MAC: %s (%s)\n",
602 ndev->name, db->io_addr, db->io_data, ndev->irq, 715 ndev->name, db->io_addr, db->io_data, ndev->irq,
603 print_mac(mac, ndev->dev_addr)); 716 print_mac(mac, ndev->dev_addr), mac_src);
604 } 717 }
605 return 0; 718 return 0;
606 719
607out: 720out:
608 printk("%s: not found (%d).\n", CARDNAME, ret); 721 dev_err(db->dev, "not found (%d).\n", ret);
609 722
610 dm9000_release_board(pdev, db); 723 dm9000_release_board(pdev, db);
611 free_netdev(ndev); 724 free_netdev(ndev);
@@ -621,10 +734,22 @@ static int
621dm9000_open(struct net_device *dev) 734dm9000_open(struct net_device *dev)
622{ 735{
623 board_info_t *db = (board_info_t *) dev->priv; 736 board_info_t *db = (board_info_t *) dev->priv;
737 unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
624 738
625 PRINTK2("entering dm9000_open\n"); 739 if (netif_msg_ifup(db))
740 dev_dbg(db->dev, "enabling %s\n", dev->name);
626 741
627 if (request_irq(dev->irq, &dm9000_interrupt, DM9000_IRQ_FLAGS, dev->name, dev)) 742 /* If there is no IRQ type specified, default to something that
743 * may work, and tell the user that this is a problem */
744
745 if (irqflags == IRQF_TRIGGER_NONE) {
746 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
747 irqflags = DEFAULT_TRIGGER;
748 }
749
750 irqflags |= IRQF_SHARED;
751
752 if (request_irq(dev->irq, &dm9000_interrupt, irqflags, dev->name, dev))
628 return -EAGAIN; 753 return -EAGAIN;
629 754
630 /* Initialize DM9000 board */ 755 /* Initialize DM9000 board */
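
dm9000_open() now takes the interrupt trigger type from the IRQ resource instead of the old compile-time DM9000_IRQ_FLAGS: whatever IRQF_TRIGGER_* bits the platform code stored in db->irq_res->flags are passed to request_irq(), with DEFAULT_TRIGGER (plus a warning) as the fallback when none were set. A sketch of the board-support side this expects, with made-up addresses and IRQ number purely for illustration:

    #include <linux/interrupt.h>
    #include <linux/ioport.h>

    /* Hypothetical board file: the trigger type travels in the IRQ
     * resource flags so dm9000_open() can extract it. */
    static struct resource dm9000_board_resources[] = {
            [0] = {                         /* address register */
                    .start = 0x20000000,
                    .end   = 0x20000003,
                    .flags = IORESOURCE_MEM,
            },
            [1] = {                         /* data register */
                    .start = 0x20000004,
                    .end   = 0x20000007,
                    .flags = IORESOURCE_MEM,
            },
            [2] = {                         /* interrupt + trigger type */
                    .start = 42,
                    .end   = 42,
                    .flags = IORESOURCE_IRQ | IRQF_TRIGGER_RISING,
            },
    };
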
@@ -634,13 +759,6 @@ dm9000_open(struct net_device *dev)
634 /* Init driver variable */ 759 /* Init driver variable */
635 db->dbug_cnt = 0; 760 db->dbug_cnt = 0;
636 761
637 /* set and active a timer process */
638 init_timer(&db->timer);
639 db->timer.expires = DM9000_TIMER_WUT;
640 db->timer.data = (unsigned long) dev;
641 db->timer.function = &dm9000_timer;
642 add_timer(&db->timer);
643
644 mii_check_media(&db->mii, netif_msg_link(db), 1); 762 mii_check_media(&db->mii, netif_msg_link(db), 1);
645 netif_start_queue(dev); 763 netif_start_queue(dev);
646 764
@@ -655,7 +773,7 @@ dm9000_init_dm9000(struct net_device *dev)
655{ 773{
656 board_info_t *db = (board_info_t *) dev->priv; 774 board_info_t *db = (board_info_t *) dev->priv;
657 775
658 PRINTK1("entering %s\n",__FUNCTION__); 776 dm9000_dbg(db, 1, "entering %s\n", __func__);
659 777
660 /* I/O mode */ 778 /* I/O mode */
661 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */ 779 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
@@ -665,6 +783,9 @@ dm9000_init_dm9000(struct net_device *dev)
665 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 783 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
666 iow(db, DM9000_GPR, 0); /* Enable PHY */ 784 iow(db, DM9000_GPR, 0); /* Enable PHY */
667 785
786 if (db->flags & DM9000_PLATF_EXT_PHY)
787 iow(db, DM9000_NCR, NCR_EXT_PHY);
788
668 /* Program operating register */ 789 /* Program operating register */
669 iow(db, DM9000_TCR, 0); /* TX Polling clear */ 790 iow(db, DM9000_TCR, 0); /* TX Polling clear */
670 iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */ 791 iow(db, DM9000_BPTR, 0x3f); /* Less 3Kb, 200us */
@@ -698,7 +819,7 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
698 unsigned long flags; 819 unsigned long flags;
699 board_info_t *db = (board_info_t *) dev->priv; 820 board_info_t *db = (board_info_t *) dev->priv;
700 821
701 PRINTK3("dm9000_start_xmit\n"); 822 dm9000_dbg(db, 3, "%s:\n", __func__);
702 823
703 if (db->tx_pkt_cnt > 1) 824 if (db->tx_pkt_cnt > 1)
704 return 1; 825 return 1;
@@ -715,8 +836,8 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
715 /* TX control: First packet immediately send, second packet queue */ 836 /* TX control: First packet immediately send, second packet queue */
716 if (db->tx_pkt_cnt == 1) { 837 if (db->tx_pkt_cnt == 1) {
717 /* Set TX length to DM9000 */ 838 /* Set TX length to DM9000 */
718 iow(db, DM9000_TXPLL, skb->len & 0xff); 839 iow(db, DM9000_TXPLL, skb->len);
719 iow(db, DM9000_TXPLH, (skb->len >> 8) & 0xff); 840 iow(db, DM9000_TXPLH, skb->len >> 8);
720 841
721 /* Issue TX polling command */ 842 /* Issue TX polling command */
722 iow(db, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */ 843 iow(db, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */
@@ -757,10 +878,8 @@ dm9000_stop(struct net_device *ndev)
757{ 878{
758 board_info_t *db = (board_info_t *) ndev->priv; 879 board_info_t *db = (board_info_t *) ndev->priv;
759 880
760 PRINTK1("entering %s\n",__FUNCTION__); 881 if (netif_msg_ifdown(db))
761 882 dev_dbg(db->dev, "shutting down %s\n", ndev->name);
762 /* deleted timer */
763 del_timer(&db->timer);
764 883
765 netif_stop_queue(ndev); 884 netif_stop_queue(ndev);
766 netif_carrier_off(ndev); 885 netif_carrier_off(ndev);
@@ -788,10 +907,13 @@ dm9000_tx_done(struct net_device *dev, board_info_t * db)
788 db->tx_pkt_cnt--; 907 db->tx_pkt_cnt--;
789 dev->stats.tx_packets++; 908 dev->stats.tx_packets++;
790 909
910 if (netif_msg_tx_done(db))
911 dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status);
912
791 /* Queue packet check & send */ 913 /* Queue packet check & send */
792 if (db->tx_pkt_cnt > 0) { 914 if (db->tx_pkt_cnt > 0) {
793 iow(db, DM9000_TXPLL, db->queue_pkt_len & 0xff); 915 iow(db, DM9000_TXPLL, db->queue_pkt_len);
794 iow(db, DM9000_TXPLH, (db->queue_pkt_len >> 8) & 0xff); 916 iow(db, DM9000_TXPLH, db->queue_pkt_len >> 8);
795 iow(db, DM9000_TCR, TCR_TXREQ); 917 iow(db, DM9000_TCR, TCR_TXREQ);
796 dev->trans_start = jiffies; 918 dev->trans_start = jiffies;
797 } 919 }
@@ -803,19 +925,14 @@ static irqreturn_t
803dm9000_interrupt(int irq, void *dev_id) 925dm9000_interrupt(int irq, void *dev_id)
804{ 926{
805 struct net_device *dev = dev_id; 927 struct net_device *dev = dev_id;
806 board_info_t *db; 928 board_info_t *db = (board_info_t *) dev->priv;
807 int int_status; 929 int int_status;
808 u8 reg_save; 930 u8 reg_save;
809 931
810 PRINTK3("entering %s\n",__FUNCTION__); 932 dm9000_dbg(db, 3, "entering %s\n", __func__);
811
812 if (!dev) {
813 PRINTK1("dm9000_interrupt() without DEVICE arg\n");
814 return IRQ_HANDLED;
815 }
816 933
817 /* A real interrupt coming */ 934 /* A real interrupt coming */
818 db = (board_info_t *) dev->priv; 935
819 spin_lock(&db->lock); 936 spin_lock(&db->lock);
820 937
821 /* Save previous register address */ 938 /* Save previous register address */
@@ -828,6 +945,9 @@ dm9000_interrupt(int irq, void *dev_id)
828 int_status = ior(db, DM9000_ISR); /* Got ISR */ 945 int_status = ior(db, DM9000_ISR); /* Got ISR */
829 iow(db, DM9000_ISR, int_status); /* Clear ISR status */ 946 iow(db, DM9000_ISR, int_status); /* Clear ISR status */
830 947
948 if (netif_msg_intr(db))
949 dev_dbg(db->dev, "interrupt status %02x\n", int_status);
950
831 /* Received the coming packet */ 951 /* Received the coming packet */
832 if (int_status & ISR_PRS) 952 if (int_status & ISR_PRS)
833 dm9000_rx(dev); 953 dm9000_rx(dev);
@@ -847,27 +967,9 @@ dm9000_interrupt(int irq, void *dev_id)
847 return IRQ_HANDLED; 967 return IRQ_HANDLED;
848} 968}
849 969
850/*
851 * A periodic timer routine
852 * Dynamic media sense, allocated Rx buffer...
853 */
854static void
855dm9000_timer(unsigned long data)
856{
857 struct net_device *dev = (struct net_device *) data;
858 board_info_t *db = (board_info_t *) dev->priv;
859
860 PRINTK3("dm9000_timer()\n");
861
862 mii_check_media(&db->mii, netif_msg_link(db), 0);
863
864 /* Set timer again */
865 db->timer.expires = DM9000_TIMER_WUT;
866 add_timer(&db->timer);
867}
868
869struct dm9000_rxhdr { 970struct dm9000_rxhdr {
870 u16 RxStatus; 971 u8 RxPktReady;
972 u8 RxStatus;
871 u16 RxLen; 973 u16 RxLen;
872} __attribute__((__packed__)); 974} __attribute__((__packed__));
873 975
@@ -893,7 +995,7 @@ dm9000_rx(struct net_device *dev)
893 995
894 /* Status check: this byte must be 0 or 1 */ 996 /* Status check: this byte must be 0 or 1 */
895 if (rxbyte > DM9000_PKT_RDY) { 997 if (rxbyte > DM9000_PKT_RDY) {
 896 printk("status check failed: %d\n", rxbyte); 998 dev_warn(db->dev, "status check failed: %d\n", rxbyte);
897 iow(db, DM9000_RCR, 0x00); /* Stop Device */ 999 iow(db, DM9000_RCR, 0x00); /* Stop Device */
898 iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */ 1000 iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */
899 return; 1001 return;
@@ -908,30 +1010,38 @@ dm9000_rx(struct net_device *dev)
908 1010
909 (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr)); 1011 (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr));
910 1012
911 RxLen = rxhdr.RxLen; 1013 RxLen = le16_to_cpu(rxhdr.RxLen);
1014
1015 if (netif_msg_rx_status(db))
1016 dev_dbg(db->dev, "RX: status %02x, length %04x\n",
1017 rxhdr.RxStatus, RxLen);
912 1018
913 /* Packet Status check */ 1019 /* Packet Status check */
914 if (RxLen < 0x40) { 1020 if (RxLen < 0x40) {
915 GoodPacket = false; 1021 GoodPacket = false;
916 PRINTK1("Bad Packet received (runt)\n"); 1022 if (netif_msg_rx_err(db))
1023 dev_dbg(db->dev, "RX: Bad Packet (runt)\n");
917 } 1024 }
918 1025
919 if (RxLen > DM9000_PKT_MAX) { 1026 if (RxLen > DM9000_PKT_MAX) {
920 PRINTK1("RST: RX Len:%x\n", RxLen); 1027 dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen);
921 } 1028 }
922 1029
923 if (rxhdr.RxStatus & 0xbf00) { 1030 if (rxhdr.RxStatus & 0xbf) {
924 GoodPacket = false; 1031 GoodPacket = false;
925 if (rxhdr.RxStatus & 0x100) { 1032 if (rxhdr.RxStatus & 0x01) {
926 PRINTK1("fifo error\n"); 1033 if (netif_msg_rx_err(db))
1034 dev_dbg(db->dev, "fifo error\n");
927 dev->stats.rx_fifo_errors++; 1035 dev->stats.rx_fifo_errors++;
928 } 1036 }
929 if (rxhdr.RxStatus & 0x200) { 1037 if (rxhdr.RxStatus & 0x02) {
930 PRINTK1("crc error\n"); 1038 if (netif_msg_rx_err(db))
1039 dev_dbg(db->dev, "crc error\n");
931 dev->stats.rx_crc_errors++; 1040 dev->stats.rx_crc_errors++;
932 } 1041 }
933 if (rxhdr.RxStatus & 0x8000) { 1042 if (rxhdr.RxStatus & 0x80) {
934 PRINTK1("length error\n"); 1043 if (netif_msg_rx_err(db))
1044 dev_dbg(db->dev, "length error\n");
935 dev->stats.rx_length_errors++; 1045 dev->stats.rx_length_errors++;
936 } 1046 }
937 } 1047 }
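
Two related fixes in the receive path above: struct dm9000_rxhdr now describes the four-byte header the chip prepends to each frame as two separate bytes (packet-ready marker, then status) followed by a little-endian length, so the status tests shrink from high-byte masks on a 16-bit word (0xbf00, 0x100, 0x200, 0x8000) to plain byte masks (0xbf, 0x01, 0x02, 0x80), and the length is byte-swapped explicitly with le16_to_cpu() so big-endian hosts read it correctly. An annotated view of the header as the new code interprets it (the field comments are my summary of the masks used above):

    #include <linux/types.h>

    /* RX header as read by (db->inblk)() into struct dm9000_rxhdr: */
    struct dm9000_rxhdr_annotated {
            u8      RxPktReady;     /* 0x00 = no packet, 0x01 = packet ready */
            u8      RxStatus;       /* error bits tested above:              */
                                    /*   0x01 fifo error                     */
                                    /*   0x02 crc error                      */
                                    /*   0x80 length error                   */
            __le16  RxLen;          /* frame length, little-endian           */
    } __attribute__((__packed__));
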
@@ -960,72 +1070,119 @@ dm9000_rx(struct net_device *dev)
960 } while (rxbyte == DM9000_PKT_RDY); 1070 } while (rxbyte == DM9000_PKT_RDY);
961} 1071}
962 1072
963/* 1073static unsigned int
964 * Read a word data from SROM 1074dm9000_read_locked(board_info_t *db, int reg)
965 */
966static u16
967read_srom_word(board_info_t * db, int offset)
968{ 1075{
969 iow(db, DM9000_EPAR, offset); 1076 unsigned long flags;
970 iow(db, DM9000_EPCR, EPCR_ERPRR); 1077 unsigned int ret;
971 mdelay(8); /* according to the datasheet 200us should be enough, 1078
972 but it doesn't work */ 1079 spin_lock_irqsave(&db->lock, flags);
973 iow(db, DM9000_EPCR, 0x0); 1080 ret = ior(db, reg);
974 return (ior(db, DM9000_EPDRL) + (ior(db, DM9000_EPDRH) << 8)); 1081 spin_unlock_irqrestore(&db->lock, flags);
1082
1083 return ret;
1084}
1085
1086static int dm9000_wait_eeprom(board_info_t *db)
1087{
1088 unsigned int status;
1089 int timeout = 8; /* wait max 8msec */
1090
1091 /* The DM9000 data sheets say we should be able to
1092 * poll the ERRE bit in EPCR to wait for the EEPROM
1093 * operation. From testing several chips, this bit
1094 * does not seem to work.
1095 *
1096 * We attempt to use the bit, but fall back to the
1097 * timeout (which is why we do not return an error
1098 * on expiry) to say that the EEPROM operation has
1099 * completed.
1100 */
1101
1102 while (1) {
1103 status = dm9000_read_locked(db, DM9000_EPCR);
1104
1105 if ((status & EPCR_ERRE) == 0)
1106 break;
1107
1108 if (timeout-- < 0) {
 1109 dev_dbg(db->dev, "timeout waiting for EEPROM\n");
1110 break;
1111 }
1112 }
1113
1114 return 0;
975} 1115}
976 1116
977#ifdef DM9000_PROGRAM_EEPROM
978/* 1117/*
979 * Write a word data to SROM 1118 * Read a word data from EEPROM
980 */ 1119 */
981static void 1120static void
982write_srom_word(board_info_t * db, int offset, u16 val) 1121dm9000_read_eeprom(board_info_t *db, int offset, u8 *to)
983{ 1122{
1123 unsigned long flags;
1124
1125 if (db->flags & DM9000_PLATF_NO_EEPROM) {
1126 to[0] = 0xff;
1127 to[1] = 0xff;
1128 return;
1129 }
1130
1131 mutex_lock(&db->addr_lock);
1132
1133 spin_lock_irqsave(&db->lock, flags);
1134
984 iow(db, DM9000_EPAR, offset); 1135 iow(db, DM9000_EPAR, offset);
985 iow(db, DM9000_EPDRH, ((val >> 8) & 0xff)); 1136 iow(db, DM9000_EPCR, EPCR_ERPRR);
986 iow(db, DM9000_EPDRL, (val & 0xff)); 1137
987 iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW); 1138 spin_unlock_irqrestore(&db->lock, flags);
988 mdelay(8); /* same shit */ 1139
989 iow(db, DM9000_EPCR, 0); 1140 dm9000_wait_eeprom(db);
1141
 1142 /* delay for at least 150 us */
1143 msleep(1);
1144
1145 spin_lock_irqsave(&db->lock, flags);
1146
1147 iow(db, DM9000_EPCR, 0x0);
1148
1149 to[0] = ior(db, DM9000_EPDRL);
1150 to[1] = ior(db, DM9000_EPDRH);
1151
1152 spin_unlock_irqrestore(&db->lock, flags);
1153
1154 mutex_unlock(&db->addr_lock);
990} 1155}
991 1156
992/* 1157/*
 993 * Only for development: 1158 * Write a word of data to the EEPROM
994 * Here we write static data to the eeprom in case
995 * we don't have valid content on a new board
996 */ 1159 */
997static void 1160static void
998program_eeprom(board_info_t * db) 1161dm9000_write_eeprom(board_info_t *db, int offset, u8 *data)
999{ 1162{
1000 u16 eeprom[] = { 0x0c00, 0x007f, 0x1300, /* MAC Address */ 1163 unsigned long flags;
1001 0x0000, /* Autoload: accept nothing */
1002 0x0a46, 0x9000, /* Vendor / Product ID */
1003 0x0000, /* pin control */
1004 0x0000,
1005 }; /* Wake-up mode control */
1006 int i;
1007 for (i = 0; i < 8; i++)
1008 write_srom_word(db, i, eeprom[i]);
1009}
1010#endif
1011 1164
1165 if (db->flags & DM9000_PLATF_NO_EEPROM)
1166 return;
1012 1167
1013/* 1168 mutex_lock(&db->addr_lock);
1014 * Calculate the CRC valude of the Rx packet
1015 * flag = 1 : return the reverse CRC (for the received packet CRC)
1016 * 0 : return the normal CRC (for Hash Table index)
1017 */
1018 1169
1019static unsigned long 1170 spin_lock_irqsave(&db->lock, flags);
1020cal_CRC(unsigned char *Data, unsigned int Len, u8 flag) 1171 iow(db, DM9000_EPAR, offset);
1021{ 1172 iow(db, DM9000_EPDRH, data[1]);
1173 iow(db, DM9000_EPDRL, data[0]);
1174 iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW);
1175 spin_unlock_irqrestore(&db->lock, flags);
1176
1177 dm9000_wait_eeprom(db);
1022 1178
 1023 u32 crc = ether_crc_le(Len, Data); 1179 mdelay(1); /* wait at least 150 us to clear */
1024 1180
1025 if (flag) 1181 spin_lock_irqsave(&db->lock, flags);
1026 return ~crc; 1182 iow(db, DM9000_EPCR, 0);
1183 spin_unlock_irqrestore(&db->lock, flags);
1027 1184
1028 return crc; 1185 mutex_unlock(&db->addr_lock);
1029} 1186}
1030 1187
1031/* 1188/*
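
All of the replacement EEPROM helpers above follow the same sequence, and the comment in dm9000_wait_eeprom() explains the one oddity: the ERRE busy bit cannot be relied on, so completion is "ERRE clear, or give up after a handful of polls" followed by an unconditional sleep, and no error is returned on expiry. Seen from a caller, the read path ends up looking like the MAC-address fetch in probe; a sketch under that assumption (the wrapper function name is mine):

    /* One 16-bit EEPROM word per call, two bytes out; probe uses this to
     * assemble the station address. */
    static void dm9000_mac_from_eeprom(board_info_t *db, struct net_device *ndev)
    {
            int i;

            for (i = 0; i < 6; i += 2)
                    dm9000_read_eeprom(db, i / 2, ndev->dev_addr + i);

            /* dm9000_read_eeprom() itself, per the hunk above:
             *   - returns 0xff 0xff if the platform set DM9000_PLATF_NO_EEPROM,
             *   - takes addr_lock, then under the spinlock writes EPAR and
             *     starts the read with EPCR_ERPRR,
             *   - dm9000_wait_eeprom() + msleep(1),
             *   - under the spinlock again clears EPCR and reads EPDRL/EPDRH. */
    }
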
@@ -1037,15 +1194,16 @@ dm9000_hash_table(struct net_device *dev)
1037 board_info_t *db = (board_info_t *) dev->priv; 1194 board_info_t *db = (board_info_t *) dev->priv;
1038 struct dev_mc_list *mcptr = dev->mc_list; 1195 struct dev_mc_list *mcptr = dev->mc_list;
1039 int mc_cnt = dev->mc_count; 1196 int mc_cnt = dev->mc_count;
1197 int i, oft;
1040 u32 hash_val; 1198 u32 hash_val;
1041 u16 i, oft, hash_table[4]; 1199 u16 hash_table[4];
1042 unsigned long flags; 1200 unsigned long flags;
1043 1201
1044 PRINTK2("dm9000_hash_table()\n"); 1202 dm9000_dbg(db, 1, "entering %s\n", __func__);
1045 1203
1046 spin_lock_irqsave(&db->lock,flags); 1204 spin_lock_irqsave(&db->lock, flags);
1047 1205
1048 for (i = 0, oft = 0x10; i < 6; i++, oft++) 1206 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
1049 iow(db, oft, dev->dev_addr[i]); 1207 iow(db, oft, dev->dev_addr[i]);
1050 1208
1051 /* Clear Hash Table */ 1209 /* Clear Hash Table */
@@ -1057,21 +1215,33 @@ dm9000_hash_table(struct net_device *dev)
1057 1215
1058 /* the multicast address in Hash Table : 64 bits */ 1216 /* the multicast address in Hash Table : 64 bits */
1059 for (i = 0; i < mc_cnt; i++, mcptr = mcptr->next) { 1217 for (i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1060 hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f; 1218 hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f;
1061 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); 1219 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1062 } 1220 }
1063 1221
1064 /* Write the hash table to MAC MD table */ 1222 /* Write the hash table to MAC MD table */
1065 for (i = 0, oft = 0x16; i < 4; i++) { 1223 for (i = 0, oft = DM9000_MAR; i < 4; i++) {
1066 iow(db, oft++, hash_table[i] & 0xff); 1224 iow(db, oft++, hash_table[i]);
1067 iow(db, oft++, (hash_table[i] >> 8) & 0xff); 1225 iow(db, oft++, hash_table[i] >> 8);
1068 } 1226 }
1069 1227
1070 spin_unlock_irqrestore(&db->lock,flags); 1228 spin_unlock_irqrestore(&db->lock, flags);
1071} 1229}
1072 1230
1073 1231
1074/* 1232/*
1233 * Sleep, either by using msleep() or if we are suspending, then
1234 * use mdelay() to sleep.
1235 */
1236static void dm9000_msleep(board_info_t *db, unsigned int ms)
1237{
1238 if (db->in_suspend)
1239 mdelay(ms);
1240 else
1241 msleep(ms);
1242}
1243
1244/*
1075 * Read a word from phyxcer 1245 * Read a word from phyxcer
1076 */ 1246 */
1077static int 1247static int
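
dm9000_hash_table() now computes the 64-bit multicast filter with ether_crc_le() directly instead of the removed cal_CRC() wrapper: the low six bits of the little-endian CRC of each multicast address pick one of 64 filter bits, kept as four 16-bit words that are then written byte-wise starting at DM9000_MAR. A small standalone illustration of the bucket calculation (the helper name is mine; ether_crc_le() is the kernel's own):

    #include <linux/crc32.h>        /* ether_crc_le() */
    #include <linux/types.h>

    /* Set the filter bit that a given multicast address hashes to. */
    static void dm9000_hash_bucket(const u8 *addr, u16 hash_table[4])
    {
            u32 hash_val = ether_crc_le(6, addr) & 0x3f;

            /* bit N of the 64-bit filter = bit (N % 16) of word (N / 16) */
            hash_table[hash_val / 16] |= (u16)1 << (hash_val % 16);
    }
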
@@ -1082,6 +1252,8 @@ dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
1082 unsigned int reg_save; 1252 unsigned int reg_save;
1083 int ret; 1253 int ret;
1084 1254
1255 mutex_lock(&db->addr_lock);
1256
1085 spin_lock_irqsave(&db->lock,flags); 1257 spin_lock_irqsave(&db->lock,flags);
1086 1258
1087 /* Save previous register address */ 1259 /* Save previous register address */
@@ -1091,7 +1263,15 @@ dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
1091 iow(db, DM9000_EPAR, DM9000_PHY | reg); 1263 iow(db, DM9000_EPAR, DM9000_PHY | reg);
1092 1264
1093 iow(db, DM9000_EPCR, 0xc); /* Issue phyxcer read command */ 1265 iow(db, DM9000_EPCR, 0xc); /* Issue phyxcer read command */
1094 udelay(100); /* Wait read complete */ 1266
1267 writeb(reg_save, db->io_addr);
1268 spin_unlock_irqrestore(&db->lock,flags);
1269
1270 dm9000_msleep(db, 1); /* Wait read complete */
1271
1272 spin_lock_irqsave(&db->lock,flags);
1273 reg_save = readb(db->io_addr);
1274
1095 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ 1275 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
1096 1276
1097 /* The read data keeps on REG_0D & REG_0E */ 1277 /* The read data keeps on REG_0D & REG_0E */
@@ -1099,9 +1279,9 @@ dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
1099 1279
1100 /* restore the previous address */ 1280 /* restore the previous address */
1101 writeb(reg_save, db->io_addr); 1281 writeb(reg_save, db->io_addr);
1102
1103 spin_unlock_irqrestore(&db->lock,flags); 1282 spin_unlock_irqrestore(&db->lock,flags);
1104 1283
1284 mutex_unlock(&db->addr_lock);
1105 return ret; 1285 return ret;
1106} 1286}
1107 1287
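
The PHY accessors lose their udelay() busy-waits in the same way: program the MII address, drop the spinlock (after putting the saved index register back so a concurrent interrupt handler finds consistent state), sleep through dm9000_msleep() — which degrades to mdelay() while in_suspend is set, presumably because the suspend/resume path may not be allowed to schedule — and then retake the lock to collect the result. Condensed shape of dm9000_phy_read() after the change (the final EPDRH/EPDRL combination is summarised from context not shown in the hunk):

    /* Inside dm9000_phy_read(), after the change: */
    mutex_lock(&db->addr_lock);             /* one slow accessor at a time */

    spin_lock_irqsave(&db->lock, flags);
    reg_save = readb(db->io_addr);          /* remember the index register */
    iow(db, DM9000_EPAR, DM9000_PHY | reg);
    iow(db, DM9000_EPCR, 0xc);              /* issue the PHY read */
    writeb(reg_save, db->io_addr);          /* restore before unlocking */
    spin_unlock_irqrestore(&db->lock, flags);

    dm9000_msleep(db, 1);                   /* wait with no spinlock held */

    spin_lock_irqsave(&db->lock, flags);
    reg_save = readb(db->io_addr);
    iow(db, DM9000_EPCR, 0x0);              /* clear the read command */
    ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
    writeb(reg_save, db->io_addr);
    spin_unlock_irqrestore(&db->lock, flags);

    mutex_unlock(&db->addr_lock);
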
@@ -1115,6 +1295,8 @@ dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value)
1115 unsigned long flags; 1295 unsigned long flags;
1116 unsigned long reg_save; 1296 unsigned long reg_save;
1117 1297
1298 mutex_lock(&db->addr_lock);
1299
1118 spin_lock_irqsave(&db->lock,flags); 1300 spin_lock_irqsave(&db->lock,flags);
1119 1301
1120 /* Save previous register address */ 1302 /* Save previous register address */
@@ -1124,25 +1306,38 @@ dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value)
1124 iow(db, DM9000_EPAR, DM9000_PHY | reg); 1306 iow(db, DM9000_EPAR, DM9000_PHY | reg);
1125 1307
1126 /* Fill the written data into REG_0D & REG_0E */ 1308 /* Fill the written data into REG_0D & REG_0E */
1127 iow(db, DM9000_EPDRL, (value & 0xff)); 1309 iow(db, DM9000_EPDRL, value);
1128 iow(db, DM9000_EPDRH, ((value >> 8) & 0xff)); 1310 iow(db, DM9000_EPDRH, value >> 8);
1129 1311
1130 iow(db, DM9000_EPCR, 0xa); /* Issue phyxcer write command */ 1312 iow(db, DM9000_EPCR, 0xa); /* Issue phyxcer write command */
1131 udelay(500); /* Wait write complete */ 1313
1314 writeb(reg_save, db->io_addr);
1315 spin_unlock_irqrestore(&db->lock, flags);
1316
1317 dm9000_msleep(db, 1); /* Wait write complete */
1318
1319 spin_lock_irqsave(&db->lock,flags);
1320 reg_save = readb(db->io_addr);
1321
1132 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ 1322 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
1133 1323
1134 /* restore the previous address */ 1324 /* restore the previous address */
1135 writeb(reg_save, db->io_addr); 1325 writeb(reg_save, db->io_addr);
1136 1326
1137 spin_unlock_irqrestore(&db->lock,flags); 1327 spin_unlock_irqrestore(&db->lock, flags);
1328 mutex_unlock(&db->addr_lock);
1138} 1329}
1139 1330
1140static int 1331static int
1141dm9000_drv_suspend(struct platform_device *dev, pm_message_t state) 1332dm9000_drv_suspend(struct platform_device *dev, pm_message_t state)
1142{ 1333{
1143 struct net_device *ndev = platform_get_drvdata(dev); 1334 struct net_device *ndev = platform_get_drvdata(dev);
1335 board_info_t *db;
1144 1336
1145 if (ndev) { 1337 if (ndev) {
1338 db = (board_info_t *) ndev->priv;
1339 db->in_suspend = 1;
1340
1146 if (netif_running(ndev)) { 1341 if (netif_running(ndev)) {
1147 netif_device_detach(ndev); 1342 netif_device_detach(ndev);
1148 dm9000_shutdown(ndev); 1343 dm9000_shutdown(ndev);
@@ -1165,6 +1360,8 @@ dm9000_drv_resume(struct platform_device *dev)
1165 1360
1166 netif_device_attach(ndev); 1361 netif_device_attach(ndev);
1167 } 1362 }
1363
1364 db->in_suspend = 0;
1168 } 1365 }
1169 return 0; 1366 return 0;
1170} 1367}
@@ -1180,8 +1377,7 @@ dm9000_drv_remove(struct platform_device *pdev)
1180 dm9000_release_board(pdev, (board_info_t *) ndev->priv); 1377 dm9000_release_board(pdev, (board_info_t *) ndev->priv);
1181 free_netdev(ndev); /* free device structure */ 1378 free_netdev(ndev); /* free device structure */
1182 1379
1183 PRINTK1("clean_module() exit\n"); 1380 dev_dbg(&pdev->dev, "released and freed device\n");
1184
1185 return 0; 1381 return 0;
1186} 1382}
1187 1383
@@ -1199,7 +1395,7 @@ static struct platform_driver dm9000_driver = {
1199static int __init 1395static int __init
1200dm9000_init(void) 1396dm9000_init(void)
1201{ 1397{
1202 printk(KERN_INFO "%s Ethernet Driver\n", CARDNAME); 1398 printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);
1203 1399
1204 return platform_driver_register(&dm9000_driver); /* search board and register */ 1400 return platform_driver_register(&dm9000_driver); /* search board and register */
1205} 1401}
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index d876787ce336..85e66f4c7886 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -50,7 +50,7 @@ struct e1000_stats {
50 int stat_offset; 50 int stat_offset;
51}; 51};
52 52
53#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \ 53#define E1000_STAT(m) FIELD_SIZEOF(struct e1000_adapter, m), \
54 offsetof(struct e1000_adapter, m) 54 offsetof(struct e1000_adapter, m)
55static const struct e1000_stats e1000_gstrings_stats[] = { 55static const struct e1000_stats e1000_gstrings_stats[] = {
56 { "rx_packets", E1000_STAT(stats.gprc) }, 56 { "rx_packets", E1000_STAT(stats.gprc) },
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 7c5b05a82f0e..0991648c53dc 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -926,8 +926,6 @@ e1000_probe(struct pci_dev *pdev,
926{ 926{
927 struct net_device *netdev; 927 struct net_device *netdev;
928 struct e1000_adapter *adapter; 928 struct e1000_adapter *adapter;
929 unsigned long mmio_start, mmio_len;
930 unsigned long flash_start, flash_len;
931 929
932 static int cards_found = 0; 930 static int cards_found = 0;
933 static int global_quad_port_a = 0; /* global ksp3 port a indication */ 931 static int global_quad_port_a = 0; /* global ksp3 port a indication */
@@ -970,11 +968,9 @@ e1000_probe(struct pci_dev *pdev,
970 adapter->hw.back = adapter; 968 adapter->hw.back = adapter;
971 adapter->msg_enable = (1 << debug) - 1; 969 adapter->msg_enable = (1 << debug) - 1;
972 970
973 mmio_start = pci_resource_start(pdev, BAR_0);
974 mmio_len = pci_resource_len(pdev, BAR_0);
975
976 err = -EIO; 971 err = -EIO;
977 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); 972 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
973 pci_resource_len(pdev, BAR_0));
978 if (!adapter->hw.hw_addr) 974 if (!adapter->hw.hw_addr)
979 goto err_ioremap; 975 goto err_ioremap;
980 976
@@ -1009,10 +1005,6 @@ e1000_probe(struct pci_dev *pdev,
1009#endif 1005#endif
1010 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 1006 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1011 1007
1012 netdev->mem_start = mmio_start;
1013 netdev->mem_end = mmio_start + mmio_len;
1014 netdev->base_addr = adapter->hw.io_base;
1015
1016 adapter->bd_number = cards_found; 1008 adapter->bd_number = cards_found;
1017 1009
1018 /* setup the private structure */ 1010 /* setup the private structure */
@@ -1025,9 +1017,9 @@ e1000_probe(struct pci_dev *pdev,
1025 * because it depends on mac_type */ 1017 * because it depends on mac_type */
1026 if ((adapter->hw.mac_type == e1000_ich8lan) && 1018 if ((adapter->hw.mac_type == e1000_ich8lan) &&
1027 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { 1019 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
1028 flash_start = pci_resource_start(pdev, 1); 1020 adapter->hw.flash_address =
1029 flash_len = pci_resource_len(pdev, 1); 1021 ioremap(pci_resource_start(pdev, 1),
1030 adapter->hw.flash_address = ioremap(flash_start, flash_len); 1022 pci_resource_len(pdev, 1));
1031 if (!adapter->hw.flash_address) 1023 if (!adapter->hw.flash_address)
1032 goto err_flashmap; 1024 goto err_flashmap;
1033 } 1025 }
@@ -1203,6 +1195,14 @@ e1000_probe(struct pci_dev *pdev,
1203 1195
1204 printk("%s\n", print_mac(mac, netdev->dev_addr)); 1196 printk("%s\n", print_mac(mac, netdev->dev_addr));
1205 1197
1198 if (adapter->hw.bus_type == e1000_bus_type_pci_express) {
1199 DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
1200 "longer be supported by this driver in the future.\n",
1201 pdev->vendor, pdev->device);
1202 DPRINTK(PROBE, WARNING, "please use the \"e1000e\" "
1203 "driver instead.\n");
1204 }
1205
1206 /* reset the hardware with the new settings */ 1206 /* reset the hardware with the new settings */
1207 e1000_reset(adapter); 1207 e1000_reset(adapter);
1208 1208
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index f58f017ee47a..3031d6d16247 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1055,23 +1055,6 @@ static void e1000_release_hw_control(struct e1000_adapter *adapter)
1055 } 1055 }
1056} 1056}
1057 1057
1058static void e1000_release_manageability(struct e1000_adapter *adapter)
1059{
1060 if (adapter->flags & FLAG_MNG_PT_ENABLED) {
1061 struct e1000_hw *hw = &adapter->hw;
1062
1063 u32 manc = er32(MANC);
1064
1065 /* re-enable hardware interception of ARP */
1066 manc |= E1000_MANC_ARP_EN;
1067 manc &= ~E1000_MANC_EN_MNG2HOST;
1068
1069 /* don't explicitly have to mess with MANC2H since
1070 * MANC has an enable disable that gates MANC2H */
1071 ew32(MANC, manc);
1072 }
1073}
1074
1075/** 1058/**
1076 * @e1000_alloc_ring - allocate memory for a ring structure 1059 * @e1000_alloc_ring - allocate memory for a ring structure
1077 **/ 1060 **/
@@ -1561,9 +1544,6 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
1561 1544
1562 manc = er32(MANC); 1545 manc = er32(MANC);
1563 1546
1564 /* disable hardware interception of ARP */
1565 manc &= ~(E1000_MANC_ARP_EN);
1566
1567 /* enable receiving management packets to the host. this will probably 1547 /* enable receiving management packets to the host. this will probably
1568 * generate destination unreachable messages from the host OS, but 1548 * generate destination unreachable messages from the host OS, but
1569 * the packets will be handled on SMBUS */ 1549 * the packets will be handled on SMBUS */
@@ -1690,6 +1670,9 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1690 else 1670 else
1691 rctl |= E1000_RCTL_LPE; 1671 rctl |= E1000_RCTL_LPE;
1692 1672
1673 /* Enable hardware CRC frame stripping */
1674 rctl |= E1000_RCTL_SECRC;
1675
1693 /* Setup buffer sizes */ 1676 /* Setup buffer sizes */
1694 rctl &= ~E1000_RCTL_SZ_4096; 1677 rctl &= ~E1000_RCTL_SZ_4096;
1695 rctl |= E1000_RCTL_BSEX; 1678 rctl |= E1000_RCTL_BSEX;
@@ -1755,9 +1738,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1755 1738
1756 /* Enable Packet split descriptors */ 1739 /* Enable Packet split descriptors */
1757 rctl |= E1000_RCTL_DTYP_PS; 1740 rctl |= E1000_RCTL_DTYP_PS;
1758
1759 /* Enable hardware CRC frame stripping */
1760 rctl |= E1000_RCTL_SECRC;
1761 1741
1762 psrctl |= adapter->rx_ps_bsize0 >> 1742 psrctl |= adapter->rx_ps_bsize0 >>
1763 E1000_PSRCTL_BSIZE0_SHIFT; 1743 E1000_PSRCTL_BSIZE0_SHIFT;
@@ -2008,7 +1988,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
2008 u16 mii_reg; 1988 u16 mii_reg;
2009 1989
2010 /* WoL is enabled */ 1990 /* WoL is enabled */
2011 if (!adapter->wol) 1991 if (adapter->wol)
2012 return; 1992 return;
2013 1993
2014 /* non-copper PHY? */ 1994 /* non-copper PHY? */
@@ -2140,8 +2120,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
2140 phy_data &= ~IGP02E1000_PM_SPD; 2120 phy_data &= ~IGP02E1000_PM_SPD;
2141 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); 2121 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
2142 } 2122 }
2143
2144 e1000_release_manageability(adapter);
2145} 2123}
2146 2124
2147int e1000e_up(struct e1000_adapter *adapter) 2125int e1000e_up(struct e1000_adapter *adapter)
@@ -3487,8 +3465,6 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
3487 pci_enable_wake(pdev, PCI_D3cold, 0); 3465 pci_enable_wake(pdev, PCI_D3cold, 0);
3488 } 3466 }
3489 3467
3490 e1000_release_manageability(adapter);
3491
3492 /* make sure adapter isn't asleep if manageability is enabled */ 3468 /* make sure adapter isn't asleep if manageability is enabled */
3493 if (adapter->flags & FLAG_MNG_PT_ENABLED) { 3469 if (adapter->flags & FLAG_MNG_PT_ENABLED) {
3494 pci_enable_wake(pdev, PCI_D3hot, 1); 3470 pci_enable_wake(pdev, PCI_D3hot, 1);
@@ -4054,8 +4030,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
4054 4030
4055 flush_scheduled_work(); 4031 flush_scheduled_work();
4056 4032
4057 e1000_release_manageability(adapter);
4058
4059 /* Release control of h/w to f/w. If f/w is AMT enabled, this 4033 /* Release control of h/w to f/w. If f/w is AMT enabled, this
4060 * would have already happened in close and is redundant. */ 4034 * would have already happened in close and is redundant. */
4061 e1000_release_hw_control(adapter); 4035 e1000_release_hw_control(adapter);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index d4843d014bc9..801b4d9cd972 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -166,21 +166,24 @@
166 * Hardware access: 166 * Hardware access:
167 */ 167 */
168 168
169#define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */ 169#define DEV_NEED_TIMERIRQ 0x00001 /* set the timer irq flag in the irq mask */
170#define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */ 170#define DEV_NEED_LINKTIMER 0x00002 /* poll link settings. Relies on the timer irq */
171#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */ 171#define DEV_HAS_LARGEDESC 0x00004 /* device supports jumbo frames and needs packet format 2 */
172#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */ 172#define DEV_HAS_HIGH_DMA 0x00008 /* device supports 64bit dma */
173#define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */ 173#define DEV_HAS_CHECKSUM 0x00010 /* device supports tx and rx checksum offloads */
174#define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and striping */ 174#define DEV_HAS_VLAN 0x00020 /* device supports vlan tagging and striping */
175#define DEV_HAS_MSI 0x0040 /* device supports MSI */ 175#define DEV_HAS_MSI 0x00040 /* device supports MSI */
176#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ 176#define DEV_HAS_MSI_X 0x00080 /* device supports MSI-X */
177#define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ 177#define DEV_HAS_POWER_CNTRL 0x00100 /* device supports power savings */
178#define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */ 178#define DEV_HAS_STATISTICS_V1 0x00200 /* device supports hw statistics version 1 */
179#define DEV_HAS_STATISTICS_V1 0x0400 /* device supports hw statistics version 1 */ 179#define DEV_HAS_STATISTICS_V2 0x00400 /* device supports hw statistics version 2 */
180#define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */ 180#define DEV_HAS_TEST_EXTENDED 0x00800 /* device supports extended diagnostic test */
181#define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */ 181#define DEV_HAS_MGMT_UNIT 0x01000 /* device supports management unit */
182#define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */ 182#define DEV_HAS_CORRECT_MACADDR 0x02000 /* device supports correct mac address order */
183#define DEV_HAS_CORRECT_MACADDR 0x4000 /* device supports correct mac address order */ 183#define DEV_HAS_COLLISION_FIX 0x04000 /* device supports tx collision fix */
184#define DEV_HAS_PAUSEFRAME_TX_V1 0x08000 /* device supports tx pause frames version 1 */
185#define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */
186#define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */
184 187
185enum { 188enum {
186 NvRegIrqStatus = 0x000, 189 NvRegIrqStatus = 0x000,
@@ -266,9 +269,12 @@ enum {
266#define NVREG_RNDSEED_FORCE3 0x7400 269#define NVREG_RNDSEED_FORCE3 0x7400
267 270
268 NvRegTxDeferral = 0xA0, 271 NvRegTxDeferral = 0xA0,
269#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f 272#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
270#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f 273#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
271#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f 274#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
275#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
276#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
277#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000
272 NvRegRxDeferral = 0xA4, 278 NvRegRxDeferral = 0xA4,
273#define NVREG_RX_DEFERRAL_DEFAULT 0x16 279#define NVREG_RX_DEFERRAL_DEFAULT 0x16
274 NvRegMacAddrA = 0xA8, 280 NvRegMacAddrA = 0xA8,
@@ -318,8 +324,10 @@ enum {
318 NvRegTxRingPhysAddrHigh = 0x148, 324 NvRegTxRingPhysAddrHigh = 0x148,
319 NvRegRxRingPhysAddrHigh = 0x14C, 325 NvRegRxRingPhysAddrHigh = 0x14C,
320 NvRegTxPauseFrame = 0x170, 326 NvRegTxPauseFrame = 0x170,
321#define NVREG_TX_PAUSEFRAME_DISABLE 0x01ff0080 327#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
322#define NVREG_TX_PAUSEFRAME_ENABLE 0x01800010 328#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
329#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
330#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
323 NvRegMIIStatus = 0x180, 331 NvRegMIIStatus = 0x180,
324#define NVREG_MIISTAT_ERROR 0x0001 332#define NVREG_MIISTAT_ERROR 0x0001
325#define NVREG_MIISTAT_LINKCHANGE 0x0008 333#define NVREG_MIISTAT_LINKCHANGE 0x0008
@@ -2751,7 +2759,12 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags)
2751 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { 2759 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
2752 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; 2760 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
2753 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { 2761 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
2754 writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame); 2762 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
2763 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
2764 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
2765 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)
2766 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
2767 writel(pause_enable, base + NvRegTxPauseFrame);
2755 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 2768 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
2756 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2769 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2757 } else { 2770 } else {
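[Editor's note] The nv_update_pause() hunk above replaces the single NVREG_TX_PAUSEFRAME_ENABLE value with a per-chip choice between the V1/V2/V3 enable constants, keyed off the new DEV_HAS_PAUSEFRAME_TX_V* feature bits. A minimal standalone model of that selection follows; the flag and register values are copied from the hunk, while main() and the printing are purely illustrative.

#include <stdio.h>

#define DEV_HAS_PAUSEFRAME_TX_V1  0x08000
#define DEV_HAS_PAUSEFRAME_TX_V2  0x10000
#define DEV_HAS_PAUSEFRAME_TX_V3  0x20000

#define NVREG_TX_PAUSEFRAME_ENABLE_V1  0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2  0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3  0x09f00880

/* Pick the TX pause-frame enable value the same way the hunk does:
 * start from V1 and upgrade if the device advertises V2 or V3. */
static unsigned int pick_pause_enable(unsigned long driver_data)
{
        unsigned int pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;

        if (driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
                pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
        if (driver_data & DEV_HAS_PAUSEFRAME_TX_V3)
                pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
        return pause_enable;
}

int main(void)
{
        printf("V3 card -> 0x%08x\n",
               pick_pause_enable(DEV_HAS_PAUSEFRAME_TX_V3));
        return 0;
}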
@@ -2785,6 +2798,7 @@ static int nv_update_linkspeed(struct net_device *dev)
2785 int retval = 0; 2798 int retval = 0;
2786 u32 control_1000, status_1000, phyreg, pause_flags, txreg; 2799 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
2787 u32 txrxFlags = 0; 2800 u32 txrxFlags = 0;
2801 u32 phy_exp;
2788 2802
2789 /* BMSR_LSTATUS is latched, read it twice: 2803 /* BMSR_LSTATUS is latched, read it twice:
2790 * we want the current value. 2804 * we want the current value.
@@ -2912,13 +2926,25 @@ set_speed:
2912 phyreg |= PHY_1000; 2926 phyreg |= PHY_1000;
2913 writel(phyreg, base + NvRegPhyInterface); 2927 writel(phyreg, base + NvRegPhyInterface);
2914 2928
2929 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
2915 if (phyreg & PHY_RGMII) { 2930 if (phyreg & PHY_RGMII) {
2916 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2931 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
2917 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 2932 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
2918 else 2933 } else {
2919 txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 2934 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
2935 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
2936 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
2937 else
2938 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
2939 } else {
2940 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
2941 }
2942 }
2920 } else { 2943 } else {
2921 txreg = NVREG_TX_DEFERRAL_DEFAULT; 2944 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
2945 txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
2946 else
2947 txreg = NVREG_TX_DEFERRAL_DEFAULT;
2922 } 2948 }
2923 writel(txreg, base + NvRegTxDeferral); 2949 writel(txreg, base + NvRegTxDeferral);
2924 2950
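[Editor's note] The nv_update_linkspeed() hunk above introduces the "stretch" TX deferral values for non-autoneg, half-duplex links on parts flagged DEV_HAS_COLLISION_FIX, keeping the old defaults otherwise; phy_exp caches whether MII_EXPANSION reports autoneg capability. Below is a hedged, standalone restatement of the non-RGMII branch of that decision; the constants come from the hunk, the function and its parameters are invented for illustration.

#include <stdio.h>
#include <stdbool.h>

#define NVREG_TX_DEFERRAL_DEFAULT      0x15050f
#define NVREG_TX_DEFERRAL_MII_STRETCH  0x152000

/* Non-RGMII branch of the new deferral selection: use the stretched
 * value only for a non-autoneg, half-duplex link on a chip that has
 * the TX collision fix. */
static unsigned int mii_tx_deferral(bool phy_exp, bool full_duplex,
                                    bool has_collision_fix)
{
        if (!phy_exp && !full_duplex && has_collision_fix)
                return NVREG_TX_DEFERRAL_MII_STRETCH;
        return NVREG_TX_DEFERRAL_DEFAULT;
}

int main(void)
{
        printf("stretch: 0x%06x\n", mii_tx_deferral(false, false, true));
        printf("default: 0x%06x\n", mii_tx_deferral(true, true, true));
        return 0;
}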
@@ -5155,7 +5181,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5155 } 5181 }
5156 5182
5157 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; 5183 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5158 if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) { 5184 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5185 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5186 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5159 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; 5187 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5160 } 5188 }
5161 5189
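[Editor's note] In the nv_probe() hunk above, the single DEV_HAS_PAUSEFRAME_TX test becomes three separate per-version tests. Since the three bits are distinct, the same capability check could also be written as one mask test; the standalone snippet below shows that equivalent form purely as a design-choice illustration (the patch itself keeps the three explicit checks).

#include <stdio.h>

#define DEV_HAS_PAUSEFRAME_TX_V1  0x08000
#define DEV_HAS_PAUSEFRAME_TX_V2  0x10000
#define DEV_HAS_PAUSEFRAME_TX_V3  0x20000
#define DEV_HAS_PAUSEFRAME_TX_ANY (DEV_HAS_PAUSEFRAME_TX_V1 | \
                                   DEV_HAS_PAUSEFRAME_TX_V2 | \
                                   DEV_HAS_PAUSEFRAME_TX_V3)

int main(void)
{
        unsigned long driver_data = DEV_HAS_PAUSEFRAME_TX_V2;

        if (driver_data & DEV_HAS_PAUSEFRAME_TX_ANY)
                printf("tx pause capable\n");
        return 0;
}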
@@ -5559,107 +5587,107 @@ static struct pci_device_id pci_tbl[] = {
5559 }, 5587 },
5560 { /* MCP55 Ethernet Controller */ 5588 { /* MCP55 Ethernet Controller */
5561 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), 5589 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
5562 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5590 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5563 }, 5591 },
5564 { /* MCP55 Ethernet Controller */ 5592 { /* MCP55 Ethernet Controller */
5565 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 5593 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
5566 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5594 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5567 }, 5595 },
5568 { /* MCP61 Ethernet Controller */ 5596 { /* MCP61 Ethernet Controller */
5569 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), 5597 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
5570 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5598 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5571 }, 5599 },
5572 { /* MCP61 Ethernet Controller */ 5600 { /* MCP61 Ethernet Controller */
5573 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), 5601 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
5574 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5602 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5575 }, 5603 },
5576 { /* MCP61 Ethernet Controller */ 5604 { /* MCP61 Ethernet Controller */
5577 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), 5605 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
5578 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5606 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5579 }, 5607 },
5580 { /* MCP61 Ethernet Controller */ 5608 { /* MCP61 Ethernet Controller */
5581 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), 5609 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
5582 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5610 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5583 }, 5611 },
5584 { /* MCP65 Ethernet Controller */ 5612 { /* MCP65 Ethernet Controller */
5585 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), 5613 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
5586 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5614 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5587 }, 5615 },
5588 { /* MCP65 Ethernet Controller */ 5616 { /* MCP65 Ethernet Controller */
5589 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), 5617 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
5590 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5618 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5591 }, 5619 },
5592 { /* MCP65 Ethernet Controller */ 5620 { /* MCP65 Ethernet Controller */
5593 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), 5621 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
5594 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5622 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5595 }, 5623 },
5596 { /* MCP65 Ethernet Controller */ 5624 { /* MCP65 Ethernet Controller */
5597 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), 5625 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
5598 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5626 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5599 }, 5627 },
5600 { /* MCP67 Ethernet Controller */ 5628 { /* MCP67 Ethernet Controller */
5601 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), 5629 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
5602 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5630 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5603 }, 5631 },
5604 { /* MCP67 Ethernet Controller */ 5632 { /* MCP67 Ethernet Controller */
5605 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), 5633 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
5606 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5634 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5607 }, 5635 },
5608 { /* MCP67 Ethernet Controller */ 5636 { /* MCP67 Ethernet Controller */
5609 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), 5637 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
5610 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5638 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5611 }, 5639 },
5612 { /* MCP67 Ethernet Controller */ 5640 { /* MCP67 Ethernet Controller */
5613 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), 5641 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
5614 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5642 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
5615 }, 5643 },
5616 { /* MCP73 Ethernet Controller */ 5644 { /* MCP73 Ethernet Controller */
5617 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28), 5645 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28),
5618 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5646 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5619 }, 5647 },
5620 { /* MCP73 Ethernet Controller */ 5648 { /* MCP73 Ethernet Controller */
5621 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29), 5649 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29),
5622 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5650 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5623 }, 5651 },
5624 { /* MCP73 Ethernet Controller */ 5652 { /* MCP73 Ethernet Controller */
5625 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30), 5653 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30),
5626 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5654 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5627 }, 5655 },
5628 { /* MCP73 Ethernet Controller */ 5656 { /* MCP73 Ethernet Controller */
5629 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31), 5657 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
5630 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5658 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5631 }, 5659 },
5632 { /* MCP77 Ethernet Controller */ 5660 { /* MCP77 Ethernet Controller */
5633 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), 5661 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
5634 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5662 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5635 }, 5663 },
5636 { /* MCP77 Ethernet Controller */ 5664 { /* MCP77 Ethernet Controller */
5637 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), 5665 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
5638 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5666 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5639 }, 5667 },
5640 { /* MCP77 Ethernet Controller */ 5668 { /* MCP77 Ethernet Controller */
5641 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), 5669 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
5642 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5670 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5643 }, 5671 },
5644 { /* MCP77 Ethernet Controller */ 5672 { /* MCP77 Ethernet Controller */
5645 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), 5673 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
5646 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5674 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5647 }, 5675 },
5648 { /* MCP79 Ethernet Controller */ 5676 { /* MCP79 Ethernet Controller */
5649 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), 5677 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
5650 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5678 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5651 }, 5679 },
5652 { /* MCP79 Ethernet Controller */ 5680 { /* MCP79 Ethernet Controller */
5653 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), 5681 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
5654 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5682 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5655 }, 5683 },
5656 { /* MCP79 Ethernet Controller */ 5684 { /* MCP79 Ethernet Controller */
5657 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), 5685 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
5658 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5686 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5659 }, 5687 },
5660 { /* MCP79 Ethernet Controller */ 5688 { /* MCP79 Ethernet Controller */
5661 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), 5689 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
5662 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5690 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX,
5663 }, 5691 },
5664 {0,}, 5692 {0,},
5665}; 5693};
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 0431e9ed0fac..4244fc282f21 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -130,8 +130,8 @@ static void free_skb_resources(struct gfar_private *priv);
130static void gfar_set_multi(struct net_device *dev); 130static void gfar_set_multi(struct net_device *dev);
131static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); 131static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
132static void gfar_configure_serdes(struct net_device *dev); 132static void gfar_configure_serdes(struct net_device *dev);
133extern int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id, int regnum, u16 value); 133extern int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, int regnum, u16 value);
134extern int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum); 134extern int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
135#ifdef CONFIG_GFAR_NAPI 135#ifdef CONFIG_GFAR_NAPI
136static int gfar_poll(struct napi_struct *napi, int budget); 136static int gfar_poll(struct napi_struct *napi, int budget);
137#endif 137#endif
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index 6a647d95e6ea..24327629bf03 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -51,7 +51,7 @@
51 * the local mdio pins, which may not be the same as system mdio bus, used for 51 * the local mdio pins, which may not be the same as system mdio bus, used for
52 * controlling the external PHYs, for example. 52 * controlling the external PHYs, for example.
53 */ 53 */
54int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id, 54int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
55 int regnum, u16 value) 55 int regnum, u16 value)
56{ 56{
57 /* Set the PHY address and the register address we want to write */ 57 /* Set the PHY address and the register address we want to write */
@@ -77,7 +77,7 @@ int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id,
77 * and are always tied to the local mdio pins, which may not be the 77 * and are always tied to the local mdio pins, which may not be the
78 * same as system mdio bus, used for controlling the external PHYs, for eg. 78 * same as system mdio bus, used for controlling the external PHYs, for eg.
79 */ 79 */
80int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum) 80int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum)
81 81
82{ 82{
83 u16 value; 83 u16 value;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index cfcd15af501e..30c9b3b0d131 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -289,7 +289,6 @@ static void ax_bump(struct mkiss *ax)
289 *ax->rbuff &= ~0x20; 289 *ax->rbuff &= ~0x20;
290 } 290 }
291 } 291 }
292 spin_unlock_bh(&ax->buflock);
293 292
294 count = ax->rcount; 293 count = ax->rcount;
295 294
@@ -297,17 +296,17 @@ static void ax_bump(struct mkiss *ax)
297 printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n", 296 printk(KERN_ERR "mkiss: %s: memory squeeze, dropping packet.\n",
298 ax->dev->name); 297 ax->dev->name);
299 ax->stats.rx_dropped++; 298 ax->stats.rx_dropped++;
299 spin_unlock_bh(&ax->buflock);
300 return; 300 return;
301 } 301 }
302 302
303 spin_lock_bh(&ax->buflock);
304 memcpy(skb_put(skb,count), ax->rbuff, count); 303 memcpy(skb_put(skb,count), ax->rbuff, count);
305 spin_unlock_bh(&ax->buflock);
306 skb->protocol = ax25_type_trans(skb, ax->dev); 304 skb->protocol = ax25_type_trans(skb, ax->dev);
307 netif_rx(skb); 305 netif_rx(skb);
308 ax->dev->last_rx = jiffies; 306 ax->dev->last_rx = jiffies;
309 ax->stats.rx_packets++; 307 ax->stats.rx_packets++;
310 ax->stats.rx_bytes += count; 308 ax->stats.rx_bytes += count;
309 spin_unlock_bh(&ax->buflock);
311} 310}
312 311
313static void kiss_unesc(struct mkiss *ax, unsigned char s) 312static void kiss_unesc(struct mkiss *ax, unsigned char s)
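[Editor's note] The mkiss hunk above reworks the buflock critical section in ax_bump(): instead of dropping the lock before the skb is filled and re-taking it just around the memcpy(), the lock taken earlier in the function is now held across the buffer fix-ups, the copy out of ax->rbuff and the statistics update, and released on both the drop path and the normal path. The standalone pthread sketch below only models that ordering; the driver itself uses spin_lock_bh(), and every name here is an invented stand-in.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative model: one lock protects the receive buffer, its count
 * and the stats until the data has been copied out. */
struct rx_state {
        pthread_mutex_t lock;
        unsigned char   rbuff[256];
        size_t          rcount;
        unsigned long   rx_packets, rx_dropped;
};

static void bump(struct rx_state *rx)
{
        unsigned char *copy;
        size_t count;

        pthread_mutex_lock(&rx->lock);           /* taken before the fix-ups */
        count = rx->rcount;
        copy = malloc(count);
        if (!copy) {
                rx->rx_dropped++;
                pthread_mutex_unlock(&rx->lock); /* drop path unlocks too */
                return;
        }
        memcpy(copy, rx->rbuff, count);          /* copy done under the lock */
        rx->rx_packets++;
        pthread_mutex_unlock(&rx->lock);         /* released after the stats */
        free(copy);
}

int main(void)
{
        struct rx_state rx = { .lock = PTHREAD_MUTEX_INITIALIZER,
                               .rcount = 16 };
        bump(&rx);
        printf("packets=%lu dropped=%lu\n", rx.rx_packets, rx.rx_dropped);
        return 0;
}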
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index 9bc1132fa788..5757788227be 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -302,7 +302,6 @@ static int __devexit rgmii_remove(struct of_device *ofdev)
302static struct of_device_id rgmii_match[] = 302static struct of_device_id rgmii_match[] =
303{ 303{
304 { 304 {
305 .type = "rgmii-interface",
306 .compatible = "ibm,rgmii", 305 .compatible = "ibm,rgmii",
307 }, 306 },
308 { 307 {
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index f69721e4eaa1..0447f9bcd27a 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -43,7 +43,7 @@ struct igb_stats {
43 int stat_offset; 43 int stat_offset;
44}; 44};
45 45
46#define IGB_STAT(m) sizeof(((struct igb_adapter *)0)->m), \ 46#define IGB_STAT(m) FIELD_SIZEOF(struct igb_adapter, m), \
47 offsetof(struct igb_adapter, m) 47 offsetof(struct igb_adapter, m)
48static const struct igb_stats igb_gstrings_stats[] = { 48static const struct igb_stats igb_gstrings_stats[] = {
49 { "rx_packets", IGB_STAT(stats.gprc) }, 49 { "rx_packets", IGB_STAT(stats.gprc) },
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index d4eb8e2d8720..bff280eff5e3 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -606,9 +606,6 @@ static void igb_init_manageability(struct igb_adapter *adapter)
606 u32 manc2h = rd32(E1000_MANC2H); 606 u32 manc2h = rd32(E1000_MANC2H);
607 u32 manc = rd32(E1000_MANC); 607 u32 manc = rd32(E1000_MANC);
608 608
609 /* disable hardware interception of ARP */
610 manc &= ~(E1000_MANC_ARP_EN);
611
612 /* enable receiving management packets to the host */ 609 /* enable receiving management packets to the host */
613 /* this will probably generate destination unreachable messages 610 /* this will probably generate destination unreachable messages
614 * from the host OS, but the packets will be handled on SMBUS */ 611 * from the host OS, but the packets will be handled on SMBUS */
@@ -623,25 +620,6 @@ static void igb_init_manageability(struct igb_adapter *adapter)
623 } 620 }
624} 621}
625 622
626static void igb_release_manageability(struct igb_adapter *adapter)
627{
628 struct e1000_hw *hw = &adapter->hw;
629
630 if (adapter->en_mng_pt) {
631 u32 manc = rd32(E1000_MANC);
632
633 /* re-enable hardware interception of ARP */
634 manc |= E1000_MANC_ARP_EN;
635 manc &= ~E1000_MANC_EN_MNG2HOST;
636
637 /* don't explicitly have to mess with MANC2H since
638 * MANC has an enable disable that gates MANC2H */
639
640 /* XXX stop the hardware watchdog ? */
641 wr32(E1000_MANC, manc);
642 }
643}
644
645/** 623/**
646 * igb_configure - configure the hardware for RX and TX 624 * igb_configure - configure the hardware for RX and TX
647 * @adapter: private board structure 625 * @adapter: private board structure
@@ -844,7 +822,6 @@ void igb_reset(struct igb_adapter *adapter)
844 822
845 igb_reset_adaptive(&adapter->hw); 823 igb_reset_adaptive(&adapter->hw);
846 adapter->hw.phy.ops.get_phy_info(&adapter->hw); 824 adapter->hw.phy.ops.get_phy_info(&adapter->hw);
847 igb_release_manageability(adapter);
848} 825}
849 826
850/** 827/**
@@ -1178,9 +1155,6 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1178 1155
1179 flush_scheduled_work(); 1156 flush_scheduled_work();
1180 1157
1181
1182 igb_release_manageability(adapter);
1183
1184 /* Release control of h/w to f/w. If f/w is AMT enabled, this 1158 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1185 * would have already happened in close and is redundant. */ 1159 * would have already happened in close and is redundant. */
1186 igb_release_hw_control(adapter); 1160 igb_release_hw_control(adapter);
@@ -3955,8 +3929,6 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
3955 pci_enable_wake(pdev, PCI_D3cold, 0); 3929 pci_enable_wake(pdev, PCI_D3cold, 0);
3956 } 3930 }
3957 3931
3958 igb_release_manageability(adapter);
3959
3960 /* make sure adapter isn't asleep if manageability is enabled */ 3932 /* make sure adapter isn't asleep if manageability is enabled */
3961 if (adapter->en_mng_pt) { 3933 if (adapter->en_mng_pt) {
3962 pci_enable_wake(pdev, PCI_D3hot, 1); 3934 pci_enable_wake(pdev, PCI_D3hot, 1);
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index a267dd862520..53a9fd086f96 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -49,7 +49,7 @@ struct ixgb_stats {
49 int stat_offset; 49 int stat_offset;
50}; 50};
51 51
52#define IXGB_STAT(m) sizeof(((struct ixgb_adapter *)0)->m), \ 52#define IXGB_STAT(m) FIELD_SIZEOF(struct ixgb_adapter, m), \
53 offsetof(struct ixgb_adapter, m) 53 offsetof(struct ixgb_adapter, m)
54static struct ixgb_stats ixgb_gstrings_stats[] = { 54static struct ixgb_stats ixgb_gstrings_stats[] = {
55 {"rx_packets", IXGB_STAT(net_stats.rx_packets)}, 55 {"rx_packets", IXGB_STAT(net_stats.rx_packets)},
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index ead49e54f31b..23d0a4afe0e1 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -220,7 +220,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
220 tx_ring->stats.bytes += tx_buffer_info->length; 220 tx_ring->stats.bytes += tx_buffer_info->length;
221 if (cleaned) { 221 if (cleaned) {
222 struct sk_buff *skb = tx_buffer_info->skb; 222 struct sk_buff *skb = tx_buffer_info->skb;
223#ifdef NETIF_F_TSO
224 unsigned int segs, bytecount; 223 unsigned int segs, bytecount;
225 segs = skb_shinfo(skb)->gso_segs ?: 1; 224 segs = skb_shinfo(skb)->gso_segs ?: 1;
226 /* multiply data chunks by size of headers */ 225 /* multiply data chunks by size of headers */
@@ -228,10 +227,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
228 skb->len; 227 skb->len;
229 total_tx_packets += segs; 228 total_tx_packets += segs;
230 total_tx_bytes += bytecount; 229 total_tx_bytes += bytecount;
231#else
232 total_tx_packets++;
233 total_tx_bytes += skb->len;
234#endif
235 } 230 }
236 ixgbe_unmap_and_free_tx_resource(adapter, 231 ixgbe_unmap_and_free_tx_resource(adapter,
237 tx_buffer_info); 232 tx_buffer_info);
@@ -1942,6 +1937,10 @@ static int ixgbe_open(struct net_device *netdev)
1942 int err; 1937 int err;
1943 u32 num_rx_queues = adapter->num_rx_queues; 1938 u32 num_rx_queues = adapter->num_rx_queues;
1944 1939
1940 /* disallow open during test */
1941 if (test_bit(__IXGBE_TESTING, &adapter->state))
1942 return -EBUSY;
1943
1945try_intr_reinit: 1944try_intr_reinit:
1946 /* allocate transmit descriptors */ 1945 /* allocate transmit descriptors */
1947 err = ixgbe_setup_all_tx_resources(adapter); 1946 err = ixgbe_setup_all_tx_resources(adapter);
@@ -2278,11 +2277,29 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
2278 IXGBE_ADVTXD_DTYP_CTXT); 2277 IXGBE_ADVTXD_DTYP_CTXT);
2279 2278
2280 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2279 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2281 if (skb->protocol == htons(ETH_P_IP)) 2280 switch (skb->protocol) {
2281 case __constant_htons(ETH_P_IP):
2282 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 2282 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2283 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2284 type_tucmd_mlhl |=
2285 IXGBE_ADVTXD_TUCMD_L4T_TCP;
2286 break;
2287
2288 case __constant_htons(ETH_P_IPV6):
2289 /* XXX what about other V6 headers?? */
2290 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2291 type_tucmd_mlhl |=
2292 IXGBE_ADVTXD_TUCMD_L4T_TCP;
2293 break;
2283 2294
2284 if (skb->sk->sk_protocol == IPPROTO_TCP) 2295 default:
2285 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 2296 if (unlikely(net_ratelimit())) {
2297 DPRINTK(PROBE, WARNING,
2298 "partial checksum but proto=%x!\n",
2299 skb->protocol);
2300 }
2301 break;
2302 }
2286 } 2303 }
2287 2304
2288 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 2305 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
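[Editor's note] The ixgbe_tx_csum() hunk above decides whether to set the TCP bit in the context descriptor from the packet headers themselves (ip_hdr()->protocol for IPv4, ipv6_hdr()->nexthdr for IPv6) rather than from skb->sk->sk_protocol, and rate-limits a warning for unknown ethertypes. A standalone model of that dispatch follows; the flag bit values are plain stand-ins, not the hardware definitions.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* htons() */

#define ETH_P_IP_NUM    0x0800
#define ETH_P_IPV6_NUM  0x86DD
#define IPPROTO_TCP_NUM 6

#define TUCMD_IPV4     0x1   /* illustrative values only */
#define TUCMD_L4T_TCP  0x2

/* proto: on-wire ethertype (big endian); l4: protocol number taken from
 * the IPv4 header or the IPv6 next-header field. */
static uint32_t tx_csum_flags(uint16_t proto, uint8_t l4)
{
        uint32_t flags = 0;

        if (proto == htons(ETH_P_IP_NUM)) {
                flags |= TUCMD_IPV4;
                if (l4 == IPPROTO_TCP_NUM)
                        flags |= TUCMD_L4T_TCP;
        } else if (proto == htons(ETH_P_IPV6_NUM)) {
                if (l4 == IPPROTO_TCP_NUM)
                        flags |= TUCMD_L4T_TCP;
        } else {
                fprintf(stderr, "partial checksum but proto=%x!\n", proto);
        }
        return flags;
}

int main(void)
{
        printf("ipv4/tcp -> 0x%x\n", tx_csum_flags(htons(ETH_P_IP_NUM), 6));
        printf("ipv6/tcp -> 0x%x\n", tx_csum_flags(htons(ETH_P_IPV6_NUM), 6));
        return 0;
}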
@@ -2778,6 +2795,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
2778 hw->mac.type, hw->phy.type, 2795 hw->mac.type, hw->phy.type,
2779 (part_num >> 8), (part_num & 0xff)); 2796 (part_num >> 8), (part_num & 0xff));
2780 2797
2798 if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
2799 dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
2800 "this card is not sufficient for optimal "
2801 "performance.\n");
2802 dev_warn(&pdev->dev, "For optimal performance a x8 "
2803 "PCI-Express slot is required.\n");
2804 }
2805
2781 /* reset the hardware with the new settings */ 2806 /* reset the hardware with the new settings */
2782 ixgbe_start_hw(hw); 2807 ixgbe_start_hw(hw);
2783 2808
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 521dc0322ee4..75ef9d0d974d 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/bitmap.h> 35#include <linux/bitmap.h>
36#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
37#include <linux/vmalloc.h>
37 38
38#include "mlx4.h" 39#include "mlx4.h"
39 40
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 679dfdb6807f..79b317b88c86 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -578,13 +578,6 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
578 goto err_free; 578 goto err_free;
579 } 579 }
580 580
581 fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
582 key_to_hw_index(fmr->mr.key), NULL);
583 if (!fmr->mpt) {
584 err = -ENOMEM;
585 goto err_free;
586 }
587
588 return 0; 581 return 0;
589 582
590err_free: 583err_free:
@@ -595,7 +588,19 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
595 588
596int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr) 589int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
597{ 590{
598 return mlx4_mr_enable(dev, &fmr->mr); 591 struct mlx4_priv *priv = mlx4_priv(dev);
592 int err;
593
594 err = mlx4_mr_enable(dev, &fmr->mr);
595 if (err)
596 return err;
597
598 fmr->mpt = mlx4_table_find(&priv->mr_table.dmpt_table,
599 key_to_hw_index(fmr->mr.key), NULL);
600 if (!fmr->mpt)
601 return -ENOMEM;
602
603 return 0;
599} 604}
600EXPORT_SYMBOL_GPL(mlx4_fmr_enable); 605EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
601 606
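[Editor's note] The mlx4 mr.c hunk above moves the mlx4_table_find() lookup of the MPT entry out of mlx4_fmr_alloc() and into mlx4_fmr_enable(), performing it only after mlx4_mr_enable() has succeeded and returning -ENOMEM if the entry still cannot be found. The sketch below is a standalone model of that "enable first, then resolve the pointer" ordering; the stub functions stand in for the real mlx4 internals.

#include <stdio.h>
#include <errno.h>

/* Stand-ins for the mlx4 internals (illustration only). */
static void *mpt_entry;                        /* set after "enable" */

static int mr_enable(void)    { mpt_entry = &mpt_entry; return 0; }
static void *table_find(void) { return mpt_entry; }

struct fmr { void *mpt; };

/* Model of the reordered mlx4_fmr_enable(): enable the MR first, and
 * only then look up the MPT entry, failing with -ENOMEM if absent. */
static int fmr_enable(struct fmr *fmr)
{
        int err = mr_enable();
        if (err)
                return err;

        fmr->mpt = table_find();
        if (!fmr->mpt)
                return -ENOMEM;
        return 0;
}

int main(void)
{
        struct fmr f = { 0 };
        printf("fmr_enable -> %d (mpt %sresolved)\n",
               fmr_enable(&f), f.mpt ? "" : "not ");
        return 0;
}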
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 31e047dd7bb3..501e451be911 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -309,8 +309,8 @@ static ssize_t show_local_mac(struct netconsole_target *nt, char *buf)
309 struct net_device *dev = nt->np.dev; 309 struct net_device *dev = nt->np.dev;
310 310
311 DECLARE_MAC_BUF(mac); 311 DECLARE_MAC_BUF(mac);
312 return snprintf(buf, PAGE_SIZE, "%s\n", 312 return snprintf(buf, PAGE_SIZE, "%s\n", dev ?
313 print_mac(mac, dev->dev_addr)); 313 print_mac(mac, dev->dev_addr) : "ff:ff:ff:ff:ff:ff");
314} 314}
315 315
316static ssize_t show_remote_mac(struct netconsole_target *nt, char *buf) 316static ssize_t show_remote_mac(struct netconsole_target *nt, char *buf)
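[Editor's note] The netconsole hunk above guards show_local_mac() against nt->np.dev being NULL, presumably because the target can be read back before a device is bound, and prints a broadcast-style placeholder in that case. A trivial standalone equivalent of the guarded formatting, with invented names:

#include <stdio.h>

/* dev_addr may be NULL when no device is bound yet; fall back to a
 * placeholder string instead of dereferencing it. */
static int show_local_mac(char *buf, size_t len, const unsigned char *dev_addr)
{
        if (!dev_addr)
                return snprintf(buf, len, "%s\n", "ff:ff:ff:ff:ff:ff");
        return snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x\n",
                        dev_addr[0], dev_addr[1], dev_addr[2],
                        dev_addr[3], dev_addr[4], dev_addr[5]);
}

int main(void)
{
        char buf[64];
        show_local_mac(buf, sizeof(buf), NULL);
        printf("%s", buf);
        return 0;
}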
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 6b3384a24f07..26aa8fe1fb2d 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -33,20 +33,20 @@
33 * I have also done a look in the following sources: (mail me if you need them) 33 * I have also done a look in the following sources: (mail me if you need them)
34 * crynwr-packet-driver by Russ Nelson 34 * crynwr-packet-driver by Russ Nelson
35 * Garret A. Wollman's (fourth) i82586-driver for BSD 35 * Garret A. Wollman's (fourth) i82586-driver for BSD
36 * (before getting an i82596 (yes 596 not 586) manual, the existing drivers helped 36 * (before getting an i82596 (yes 596 not 586) manual, the existing drivers
37 * me a lot to understand this tricky chip.) 37 * helped me a lot to understand this tricky chip.)
38 * 38 *
39 * Known Problems: 39 * Known Problems:
40 * The internal sysbus seems to be slow. So we often lose packets because of 40 * The internal sysbus seems to be slow. So we often lose packets because of
41 * overruns while receiving from a fast remote host. 41 * overruns while receiving from a fast remote host.
42 * This can slow down TCP connections. Maybe the newer ni5210 cards are better. 42 * This can slow down TCP connections. Maybe the newer ni5210 cards are
43 * my experience is, that if a machine sends with more than about 500-600K/s 43 * better. My experience is, that if a machine sends with more than about
44 * the fifo/sysbus overflows. 44 * 500-600K/s the fifo/sysbus overflows.
45 * 45 *
46 * IMPORTANT NOTE: 46 * IMPORTANT NOTE:
47 * On fast networks, it's a (very) good idea to have 16K shared memory. With 47 * On fast networks, it's a (very) good idea to have 16K shared memory. With
48 * 8K, we can store only 4 receive frames, so it can (easily) happen that a remote 48 * 8K, we can store only 4 receive frames, so it can (easily) happen that a
49 * machine 'overruns' our system. 49 * remote machine 'overruns' our system.
50 * 50 *
51 * Known i82586/card problems (I'm sure, there are many more!): 51 * Known i82586/card problems (I'm sure, there are many more!):
52 * Running the NOP-mode, the i82586 sometimes seems to forget to report 52 * Running the NOP-mode, the i82586 sometimes seems to forget to report
@@ -60,7 +60,8 @@
60 * 60 *
61 * results from ftp performance tests with Linux 1.2.5 61 * results from ftp performance tests with Linux 1.2.5
62 * send and receive about 350-400 KByte/s (peak up to 460 kbytes/s) 62 * send and receive about 350-400 KByte/s (peak up to 460 kbytes/s)
63 * sending in NOP-mode: peak performance up to 530K/s (but better don't run this mode) 63 * sending in NOP-mode: peak performance up to 530K/s (but better don't
64 * run this mode)
64 */ 65 */
65 66
66/* 67/*
@@ -94,7 +95,8 @@
94 * 95 *
95 * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH) 96 * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH)
96 * 97 *
97 * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff, too (MH) 98 * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff,
99 * too (MH)
98 * 100 *
99 * < 30.Sep.93: first versions 101 * < 30.Sep.93: first versions
100 */ 102 */
@@ -102,7 +104,7 @@
102static int debuglevel; /* debug-printk 0: off 1: a few 2: more */ 104static int debuglevel; /* debug-printk 0: off 1: a few 2: more */
103static int automatic_resume; /* experimental .. better should be zero */ 105static int automatic_resume; /* experimental .. better should be zero */
104static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */ 106static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */
105static int fifo=0x8; /* don't change */ 107static int fifo = 0x8; /* don't change */
106 108
107#include <linux/module.h> 109#include <linux/module.h>
108#include <linux/kernel.h> 110#include <linux/kernel.h>
@@ -127,14 +129,15 @@ static int fifo=0x8; /* don't change */
127#define DEBUG /* debug on */ 129#define DEBUG /* debug on */
128#define SYSBUSVAL 1 /* 8 Bit */ 130#define SYSBUSVAL 1 /* 8 Bit */
129 131
130#define ni_attn586() {outb(0,dev->base_addr+NI52_ATTENTION);} 132#define ni_attn586() { outb(0, dev->base_addr + NI52_ATTENTION); }
131#define ni_reset586() {outb(0,dev->base_addr+NI52_RESET);} 133#define ni_reset586() { outb(0, dev->base_addr + NI52_RESET); }
132#define ni_disint() {outb(0,dev->base_addr+NI52_INTDIS);} 134#define ni_disint() { outb(0, dev->base_addr + NI52_INTDIS); }
133#define ni_enaint() {outb(0,dev->base_addr+NI52_INTENA);} 135#define ni_enaint() { outb(0, dev->base_addr + NI52_INTENA); }
134 136
135#define make32(ptr16) (p->memtop + (short) (ptr16) ) 137#define make32(ptr16) (p->memtop + (short) (ptr16))
136#define make24(ptr32) ( ((char *) (ptr32)) - p->base) 138#define make24(ptr32) ((unsigned long)(ptr32)) - p->base
137#define make16(ptr32) ((unsigned short) ((unsigned long)(ptr32) - (unsigned long) p->memtop )) 139#define make16(ptr32) ((unsigned short) ((unsigned long)(ptr32)\
140 - (unsigned long) p->memtop))
138 141
139/******************* how to calculate the buffers ***************************** 142/******************* how to calculate the buffers *****************************
140 143
@@ -159,96 +162,112 @@ sizeof(nop_cmd) = 8;
159 162
160/**************************************************************************/ 163/**************************************************************************/
161 164
162/* different DELAYs */
163#define DELAY(x) mdelay(32 * x);
164#define DELAY_16(); { udelay(16); }
165#define DELAY_18(); { udelay(4); }
166
167/* wait for command with timeout: */
168#define WAIT_4_SCB_CMD() \
169{ int i; \
170 for(i=0;i<16384;i++) { \
171 if(!p->scb->cmd_cuc) break; \
172 DELAY_18(); \
173 if(i == 16383) { \
174 printk("%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_cuc,p->scb->cus); \
175 if(!p->reseted) { p->reseted = 1; ni_reset586(); } } } }
176
177#define WAIT_4_SCB_CMD_RUC() { int i; \
178 for(i=0;i<16384;i++) { \
179 if(!p->scb->cmd_ruc) break; \
180 DELAY_18(); \
181 if(i == 16383) { \
182 printk("%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_ruc,p->scb->rus); \
183 if(!p->reseted) { p->reseted = 1; ni_reset586(); } } } }
184
185#define WAIT_4_STAT_COMPL(addr) { int i; \
186 for(i=0;i<32767;i++) { \
187 if((addr)->cmd_status & STAT_COMPL) break; \
188 DELAY_16(); DELAY_16(); } }
189 165
190#define NI52_TOTAL_SIZE 16 166#define NI52_TOTAL_SIZE 16
191#define NI52_ADDR0 0x02 167#define NI52_ADDR0 0x02
192#define NI52_ADDR1 0x07 168#define NI52_ADDR1 0x07
193#define NI52_ADDR2 0x01 169#define NI52_ADDR2 0x01
194 170
195static int ni52_probe1(struct net_device *dev,int ioaddr); 171static int ni52_probe1(struct net_device *dev, int ioaddr);
196static irqreturn_t ni52_interrupt(int irq,void *dev_id); 172static irqreturn_t ni52_interrupt(int irq, void *dev_id);
197static int ni52_open(struct net_device *dev); 173static int ni52_open(struct net_device *dev);
198static int ni52_close(struct net_device *dev); 174static int ni52_close(struct net_device *dev);
199static int ni52_send_packet(struct sk_buff *,struct net_device *); 175static int ni52_send_packet(struct sk_buff *, struct net_device *);
200static struct net_device_stats *ni52_get_stats(struct net_device *dev); 176static struct net_device_stats *ni52_get_stats(struct net_device *dev);
201static void set_multicast_list(struct net_device *dev); 177static void set_multicast_list(struct net_device *dev);
202static void ni52_timeout(struct net_device *dev); 178static void ni52_timeout(struct net_device *dev);
203#if 0
204static void ni52_dump(struct net_device *,void *);
205#endif
206 179
207/* helper-functions */ 180/* helper-functions */
208static int init586(struct net_device *dev); 181static int init586(struct net_device *dev);
209static int check586(struct net_device *dev,char *where,unsigned size); 182static int check586(struct net_device *dev, char *where, unsigned size);
210static void alloc586(struct net_device *dev); 183static void alloc586(struct net_device *dev);
211static void startrecv586(struct net_device *dev); 184static void startrecv586(struct net_device *dev);
212static void *alloc_rfa(struct net_device *dev,void *ptr); 185static void *alloc_rfa(struct net_device *dev, void *ptr);
213static void ni52_rcv_int(struct net_device *dev); 186static void ni52_rcv_int(struct net_device *dev);
214static void ni52_xmt_int(struct net_device *dev); 187static void ni52_xmt_int(struct net_device *dev);
215static void ni52_rnr_int(struct net_device *dev); 188static void ni52_rnr_int(struct net_device *dev);
216 189
217struct priv 190struct priv {
218{
219 struct net_device_stats stats; 191 struct net_device_stats stats;
220 unsigned long base; 192 unsigned long base;
221 char *memtop; 193 char *memtop;
222 long int lock; 194 spinlock_t spinlock;
223 int reseted; 195 int reset;
224 volatile struct rfd_struct *rfd_last,*rfd_top,*rfd_first; 196 struct rfd_struct *rfd_last, *rfd_top, *rfd_first;
225 volatile struct scp_struct *scp; /* volatile is important */ 197 struct scp_struct *scp;
226 volatile struct iscp_struct *iscp; /* volatile is important */ 198 struct iscp_struct *iscp;
227 volatile struct scb_struct *scb; /* volatile is important */ 199 struct scb_struct *scb;
228 volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS]; 200 struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS];
229#if (NUM_XMIT_BUFFS == 1) 201#if (NUM_XMIT_BUFFS == 1)
230 volatile struct transmit_cmd_struct *xmit_cmds[2]; 202 struct transmit_cmd_struct *xmit_cmds[2];
231 volatile struct nop_cmd_struct *nop_cmds[2]; 203 struct nop_cmd_struct *nop_cmds[2];
232#else 204#else
233 volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS]; 205 struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS];
234 volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS]; 206 struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS];
235#endif 207#endif
236 volatile int nop_point,num_recv_buffs; 208 int nop_point, num_recv_buffs;
237 volatile char *xmit_cbuffs[NUM_XMIT_BUFFS]; 209 char *xmit_cbuffs[NUM_XMIT_BUFFS];
238 volatile int xmit_count,xmit_last; 210 int xmit_count, xmit_last;
239}; 211};
240 212
213/* wait for command with timeout: */
214static void wait_for_scb_cmd(struct net_device *dev)
215{
216 struct priv *p = dev->priv;
217 int i;
218 for (i = 0; i < 16384; i++) {
219 if (readb(&p->scb->cmd_cuc) == 0)
220 break;
221 udelay(4);
222 if (i == 16383) {
223 printk(KERN_ERR "%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",
224 dev->name, readb(&p->scb->cmd_cuc), readb(&p->scb->cus));
225 if (!p->reset) {
226 p->reset = 1;
227 ni_reset586();
228 }
229 }
230 }
231}
232
233static void wait_for_scb_cmd_ruc(struct net_device *dev)
234{
235 struct priv *p = dev->priv;
236 int i;
237 for (i = 0; i < 16384; i++) {
238 if (readb(&p->scb->cmd_ruc) == 0)
239 break;
240 udelay(4);
241 if (i == 16383) {
242 printk(KERN_ERR "%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",
243 dev->name, p->scb->cmd_ruc, p->scb->rus);
244 if (!p->reset) {
245 p->reset = 1;
246 ni_reset586();
247 }
248 }
249 }
250}
251
252static void wait_for_stat_compl(void *p)
253{
254 struct nop_cmd_struct *addr = p;
255 int i;
256 for (i = 0; i < 32767; i++) {
257 if (readw(&((addr)->cmd_status)) & STAT_COMPL)
258 break;
259 udelay(32);
260 }
261}
262
241/********************************************** 263/**********************************************
242 * close device 264 * close device
243 */ 265 */
244static int ni52_close(struct net_device *dev) 266static int ni52_close(struct net_device *dev)
245{ 267{
246 free_irq(dev->irq, dev); 268 free_irq(dev->irq, dev);
247
248 ni_reset586(); /* the hard way to stop the receiver */ 269 ni_reset586(); /* the hard way to stop the receiver */
249
250 netif_stop_queue(dev); 270 netif_stop_queue(dev);
251
252 return 0; 271 return 0;
253} 272}
254 273
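[Editor's note] In the ni52 hunk above, the WAIT_4_SCB_CMD(), WAIT_4_SCB_CMD_RUC() and WAIT_4_STAT_COMPL() macros are rewritten as the wait_for_scb_cmd(), wait_for_scb_cmd_ruc() and wait_for_stat_compl() functions, polling through readb()/readw() with udelay() in place of the old DELAY_* macros. The generic standalone sketch below shows only the bounded busy-wait pattern those helpers share; the "register" here is a plain variable, not real I/O, and the names are illustrative.

#include <stdio.h>

/* Bounded busy-wait on a status location, in the style of the new
 * wait_for_scb_cmd() helpers.  read_reg()/relax_stub() stand in for
 * readb() and udelay(); the loop gives up after a fixed count. */
static volatile unsigned char fake_reg = 1;

static unsigned char read_reg(void) { return fake_reg; }
static void relax_stub(void)        { fake_reg = 0; /* pretend HW finished */ }

static int wait_for_clear(void)
{
        int i;

        for (i = 0; i < 16384; i++) {
                if (read_reg() == 0)
                        return 0;        /* command accepted */
                relax_stub();            /* the driver uses udelay(4) here */
        }
        return -1;                       /* timed out: the driver resets the chip */
}

int main(void)
{
        printf("wait_for_clear -> %d\n", wait_for_clear());
        return 0;
}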
@@ -265,55 +284,53 @@ static int ni52_open(struct net_device *dev)
265 startrecv586(dev); 284 startrecv586(dev);
266 ni_enaint(); 285 ni_enaint();
267 286
268 ret = request_irq(dev->irq, &ni52_interrupt,0,dev->name,dev); 287 ret = request_irq(dev->irq, &ni52_interrupt, 0, dev->name, dev);
269 if (ret) 288 if (ret) {
270 {
271 ni_reset586(); 289 ni_reset586();
272 return ret; 290 return ret;
273 } 291 }
274
275 netif_start_queue(dev); 292 netif_start_queue(dev);
276
277 return 0; /* most done by init */ 293 return 0; /* most done by init */
278} 294}
279 295
280/********************************************** 296/**********************************************
281 * Check to see if there's an 82586 out there. 297 * Check to see if there's an 82586 out there.
282 */ 298 */
283static int check586(struct net_device *dev,char *where,unsigned size) 299static int check586(struct net_device *dev, char *where, unsigned size)
284{ 300{
285 struct priv pb; 301 struct priv pb;
286 struct priv *p = /* (struct priv *) dev->priv*/ &pb; 302 struct priv *p = /* (struct priv *) dev->priv*/ &pb;
287 char *iscp_addrs[2]; 303 char *iscp_addrs[2];
288 int i; 304 int i;
289 305
290 p->base = (unsigned long) isa_bus_to_virt((unsigned long)where) + size - 0x01000000; 306 p->base = (unsigned long) isa_bus_to_virt((unsigned long)where)
307 + size - 0x01000000;
291 p->memtop = isa_bus_to_virt((unsigned long)where) + size; 308 p->memtop = isa_bus_to_virt((unsigned long)where) + size;
292 p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS); 309 p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS);
293 memset((char *)p->scp,0, sizeof(struct scp_struct)); 310 memset_io((char *)p->scp, 0, sizeof(struct scp_struct));
294 for(i=0;i<sizeof(struct scp_struct);i++) /* memory was writeable? */ 311 for (i = 0; i < sizeof(struct scp_struct); i++)
295 if(((char *)p->scp)[i]) 312 /* memory was writeable? */
313 if (readb((char *)p->scp + i))
296 return 0; 314 return 0;
297 p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */ 315 writeb(SYSBUSVAL, &p->scp->sysbus); /* 1 = 8Bit-Bus, 0 = 16 Bit */
298 if(p->scp->sysbus != SYSBUSVAL) 316 if (readb(&p->scp->sysbus) != SYSBUSVAL)
299 return 0; 317 return 0;
300 318
301 iscp_addrs[0] = isa_bus_to_virt((unsigned long)where); 319 iscp_addrs[0] = isa_bus_to_virt((unsigned long)where);
302 iscp_addrs[1]= (char *) p->scp - sizeof(struct iscp_struct); 320 iscp_addrs[1] = (char *) p->scp - sizeof(struct iscp_struct);
303 321
304 for(i=0;i<2;i++) 322 for (i = 0; i < 2; i++) {
305 {
306 p->iscp = (struct iscp_struct *) iscp_addrs[i]; 323 p->iscp = (struct iscp_struct *) iscp_addrs[i];
307 memset((char *)p->iscp,0, sizeof(struct iscp_struct)); 324 memset_io((char *)p->iscp, 0, sizeof(struct iscp_struct));
308 325
309 p->scp->iscp = make24(p->iscp); 326 writel(make24(p->iscp), &p->scp->iscp);
310 p->iscp->busy = 1; 327 writeb(1, &p->iscp->busy);
311 328
312 ni_reset586(); 329 ni_reset586();
313 ni_attn586(); 330 ni_attn586();
314 DELAY(1); /* wait a while... */ 331 mdelay(32); /* wait a while... */
315 332 /* i82586 clears 'busy' after successful init */
316 if(p->iscp->busy) /* i82586 clears 'busy' after successful init */ 333 if (readb(&p->iscp->busy))
317 return 0; 334 return 0;
318 } 335 }
319 return 1; 336 return 1;
@@ -327,36 +344,39 @@ static void alloc586(struct net_device *dev)
327 struct priv *p = (struct priv *) dev->priv; 344 struct priv *p = (struct priv *) dev->priv;
328 345
329 ni_reset586(); 346 ni_reset586();
330 DELAY(1); 347 mdelay(32);
348
349 spin_lock_init(&p->spinlock);
331 350
332 p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS); 351 p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
333 p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start); 352 p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start);
334 p->iscp = (struct iscp_struct *) ((char *)p->scp - sizeof(struct iscp_struct)); 353 p->iscp = (struct iscp_struct *)
354 ((char *)p->scp - sizeof(struct iscp_struct));
335 355
336 memset((char *) p->iscp,0,sizeof(struct iscp_struct)); 356 memset_io(p->iscp, 0, sizeof(struct iscp_struct));
337 memset((char *) p->scp ,0,sizeof(struct scp_struct)); 357 memset_io(p->scp , 0, sizeof(struct scp_struct));
338 358
339 p->scp->iscp = make24(p->iscp); 359 writel(make24(p->iscp), &p->scp->iscp);
340 p->scp->sysbus = SYSBUSVAL; 360 writeb(SYSBUSVAL, &p->scp->sysbus);
341 p->iscp->scb_offset = make16(p->scb); 361 writew(make16(p->scb), &p->iscp->scb_offset);
342 362
343 p->iscp->busy = 1; 363 writeb(1, &p->iscp->busy);
344 ni_reset586(); 364 ni_reset586();
345 ni_attn586(); 365 ni_attn586();
346 366
347 DELAY(1); 367 mdelay(32);
348 368
349 if(p->iscp->busy) 369 if (readb(&p->iscp->busy))
350 printk("%s: Init-Problems (alloc).\n",dev->name); 370 printk(KERN_ERR "%s: Init-Problems (alloc).\n", dev->name);
351 371
352 p->reseted = 0; 372 p->reset = 0;
353 373
354 memset((char *)p->scb,0,sizeof(struct scb_struct)); 374 memset_io((char *)p->scb, 0, sizeof(struct scb_struct));
355} 375}
356 376
357/* set: io,irq,memstart,memend or set it when calling insmod */ 377/* set: io,irq,memstart,memend or set it when calling insmod */
358static int irq=9; 378static int irq = 9;
359static int io=0x300; 379static int io = 0x300;
360static long memstart; /* e.g 0xd0000 */ 380static long memstart; /* e.g 0xd0000 */
361static long memend; /* e.g 0xd4000 */ 381static long memend; /* e.g 0xd4000 */
362 382
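
Illustrative sketch (not part of the patch): a polling variant of the busy-flag handshake that check586()/alloc586() perform above. The patch itself does a single mdelay(32) followed by one readb(); the 32 ms budget below mirrors that. Assumes kernel context and an __iomem mapping of the card's shared memory.

#include <linux/io.h>
#include <linux/delay.h>

static int iscp_init_done(const u8 __iomem *busy_flag)
{
	unsigned int ms;

	for (ms = 0; ms < 32; ms++) {
		if (!readb(busy_flag))	/* i82586 clears 'busy' after successful init */
			return 1;
		mdelay(1);
	}
	return 0;			/* still busy: initialization failed */
}
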
@@ -413,7 +433,7 @@ out:
413 return ERR_PTR(err); 433 return ERR_PTR(err);
414} 434}
415 435
416static int __init ni52_probe1(struct net_device *dev,int ioaddr) 436static int __init ni52_probe1(struct net_device *dev, int ioaddr)
417{ 437{
418 int i, size, retval; 438 int i, size, retval;
419 439
@@ -425,90 +445,96 @@ static int __init ni52_probe1(struct net_device *dev,int ioaddr)
425 if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME)) 445 if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME))
426 return -EBUSY; 446 return -EBUSY;
427 447
428 if( !(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) || 448 if (!(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) ||
429 !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2)) { 449 !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2)) {
430 retval = -ENODEV; 450 retval = -ENODEV;
431 goto out; 451 goto out;
432 } 452 }
433 453
434 for(i=0;i<ETH_ALEN;i++) 454 for (i = 0; i < ETH_ALEN; i++)
435 dev->dev_addr[i] = inb(dev->base_addr+i); 455 dev->dev_addr[i] = inb(dev->base_addr+i);
436 456
437 if(dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1 457 if (dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1
438 || dev->dev_addr[2] != NI52_ADDR2) { 458 || dev->dev_addr[2] != NI52_ADDR2) {
439 retval = -ENODEV; 459 retval = -ENODEV;
440 goto out; 460 goto out;
441 } 461 }
442 462
443 printk(KERN_INFO "%s: NI5210 found at %#3lx, ",dev->name,dev->base_addr); 463 printk(KERN_INFO "%s: NI5210 found at %#3lx, ",
464 dev->name, dev->base_addr);
444 465
445 /* 466 /*
446 * check (or search) IO-Memory, 8K and 16K 467 * check (or search) IO-Memory, 8K and 16K
447 */ 468 */
448#ifdef MODULE 469#ifdef MODULE
449 size = dev->mem_end - dev->mem_start; 470 size = dev->mem_end - dev->mem_start;
450 if(size != 0x2000 && size != 0x4000) { 471 if (size != 0x2000 && size != 0x4000) {
451 printk("\n%s: Illegal memory size %d. Allowed is 0x2000 or 0x4000 bytes.\n",dev->name,size); 472 printk("\n");
473 printk(KERN_ERR "%s: Invalid memory size %d. Allowed is 0x2000 or 0x4000 bytes.\n", dev->name, size);
452 retval = -ENODEV; 474 retval = -ENODEV;
453 goto out; 475 goto out;
454 } 476 }
455 if(!check586(dev,(char *) dev->mem_start,size)) { 477 if (!check586(dev, (char *)dev->mem_start, size)) {
456 printk("?memcheck, Can't find memory at 0x%lx with size %d!\n",dev->mem_start,size); 478 printk(KERN_ERR "?memcheck, Can't find memory at 0x%lx with size %d!\n", dev->mem_start, size);
457 retval = -ENODEV; 479 retval = -ENODEV;
458 goto out; 480 goto out;
459 } 481 }
460#else 482#else
461 if(dev->mem_start != 0) /* no auto-mem-probe */ 483 if (dev->mem_start != 0) {
462 { 484 /* no auto-mem-probe */
463 size = 0x4000; /* check for 16K mem */ 485 size = 0x4000; /* check for 16K mem */
464 if(!check586(dev,(char *) dev->mem_start,size)) { 486 if (!check586(dev, (char *) dev->mem_start, size)) {
465 size = 0x2000; /* check for 8K mem */ 487 size = 0x2000; /* check for 8K mem */
466 if(!check586(dev,(char *) dev->mem_start,size)) { 488 if (!check586(dev, (char *)dev->mem_start, size)) {
467 printk("?memprobe, Can't find memory at 0x%lx!\n",dev->mem_start); 489 printk(KERN_ERR "?memprobe, Can't find memory at 0x%lx!\n", dev->mem_start);
468 retval = -ENODEV; 490 retval = -ENODEV;
469 goto out; 491 goto out;
470 } 492 }
471 } 493 }
472 } 494 } else {
473 else 495 static const unsigned long memaddrs[] = {
474 { 496 0xc8000, 0xca000, 0xcc000, 0xce000, 0xd0000, 0xd2000,
475 static long memaddrs[] = { 0xc8000,0xca000,0xcc000,0xce000,0xd0000,0xd2000, 497 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0
476 0xd4000,0xd6000,0xd8000,0xda000,0xdc000, 0 }; 498 };
477 for(i=0;;i++) 499 for (i = 0;; i++) {
478 { 500 if (!memaddrs[i]) {
479 if(!memaddrs[i]) { 501 printk(KERN_ERR "?memprobe, Can't find io-memory!\n");
480 printk("?memprobe, Can't find io-memory!\n");
481 retval = -ENODEV; 502 retval = -ENODEV;
482 goto out; 503 goto out;
483 } 504 }
484 dev->mem_start = memaddrs[i]; 505 dev->mem_start = memaddrs[i];
485 size = 0x2000; /* check for 8K mem */ 506 size = 0x2000; /* check for 8K mem */
486 if(check586(dev,(char *)dev->mem_start,size)) /* 8K-check */ 507 if (check586(dev, (char *)dev->mem_start, size))
508 /* 8K-check */
487 break; 509 break;
488 size = 0x4000; /* check for 16K mem */ 510 size = 0x4000; /* check for 16K mem */
489 if(check586(dev,(char *)dev->mem_start,size)) /* 16K-check */ 511 if (check586(dev, (char *)dev->mem_start, size))
512 /* 16K-check */
490 break; 513 break;
491 } 514 }
492 } 515 }
493 dev->mem_end = dev->mem_start + size; /* set mem_end showed by 'ifconfig' */ 516 /* set mem_end showed by 'ifconfig' */
517 dev->mem_end = dev->mem_start + size;
494#endif 518#endif
495 519
496 memset((char *) dev->priv,0,sizeof(struct priv)); 520 memset((char *)dev->priv, 0, sizeof(struct priv));
497 521
498 ((struct priv *) (dev->priv))->memtop = isa_bus_to_virt(dev->mem_start) + size; 522 ((struct priv *)(dev->priv))->memtop =
499 ((struct priv *) (dev->priv))->base = (unsigned long) isa_bus_to_virt(dev->mem_start) + size - 0x01000000; 523 isa_bus_to_virt(dev->mem_start) + size;
524 ((struct priv *)(dev->priv))->base = (unsigned long)
525 isa_bus_to_virt(dev->mem_start) + size - 0x01000000;
500 alloc586(dev); 526 alloc586(dev);
501 527
502 /* set number of receive-buffs according to memsize */ 528 /* set number of receive-buffs according to memsize */
503 if(size == 0x2000) 529 if (size == 0x2000)
504 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_8; 530 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_8;
505 else 531 else
506 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_16; 532 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_16;
507 533
508 printk("Memaddr: 0x%lx, Memsize: %d, ",dev->mem_start,size); 534 printk(KERN_DEBUG "Memaddr: 0x%lx, Memsize: %d, ",
535 dev->mem_start, size);
509 536
510 if(dev->irq < 2) 537 if (dev->irq < 2) {
511 {
512 unsigned long irq_mask; 538 unsigned long irq_mask;
513 539
514 irq_mask = probe_irq_on(); 540 irq_mask = probe_irq_on();
@@ -517,18 +543,16 @@ static int __init ni52_probe1(struct net_device *dev,int ioaddr)
517 543
518 mdelay(20); 544 mdelay(20);
519 dev->irq = probe_irq_off(irq_mask); 545 dev->irq = probe_irq_off(irq_mask);
520 if(!dev->irq) 546 if (!dev->irq) {
521 {
522 printk("?autoirq, Failed to detect IRQ line!\n"); 547 printk("?autoirq, Failed to detect IRQ line!\n");
523 retval = -EAGAIN; 548 retval = -EAGAIN;
524 goto out; 549 goto out;
525 } 550 }
526 printk("IRQ %d (autodetected).\n",dev->irq); 551 printk("IRQ %d (autodetected).\n", dev->irq);
527 } 552 } else {
528 else { 553 if (dev->irq == 2)
529 if(dev->irq == 2)
530 dev->irq = 9; 554 dev->irq = 9;
531 printk("IRQ %d (assigned and not checked!).\n",dev->irq); 555 printk("IRQ %d (assigned and not checked!).\n", dev->irq);
532 } 556 }
533 557
534 dev->open = ni52_open; 558 dev->open = ni52_open;
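
Illustrative sketch (not from the patch): the legacy ISA IRQ autoprobe pattern used in the hunk above, shown standalone. trigger_irq() is a hypothetical stand-in for whatever makes the board raise an interrupt (here ni_reset586()/ni_attn586()); the 20 ms settle time mirrors the mdelay(20) above.

#include <linux/interrupt.h>
#include <linux/delay.h>

static int autodetect_irq(void (*trigger_irq)(void))
{
	unsigned long mask = probe_irq_on();	/* start watching free IRQ lines */

	trigger_irq();				/* make the card interrupt once */
	mdelay(20);				/* give it time to fire */
	return probe_irq_off(mask);		/* 0 or negative: detection failed */
}
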
@@ -555,56 +579,58 @@ out:
555static int init586(struct net_device *dev) 579static int init586(struct net_device *dev)
556{ 580{
557 void *ptr; 581 void *ptr;
558 int i,result=0; 582 int i, result = 0;
559 struct priv *p = (struct priv *) dev->priv; 583 struct priv *p = (struct priv *)dev->priv;
560 volatile struct configure_cmd_struct *cfg_cmd; 584 struct configure_cmd_struct *cfg_cmd;
561 volatile struct iasetup_cmd_struct *ias_cmd; 585 struct iasetup_cmd_struct *ias_cmd;
562 volatile struct tdr_cmd_struct *tdr_cmd; 586 struct tdr_cmd_struct *tdr_cmd;
563 volatile struct mcsetup_cmd_struct *mc_cmd; 587 struct mcsetup_cmd_struct *mc_cmd;
564 struct dev_mc_list *dmi=dev->mc_list; 588 struct dev_mc_list *dmi = dev->mc_list;
565 int num_addrs=dev->mc_count; 589 int num_addrs = dev->mc_count;
566 590
567 ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct)); 591 ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
568 592
569 cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */ 593 cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */
570 cfg_cmd->cmd_status = 0; 594 writew(0, &cfg_cmd->cmd_status);
571 cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST; 595 writew(CMD_CONFIGURE | CMD_LAST, &cfg_cmd->cmd_cmd);
572 cfg_cmd->cmd_link = 0xffff; 596 writew(0xFFFF, &cfg_cmd->cmd_link);
573 597
574 cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */ 598 /* number of cfg bytes */
575 cfg_cmd->fifo = fifo; /* fifo-limit (8=tx:32/rx:64) */ 599 writeb(0x0a, &cfg_cmd->byte_cnt);
576 cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */ 600 /* fifo-limit (8=tx:32/rx:64) */
577 cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */ 601 writeb(fifo, &cfg_cmd->fifo);
578 cfg_cmd->priority = 0x00; 602 /* hold or discard bad recv frames (bit 7) */
579 cfg_cmd->ifs = 0x60; 603 writeb(0x40, &cfg_cmd->sav_bf);
580 cfg_cmd->time_low = 0x00; 604 /* addr_len |!src_insert |pre-len |loopback */
581 cfg_cmd->time_high = 0xf2; 605 writeb(0x2e, &cfg_cmd->adr_len);
582 cfg_cmd->promisc = 0; 606 writeb(0x00, &cfg_cmd->priority);
583 if(dev->flags & IFF_ALLMULTI) { 607 writeb(0x60, &cfg_cmd->ifs);;
608 writeb(0x00, &cfg_cmd->time_low);
609 writeb(0xf2, &cfg_cmd->time_high);
610 writeb(0x00, &cfg_cmd->promisc);;
611 if (dev->flags & IFF_ALLMULTI) {
584 int len = ((char *) p->iscp - (char *) ptr - 8) / 6; 612 int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
585 if(num_addrs > len) { 613 if (num_addrs > len) {
586 printk("%s: switching to promisc. mode\n",dev->name); 614 printk(KERN_ERR "%s: switching to promisc. mode\n",
587 dev->flags|=IFF_PROMISC; 615 dev->name);
616 dev->flags |= IFF_PROMISC;
588 } 617 }
589 } 618 }
590 if(dev->flags&IFF_PROMISC) 619 if (dev->flags & IFF_PROMISC)
591 { 620 writeb(0x01, &cfg_cmd->promisc);
592 cfg_cmd->promisc=1; 621 writeb(0x00, &cfg_cmd->carr_coll);
593 dev->flags|=IFF_PROMISC; 622 writew(make16(cfg_cmd), &p->scb->cbl_offset);
594 } 623 writew(0, &p->scb->cmd_ruc);
595 cfg_cmd->carr_coll = 0x00;
596 624
597 p->scb->cbl_offset = make16(cfg_cmd); 625 writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
598 p->scb->cmd_ruc = 0;
599
600 p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */
601 ni_attn586(); 626 ni_attn586();
602 627
603 WAIT_4_STAT_COMPL(cfg_cmd); 628 wait_for_stat_compl(cfg_cmd);
604 629
605 if((cfg_cmd->cmd_status & (STAT_OK|STAT_COMPL)) != (STAT_COMPL|STAT_OK)) 630 if ((readw(&cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) !=
606 { 631 (STAT_COMPL|STAT_OK)) {
607 printk("%s: configure command failed: %x\n",dev->name,cfg_cmd->cmd_status); 632 printk(KERN_ERR "%s: configure command failed: %x\n",
633 dev->name, readw(&cfg_cmd->cmd_status));
608 return 1; 634 return 1;
609 } 635 }
610 636
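
Illustrative sketch (assumed, not from the patch): the submit-and-poll pattern init586() repeats for each i82586 command block -- zero the status word, point the SCB command-block-list offset at the command, start the command unit, then poll cmd_status for completion. wait_for_stat_compl() plays the polling role in the patch; this helper is a simplified stand-in with hypothetical names.

#include <linux/io.h>
#include <linux/delay.h>

/* Leading fields every i82586 action command starts with (see ni52.h). */
struct cmd_hdr {
	u16 cmd_status;
	u16 cmd_cmd;
	u16 cmd_link;
};

static u16 poll_cmd_status(struct cmd_hdr __iomem *cmd, u16 compl_mask,
			   unsigned int max_loops)
{
	u16 status;

	do {
		status = readw(&cmd->cmd_status);
		if (status & compl_mask)	/* e.g. STAT_COMPL */
			break;
		udelay(16);
	} while (--max_loops);

	return status;				/* caller also checks STAT_OK */
}
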
@@ -614,21 +640,22 @@ static int init586(struct net_device *dev)
614 640
615 ias_cmd = (struct iasetup_cmd_struct *)ptr; 641 ias_cmd = (struct iasetup_cmd_struct *)ptr;
616 642
617 ias_cmd->cmd_status = 0; 643 writew(0, &ias_cmd->cmd_status);
618 ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST; 644 writew(CMD_IASETUP | CMD_LAST, &ias_cmd->cmd_cmd);
619 ias_cmd->cmd_link = 0xffff; 645 writew(0xffff, &ias_cmd->cmd_link);
620 646
621 memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN); 647 memcpy_toio((char *)&ias_cmd->iaddr, (char *)dev->dev_addr, ETH_ALEN);
622 648
623 p->scb->cbl_offset = make16(ias_cmd); 649 writew(make16(ias_cmd), &p->scb->cbl_offset);
624 650
625 p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ 651 writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
626 ni_attn586(); 652 ni_attn586();
627 653
628 WAIT_4_STAT_COMPL(ias_cmd); 654 wait_for_stat_compl(ias_cmd);
629 655
630 if((ias_cmd->cmd_status & (STAT_OK|STAT_COMPL)) != (STAT_OK|STAT_COMPL)) { 656 if ((readw(&ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) !=
631 printk("%s (ni52): individual address setup command failed: %04x\n",dev->name,ias_cmd->cmd_status); 657 (STAT_OK|STAT_COMPL)) {
658 printk(KERN_ERR "%s (ni52): individual address setup command failed: %04x\n", dev->name, readw(&ias_cmd->cmd_status));
632 return 1; 659 return 1;
633 } 660 }
634 661
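
Illustrative sketch: the accessor substitution the hunk above makes -- copies into card memory go through memcpy_toio() instead of plain memcpy(). The helper name is hypothetical.

#include <linux/io.h>
#include <linux/if_ether.h>

static void load_station_address(void __iomem *ia_field, const u8 *mac)
{
	memcpy_toio(ia_field, mac, ETH_ALEN);	/* 6-byte MAC into the IA-setup block */
}
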
@@ -638,117 +665,119 @@ static int init586(struct net_device *dev)
638 665
639 tdr_cmd = (struct tdr_cmd_struct *)ptr; 666 tdr_cmd = (struct tdr_cmd_struct *)ptr;
640 667
641 tdr_cmd->cmd_status = 0; 668 writew(0, &tdr_cmd->cmd_status);
642 tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST; 669 writew(CMD_TDR | CMD_LAST, &tdr_cmd->cmd_cmd);
643 tdr_cmd->cmd_link = 0xffff; 670 writew(0xffff, &tdr_cmd->cmd_link);
644 tdr_cmd->status = 0; 671 writew(0, &tdr_cmd->status);
645 672
646 p->scb->cbl_offset = make16(tdr_cmd); 673 writew(make16(tdr_cmd), &p->scb->cbl_offset);
647 p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ 674 writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
648 ni_attn586(); 675 ni_attn586();
649 676
650 WAIT_4_STAT_COMPL(tdr_cmd); 677 wait_for_stat_compl(tdr_cmd);
651
652 if(!(tdr_cmd->cmd_status & STAT_COMPL))
653 {
654 printk("%s: Problems while running the TDR.\n",dev->name);
655 }
656 else
657 {
658 DELAY_16(); /* wait for result */
659 result = tdr_cmd->status;
660 678
661 p->scb->cmd_cuc = p->scb->cus & STAT_MASK; 679 if (!(readw(&tdr_cmd->cmd_status) & STAT_COMPL))
680 printk(KERN_ERR "%s: Problems while running the TDR.\n",
681 dev->name);
682 else {
683 udelay(16);
684 result = readw(&tdr_cmd->status);
685 writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc);
662 ni_attn586(); /* ack the interrupts */ 686 ni_attn586(); /* ack the interrupts */
663 687
664 if(result & TDR_LNK_OK) 688 if (result & TDR_LNK_OK)
665 ; 689 ;
666 else if(result & TDR_XCVR_PRB) 690 else if (result & TDR_XCVR_PRB)
667 printk("%s: TDR: Transceiver problem. Check the cable(s)!\n",dev->name); 691 printk(KERN_ERR "%s: TDR: Transceiver problem. Check the cable(s)!\n",
668 else if(result & TDR_ET_OPN) 692 dev->name);
669 printk("%s: TDR: No correct termination %d clocks away.\n",dev->name,result & TDR_TIMEMASK); 693 else if (result & TDR_ET_OPN)
670 else if(result & TDR_ET_SRT) 694 printk(KERN_ERR "%s: TDR: No correct termination %d clocks away.\n",
671 { 695 dev->name, result & TDR_TIMEMASK);
672 if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */ 696 else if (result & TDR_ET_SRT) {
673 printk("%s: TDR: Detected a short circuit %d clocks away.\n",dev->name,result & TDR_TIMEMASK); 697 /* time == 0 -> strange :-) */
674 } 698 if (result & TDR_TIMEMASK)
675 else 699 printk(KERN_ERR "%s: TDR: Detected a short circuit %d clocks away.\n",
676 printk("%s: TDR: Unknown status %04x\n",dev->name,result); 700 dev->name, result & TDR_TIMEMASK);
701 } else
702 printk(KERN_ERR "%s: TDR: Unknown status %04x\n",
703 dev->name, result);
677 } 704 }
678 705
679 /* 706 /*
680 * Multicast setup 707 * Multicast setup
681 */ 708 */
682 if(num_addrs && !(dev->flags & IFF_PROMISC) ) 709 if (num_addrs && !(dev->flags & IFF_PROMISC)) {
683 {
684 mc_cmd = (struct mcsetup_cmd_struct *) ptr; 710 mc_cmd = (struct mcsetup_cmd_struct *) ptr;
685 mc_cmd->cmd_status = 0; 711 writew(0, &mc_cmd->cmd_status);
686 mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST; 712 writew(CMD_MCSETUP | CMD_LAST, &mc_cmd->cmd_cmd);
687 mc_cmd->cmd_link = 0xffff; 713 writew(0xffff, &mc_cmd->cmd_link);
688 mc_cmd->mc_cnt = num_addrs * 6; 714 writew(num_addrs * 6, &mc_cmd->mc_cnt);
689 715
690 for(i=0;i<num_addrs;i++,dmi=dmi->next) 716 for (i = 0; i < num_addrs; i++, dmi = dmi->next)
691 memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr,6); 717 memcpy_toio((char *) mc_cmd->mc_list[i],
718 dmi->dmi_addr, 6);
692 719
693 p->scb->cbl_offset = make16(mc_cmd); 720 writew(make16(mc_cmd), &p->scb->cbl_offset);
694 p->scb->cmd_cuc = CUC_START; 721 writeb(CUC_START, &p->scb->cmd_cuc);
695 ni_attn586(); 722 ni_attn586();
696 723
697 WAIT_4_STAT_COMPL(mc_cmd); 724 wait_for_stat_compl(mc_cmd);
698 725
699 if( (mc_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) ) 726 if ((readw(&mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK))
700 printk("%s: Can't apply multicast-address-list.\n",dev->name); 727 != (STAT_COMPL|STAT_OK))
728 printk(KERN_ERR "%s: Can't apply multicast-address-list.\n", dev->name);
701 } 729 }
702 730
703 /* 731 /*
704 * alloc nop/xmit-cmds 732 * alloc nop/xmit-cmds
705 */ 733 */
706#if (NUM_XMIT_BUFFS == 1) 734#if (NUM_XMIT_BUFFS == 1)
707 for(i=0;i<2;i++) 735 for (i = 0; i < 2; i++) {
708 { 736 p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
709 p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; 737 writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
710 p->nop_cmds[i]->cmd_cmd = CMD_NOP; 738 writew(0, &p->nop_cmds[i]->cmd_status);
711 p->nop_cmds[i]->cmd_status = 0; 739 writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
712 p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
713 ptr = (char *) ptr + sizeof(struct nop_cmd_struct); 740 ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
714 } 741 }
715#else 742#else
716 for(i=0;i<NUM_XMIT_BUFFS;i++) 743 for (i = 0; i < NUM_XMIT_BUFFS; i++) {
717 { 744 p->nop_cmds[i] = (struct nop_cmd_struct *)ptr;
718 p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; 745 writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
719 p->nop_cmds[i]->cmd_cmd = CMD_NOP; 746 writew(0, &p->nop_cmds[i]->cmd_status);
720 p->nop_cmds[i]->cmd_status = 0; 747 writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
721 p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i]));
722 ptr = (char *) ptr + sizeof(struct nop_cmd_struct); 748 ptr = (char *) ptr + sizeof(struct nop_cmd_struct);
723 } 749 }
724#endif 750#endif
725 751
726 ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */ 752 ptr = alloc_rfa(dev, (void *)ptr); /* init receive-frame-area */
727 753
728 /* 754 /*
729 * alloc xmit-buffs / init xmit_cmds 755 * alloc xmit-buffs / init xmit_cmds
730 */ 756 */
731 for(i=0;i<NUM_XMIT_BUFFS;i++) 757 for (i = 0; i < NUM_XMIT_BUFFS; i++) {
732 { 758 /* Transmit cmd/buff 0 */
733 p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/ 759 p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr;
734 ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); 760 ptr = (char *) ptr + sizeof(struct transmit_cmd_struct);
735 p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */ 761 p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */
736 ptr = (char *) ptr + XMIT_BUFF_SIZE; 762 ptr = (char *) ptr + XMIT_BUFF_SIZE;
737 p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */ 763 p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
738 ptr = (char *) ptr + sizeof(struct tbd_struct); 764 ptr = (char *) ptr + sizeof(struct tbd_struct);
739 if((void *)ptr > (void *)p->iscp) 765 if ((void *)ptr > (void *)p->iscp) {
740 { 766 printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n",
741 printk("%s: not enough shared-mem for your configuration!\n",dev->name); 767 dev->name);
742 return 1; 768 return 1;
743 } 769 }
744 memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct)); 770 memset_io((char *)(p->xmit_cmds[i]), 0,
745 memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct)); 771 sizeof(struct transmit_cmd_struct));
746 p->xmit_cmds[i]->cmd_link = make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]); 772 memset_io((char *)(p->xmit_buffs[i]), 0,
747 p->xmit_cmds[i]->cmd_status = STAT_COMPL; 773 sizeof(struct tbd_struct));
748 p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT; 774 writew(make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]),
749 p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i])); 775 &p->xmit_cmds[i]->cmd_link);
750 p->xmit_buffs[i]->next = 0xffff; 776 writew(STAT_COMPL, &p->xmit_cmds[i]->cmd_status);
751 p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i])); 777 writew(CMD_XMIT|CMD_INT, &p->xmit_cmds[i]->cmd_cmd);
778 writew(make16(p->xmit_buffs[i]), &p->xmit_cmds[i]->tbd_offset);
779 writew(0xffff, &p->xmit_buffs[i]->next);
780 writel(make24(p->xmit_cbuffs[i]), &p->xmit_buffs[i]->buffer);
752 } 781 }
753 782
754 p->xmit_count = 0; 783 p->xmit_count = 0;
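
Illustrative sketch (inferred, not the driver's actual macros): make16()/make24() convert a host virtual address inside the shared-memory window into what the i82586 expects -- a 16-bit offset or a 24-bit bus address. The arithmetic below is inferred from ni52_probe1() above, where p->base is set to isa_bus_to_virt(mem_start) + size - 0x01000000.

/* Minimal stand-ins; 'base' is the unsigned long field set up in ni52_probe1(). */
static inline u16 sketch_make16(unsigned long base, const volatile void *addr)
{
	return (u16)((unsigned long)addr - base);		/* low 16 bits of the offset */
}

static inline u32 sketch_make24(unsigned long base, const volatile void *addr)
{
	return (u32)((unsigned long)addr - base) & 0x00ffffff;	/* 24-bit bus address */
}
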
@@ -761,21 +790,21 @@ static int init586(struct net_device *dev)
761 * 'start transmitter' 790 * 'start transmitter'
762 */ 791 */
763#ifndef NO_NOPCOMMANDS 792#ifndef NO_NOPCOMMANDS
764 p->scb->cbl_offset = make16(p->nop_cmds[0]); 793 writew(make16(p->nop_cmds[0]), &p->scb->cbl_offset);
765 p->scb->cmd_cuc = CUC_START; 794 writeb(CUC_START, &p->scb->cmd_cuc);
766 ni_attn586(); 795 ni_attn586();
767 WAIT_4_SCB_CMD(); 796 wait_for_scb_cmd(dev);
768#else 797#else
769 p->xmit_cmds[0]->cmd_link = make16(p->xmit_cmds[0]); 798 writew(make16(p->xmit_cmds[0]), &p->xmit_cmds[0]->cmd_link);
770 p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_SUSPEND | CMD_INT; 799 writew(CMD_XMIT | CMD_SUSPEND | CMD_INT, &p->xmit_cmds[0]->cmd_cmd);
771#endif 800#endif
772 801
773 /* 802 /*
774 * ack. interrupts 803 * ack. interrupts
775 */ 804 */
776 p->scb->cmd_cuc = p->scb->cus & STAT_MASK; 805 writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc);
777 ni_attn586(); 806 ni_attn586();
778 DELAY_16(); 807 udelay(16);
779 808
780 ni_enaint(); 809 ni_enaint();
781 810
@@ -787,43 +816,45 @@ static int init586(struct net_device *dev)
787 * It sets up the Receive Frame Area (RFA). 816 * It sets up the Receive Frame Area (RFA).
788 */ 817 */
789 818
790static void *alloc_rfa(struct net_device *dev,void *ptr) 819static void *alloc_rfa(struct net_device *dev, void *ptr)
791{ 820{
792 volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr; 821 struct rfd_struct *rfd = (struct rfd_struct *)ptr;
793 volatile struct rbd_struct *rbd; 822 struct rbd_struct *rbd;
794 int i; 823 int i;
795 struct priv *p = (struct priv *) dev->priv; 824 struct priv *p = (struct priv *) dev->priv;
796 825
797 memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd)); 826 memset_io((char *) rfd, 0,
827 sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd));
798 p->rfd_first = rfd; 828 p->rfd_first = rfd;
799 829
800 for(i = 0; i < (p->num_recv_buffs+rfdadd); i++) { 830 for (i = 0; i < (p->num_recv_buffs + rfdadd); i++) {
801 rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) ); 831 writew(make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd)),
802 rfd[i].rbd_offset = 0xffff; 832 &rfd[i].next);
833 writew(0xffff, &rfd[i].rbd_offset);
803 } 834 }
804 rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP; /* RU suspend */ 835 /* RU suspend */
836 writeb(RFD_SUSP, &rfd[p->num_recv_buffs-1+rfdadd].last);
805 837
806 ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) ); 838 ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd));
807 839
808 rbd = (struct rbd_struct *) ptr; 840 rbd = (struct rbd_struct *) ptr;
809 ptr = (void *) (rbd + p->num_recv_buffs); 841 ptr = (void *) (rbd + p->num_recv_buffs);
810 842
811 /* clr descriptors */ 843 /* clr descriptors */
812 memset((char *) rbd,0,sizeof(struct rbd_struct)*(p->num_recv_buffs)); 844 memset_io((char *)rbd, 0,
845 sizeof(struct rbd_struct) * (p->num_recv_buffs));
813 846
814 for(i=0;i<p->num_recv_buffs;i++) 847 for (i = 0; i < p->num_recv_buffs; i++) {
815 { 848 writew(make16(rbd + (i+1) % p->num_recv_buffs), &rbd[i].next);
816 rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs)); 849 writew(RECV_BUFF_SIZE, &rbd[i].size);
817 rbd[i].size = RECV_BUFF_SIZE; 850 writel(make24(ptr), &rbd[i].buffer);
818 rbd[i].buffer = make24(ptr);
819 ptr = (char *) ptr + RECV_BUFF_SIZE; 851 ptr = (char *) ptr + RECV_BUFF_SIZE;
820 } 852 }
821
822 p->rfd_top = p->rfd_first; 853 p->rfd_top = p->rfd_first;
823 p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd); 854 p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
824 855
825 p->scb->rfa_offset = make16(p->rfd_first); 856 writew(make16(p->rfd_first), &p->scb->rfa_offset);
826 p->rfd_first->rbd_offset = make16(rbd); 857 writew(make16(rbd), &p->rfd_first->rbd_offset);
827 858
828 return ptr; 859 return ptr;
829} 860}
@@ -833,73 +864,71 @@ static void *alloc_rfa(struct net_device *dev,void *ptr)
833 * Interrupt Handler ... 864 * Interrupt Handler ...
834 */ 865 */
835 866
836static irqreturn_t ni52_interrupt(int irq,void *dev_id) 867static irqreturn_t ni52_interrupt(int irq, void *dev_id)
837{ 868{
838 struct net_device *dev = dev_id; 869 struct net_device *dev = dev_id;
839 unsigned short stat; 870 unsigned int stat;
840 int cnt=0; 871 int cnt = 0;
841 struct priv *p; 872 struct priv *p;
842 873
843 if (!dev) {
844 printk ("ni5210-interrupt: irq %d for unknown device.\n",irq);
845 return IRQ_NONE;
846 }
847 p = (struct priv *) dev->priv; 874 p = (struct priv *) dev->priv;
848 875
849 if(debuglevel > 1) 876 if (debuglevel > 1)
850 printk("I"); 877 printk("I");
851 878
852 WAIT_4_SCB_CMD(); /* wait for last command */ 879 spin_lock(&p->spinlock);
853 880
854 while((stat=p->scb->cus & STAT_MASK)) 881 wait_for_scb_cmd(dev); /* wait for last command */
855 { 882
856 p->scb->cmd_cuc = stat; 883 while ((stat = readb(&p->scb->cus) & STAT_MASK)) {
884 writeb(stat, &p->scb->cmd_cuc);
857 ni_attn586(); 885 ni_attn586();
858 886
859 if(stat & STAT_FR) /* received a frame */ 887 if (stat & STAT_FR) /* received a frame */
860 ni52_rcv_int(dev); 888 ni52_rcv_int(dev);
861 889
862 if(stat & STAT_RNR) /* RU went 'not ready' */ 890 if (stat & STAT_RNR) { /* RU went 'not ready' */
863 {
864 printk("(R)"); 891 printk("(R)");
865 if(p->scb->rus & RU_SUSPEND) /* special case: RU_SUSPEND */ 892 if (readb(&p->scb->rus) & RU_SUSPEND) {
866 { 893 /* special case: RU_SUSPEND */
867 WAIT_4_SCB_CMD(); 894 wait_for_scb_cmd(dev);
868 p->scb->cmd_ruc = RUC_RESUME; 895 p->scb->cmd_ruc = RUC_RESUME;
869 ni_attn586(); 896 ni_attn586();
870 WAIT_4_SCB_CMD_RUC(); 897 wait_for_scb_cmd_ruc(dev);
871 } 898 } else {
872 else 899 printk(KERN_ERR "%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",
873 { 900 dev->name, stat, readb(&p->scb->rus));
874 printk("%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->rus);
875 ni52_rnr_int(dev); 901 ni52_rnr_int(dev);
876 } 902 }
877 } 903 }
878 904
879 if(stat & STAT_CX) /* command with I-bit set complete */ 905 /* Command with I-bit set complete */
906 if (stat & STAT_CX)
880 ni52_xmt_int(dev); 907 ni52_xmt_int(dev);
881 908
882#ifndef NO_NOPCOMMANDS 909#ifndef NO_NOPCOMMANDS
883 if(stat & STAT_CNA) /* CU went 'not ready' */ 910 if (stat & STAT_CNA) { /* CU went 'not ready' */
884 { 911 if (netif_running(dev))
885 if(netif_running(dev)) 912 printk(KERN_ERR "%s: oops! CU has left active state. stat: %04x/%02x.\n",
886 printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus); 913 dev->name, stat, readb(&p->scb->cus));
887 } 914 }
888#endif 915#endif
889 916
890 if(debuglevel > 1) 917 if (debuglevel > 1)
891 printk("%d",cnt++); 918 printk("%d", cnt++);
892 919
893 WAIT_4_SCB_CMD(); /* wait for ack. (ni52_xmt_int can be faster than ack!!) */ 920 /* Wait for ack. (ni52_xmt_int can be faster than ack!!) */
894 if(p->scb->cmd_cuc) /* timed out? */ 921 wait_for_scb_cmd(dev);
895 { 922 if (p->scb->cmd_cuc) { /* timed out? */
896 printk("%s: Acknowledge timed out.\n",dev->name); 923 printk(KERN_ERR "%s: Acknowledge timed out.\n",
924 dev->name);
897 ni_disint(); 925 ni_disint();
898 break; 926 break;
899 } 927 }
900 } 928 }
929 spin_unlock(&p->spinlock);
901 930
902 if(debuglevel > 1) 931 if (debuglevel > 1)
903 printk("i"); 932 printk("i");
904 return IRQ_HANDLED; 933 return IRQ_HANDLED;
905} 934}
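
Illustrative sketch (assumed; ni52_open() is outside the hunks shown): how an interrupt handler with the signature above is normally registered and released in a driver's open/close paths.

#include <linux/interrupt.h>

static int hook_irq(struct net_device *dev)
{
	/* shared = 0: the ISA IRQ line for this card is not shareable */
	return request_irq(dev->irq, ni52_interrupt, 0, dev->name, dev);
}

static void unhook_irq(struct net_device *dev)
{
	free_irq(dev->irq, dev);	/* dev_id must match the request_irq() call */
}
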
@@ -910,121 +939,91 @@ static irqreturn_t ni52_interrupt(int irq,void *dev_id)
910 939
911static void ni52_rcv_int(struct net_device *dev) 940static void ni52_rcv_int(struct net_device *dev)
912{ 941{
913 int status,cnt=0; 942 int status, cnt = 0;
914 unsigned short totlen; 943 unsigned short totlen;
915 struct sk_buff *skb; 944 struct sk_buff *skb;
916 struct rbd_struct *rbd; 945 struct rbd_struct *rbd;
917 struct priv *p = (struct priv *) dev->priv; 946 struct priv *p = (struct priv *)dev->priv;
918 947
919 if(debuglevel > 0) 948 if (debuglevel > 0)
920 printk("R"); 949 printk("R");
921 950
922 for(;(status = p->rfd_top->stat_high) & RFD_COMPL;) 951 for (; (status = readb(&p->rfd_top->stat_high)) & RFD_COMPL;) {
923 { 952 rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
924 rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset); 953 if (status & RFD_OK) { /* frame received without error? */
925 954 totlen = readw(&rbd->status);
926 if(status & RFD_OK) /* frame received without error? */ 955 if (totlen & RBD_LAST) {
927 { 956 /* the first and the last buffer? */
928 if( (totlen = rbd->status) & RBD_LAST) /* the first and the last buffer? */ 957 totlen &= RBD_MASK; /* length of this frame */
929 { 958 writew(0x00, &rbd->status);
930 totlen &= RBD_MASK; /* length of this frame */ 959 skb = (struct sk_buff *)dev_alloc_skb(totlen+2);
931 rbd->status = 0; 960 if (skb != NULL) {
932 skb = (struct sk_buff *) dev_alloc_skb(totlen+2); 961 skb_reserve(skb, 2);
933 if(skb != NULL) 962 skb_put(skb, totlen);
934 { 963 skb_copy_to_linear_data(skb, (char *)p->base + (unsigned long) rbd->buffer, totlen);
935 skb_reserve(skb,2); 964 skb->protocol = eth_type_trans(skb, dev);
936 skb_put(skb,totlen); 965 netif_rx(skb);
937 skb_copy_to_linear_data(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen); 966 dev->last_rx = jiffies;
938 skb->protocol=eth_type_trans(skb,dev); 967 p->stats.rx_packets++;
939 netif_rx(skb); 968 p->stats.rx_bytes += totlen;
940 dev->last_rx = jiffies; 969 } else
941 p->stats.rx_packets++; 970 p->stats.rx_dropped++;
942 p->stats.rx_bytes += totlen; 971 } else {
972 int rstat;
973 /* free all RBD's until RBD_LAST is set */
974 totlen = 0;
975 while (!((rstat = readw(&rbd->status)) & RBD_LAST)) {
976 totlen += rstat & RBD_MASK;
977 if (!rstat) {
978 printk(KERN_ERR "%s: Whoops .. no end mark in RBD list\n", dev->name);
979 break;
943 } 980 }
944 else 981 writew(0, &rbd->status);
945 p->stats.rx_dropped++; 982 rbd = (struct rbd_struct *) make32(readl(&rbd->next));
946 } 983 }
947 else 984 totlen += rstat & RBD_MASK;
948 { 985 writew(0, &rbd->status);
949 int rstat; 986 printk(KERN_ERR "%s: received oversized frame! length: %d\n",
950 /* free all RBD's until RBD_LAST is set */ 987 dev->name, totlen);
951 totlen = 0; 988 p->stats.rx_dropped++;
952 while(!((rstat=rbd->status) & RBD_LAST))
953 {
954 totlen += rstat & RBD_MASK;
955 if(!rstat)
956 {
957 printk("%s: Whoops .. no end mark in RBD list\n",dev->name);
958 break;
959 }
960 rbd->status = 0;
961 rbd = (struct rbd_struct *) make32(rbd->next);
962 }
963 totlen += rstat & RBD_MASK;
964 rbd->status = 0;
965 printk("%s: received oversized frame! length: %d\n",dev->name,totlen);
966 p->stats.rx_dropped++;
967 } 989 }
968 } 990 } else {/* frame !(ok), only with 'save-bad-frames' */
969 else /* frame !(ok), only with 'save-bad-frames' */ 991 printk(KERN_ERR "%s: oops! rfd-error-status: %04x\n",
970 { 992 dev->name, status);
971 printk("%s: oops! rfd-error-status: %04x\n",dev->name,status);
972 p->stats.rx_errors++; 993 p->stats.rx_errors++;
973 } 994 }
974 p->rfd_top->stat_high = 0; 995 writeb(0, &p->rfd_top->stat_high);
975 p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */ 996 writeb(RFD_SUSP, &p->rfd_top->last); /* maybe exchange by RFD_LAST */
976 p->rfd_top->rbd_offset = 0xffff; 997 writew(0xffff, &p->rfd_top->rbd_offset);
977 p->rfd_last->last = 0; /* delete RFD_SUSP */ 998 writeb(0, &p->rfd_last->last); /* delete RFD_SUSP */
978 p->rfd_last = p->rfd_top; 999 p->rfd_last = p->rfd_top;
979 p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */ 1000 p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */
980 p->scb->rfa_offset = make16(p->rfd_top); 1001 writew(make16(p->rfd_top), &p->scb->rfa_offset);
981 1002
982 if(debuglevel > 0) 1003 if (debuglevel > 0)
983 printk("%d",cnt++); 1004 printk("%d", cnt++);
984 } 1005 }
985 1006
986 if(automatic_resume) 1007 if (automatic_resume) {
987 { 1008 wait_for_scb_cmd(dev);
988 WAIT_4_SCB_CMD(); 1009 writeb(RUC_RESUME, &p->scb->cmd_ruc);
989 p->scb->cmd_ruc = RUC_RESUME;
990 ni_attn586(); 1010 ni_attn586();
991 WAIT_4_SCB_CMD_RUC(); 1011 wait_for_scb_cmd_ruc(dev);
992 } 1012 }
993 1013
994#ifdef WAIT_4_BUSY 1014#ifdef WAIT_4_BUSY
995 { 1015 {
996 int i; 1016 int i;
997 for(i=0;i<1024;i++) 1017 for (i = 0; i < 1024; i++) {
998 { 1018 if (p->rfd_top->status)
999 if(p->rfd_top->status)
1000 break; 1019 break;
1001 DELAY_16(); 1020 udelay(16);
1002 if(i == 1023) 1021 if (i == 1023)
1003 printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name); 1022 printk(KERN_ERR "%s: RU hasn't fetched next RFD (not busy/complete)\n", dev->name);
1004 } 1023 }
1005 } 1024 }
1006#endif 1025#endif
1007 1026 if (debuglevel > 0)
1008#if 0
1009 if(!at_least_one)
1010 {
1011 int i;
1012 volatile struct rfd_struct *rfds=p->rfd_top;
1013 volatile struct rbd_struct *rbds;
1014 printk("%s: received a FC intr. without having a frame: %04x %d\n",dev->name,status,old_at_least);
1015 for(i=0;i< (p->num_recv_buffs+4);i++)
1016 {
1017 rbds = (struct rbd_struct *) make32(rfds->rbd_offset);
1018 printk("%04x:%04x ",rfds->status,rbds->status);
1019 rfds = (struct rfd_struct *) make32(rfds->next);
1020 }
1021 printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status);
1022 printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus);
1023 }
1024 old_at_least = at_least_one;
1025#endif
1026
1027 if(debuglevel > 0)
1028 printk("r"); 1027 printk("r");
1029} 1028}
1030 1029
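
Illustrative sketch of the receive hand-off done in ni52_rcv_int() above: allocate an skb with two bytes of headroom so the IP header lands word-aligned, copy the frame out of card memory, set the protocol and push it up the stack. The patch copies via skb_copy_to_linear_data(); memcpy_fromio() is used here as the __iomem-safe equivalent for the sketch, and the helper name is hypothetical.

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/io.h>

static int sketch_deliver_frame(struct net_device *dev,
				const void __iomem *frame, unsigned short len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (!skb)
		return -ENOMEM;			/* caller counts rx_dropped */
	skb_reserve(skb, 2);			/* align the IP header */
	memcpy_fromio(skb_put(skb, len), frame, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
	return 0;
}
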
@@ -1038,16 +1037,16 @@ static void ni52_rnr_int(struct net_device *dev)
1038 1037
1039 p->stats.rx_errors++; 1038 p->stats.rx_errors++;
1040 1039
1041 WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */ 1040 wait_for_scb_cmd(dev); /* wait for the last cmd, WAIT_4_FULLSTAT?? */
1042 p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */ 1041 writeb(RUC_ABORT, &p->scb->cmd_ruc); /* usually the RU is in the 'no resource'-state .. abort it now. */
1043 ni_attn586(); 1042 ni_attn586();
1044 WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. */ 1043 wait_for_scb_cmd_ruc(dev); /* wait for accept cmd. */
1045 1044
1046 alloc_rfa(dev,(char *)p->rfd_first); 1045 alloc_rfa(dev, (char *)p->rfd_first);
1047/* maybe add a check here, before restarting the RU */ 1046 /* maybe add a check here, before restarting the RU */
1048 startrecv586(dev); /* restart RU */ 1047 startrecv586(dev); /* restart RU */
1049 1048
1050 printk("%s: Receive-Unit restarted. Status: %04x\n",dev->name,p->scb->rus); 1049 printk(KERN_ERR "%s: Receive-Unit restarted. Status: %04x\n", dev->name, p->scb->rus);
1051 1050
1052} 1051}
1053 1052
@@ -1060,43 +1059,41 @@ static void ni52_xmt_int(struct net_device *dev)
1060 int status; 1059 int status;
1061 struct priv *p = (struct priv *) dev->priv; 1060 struct priv *p = (struct priv *) dev->priv;
1062 1061
1063 if(debuglevel > 0) 1062 if (debuglevel > 0)
1064 printk("X"); 1063 printk("X");
1065 1064
1066 status = p->xmit_cmds[p->xmit_last]->cmd_status; 1065 status = readw(&p->xmit_cmds[p->xmit_last]->cmd_status);
1067 if(!(status & STAT_COMPL)) 1066 if (!(status & STAT_COMPL))
1068 printk("%s: strange .. xmit-int without a 'COMPLETE'\n",dev->name); 1067 printk(KERN_ERR "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
1069 1068
1070 if(status & STAT_OK) 1069 if (status & STAT_OK) {
1071 {
1072 p->stats.tx_packets++; 1070 p->stats.tx_packets++;
1073 p->stats.collisions += (status & TCMD_MAXCOLLMASK); 1071 p->stats.collisions += (status & TCMD_MAXCOLLMASK);
1074 } 1072 } else {
1075 else
1076 {
1077 p->stats.tx_errors++; 1073 p->stats.tx_errors++;
1078 if(status & TCMD_LATECOLL) { 1074 if (status & TCMD_LATECOLL) {
1079 printk("%s: late collision detected.\n",dev->name); 1075 printk(KERN_ERR "%s: late collision detected.\n",
1076 dev->name);
1080 p->stats.collisions++; 1077 p->stats.collisions++;
1081 } 1078 } else if (status & TCMD_NOCARRIER) {
1082 else if(status & TCMD_NOCARRIER) {
1083 p->stats.tx_carrier_errors++; 1079 p->stats.tx_carrier_errors++;
1084 printk("%s: no carrier detected.\n",dev->name); 1080 printk(KERN_ERR "%s: no carrier detected.\n",
1085 } 1081 dev->name);
1086 else if(status & TCMD_LOSTCTS) 1082 } else if (status & TCMD_LOSTCTS)
1087 printk("%s: loss of CTS detected.\n",dev->name); 1083 printk(KERN_ERR "%s: loss of CTS detected.\n",
1088 else if(status & TCMD_UNDERRUN) { 1084 dev->name);
1085 else if (status & TCMD_UNDERRUN) {
1089 p->stats.tx_fifo_errors++; 1086 p->stats.tx_fifo_errors++;
1090 printk("%s: DMA underrun detected.\n",dev->name); 1087 printk(KERN_ERR "%s: DMA underrun detected.\n",
1091 } 1088 dev->name);
1092 else if(status & TCMD_MAXCOLL) { 1089 } else if (status & TCMD_MAXCOLL) {
1093 printk("%s: Max. collisions exceeded.\n",dev->name); 1090 printk(KERN_ERR "%s: Max. collisions exceeded.\n",
1091 dev->name);
1094 p->stats.collisions += 16; 1092 p->stats.collisions += 16;
1095 } 1093 }
1096 } 1094 }
1097
1098#if (NUM_XMIT_BUFFS > 1) 1095#if (NUM_XMIT_BUFFS > 1)
1099 if( (++p->xmit_last) == NUM_XMIT_BUFFS) 1096 if ((++p->xmit_last) == NUM_XMIT_BUFFS)
1100 p->xmit_last = 0; 1097 p->xmit_last = 0;
1101#endif 1098#endif
1102 netif_wake_queue(dev); 1099 netif_wake_queue(dev);
@@ -1110,41 +1107,51 @@ static void startrecv586(struct net_device *dev)
1110{ 1107{
1111 struct priv *p = (struct priv *) dev->priv; 1108 struct priv *p = (struct priv *) dev->priv;
1112 1109
1113 WAIT_4_SCB_CMD(); 1110 wait_for_scb_cmd(dev);
1114 WAIT_4_SCB_CMD_RUC(); 1111 wait_for_scb_cmd_ruc(dev);
1115 p->scb->rfa_offset = make16(p->rfd_first); 1112 writew(make16(p->rfd_first), &p->scb->rfa_offset);
1116 p->scb->cmd_ruc = RUC_START; 1113 writeb(RUC_START, &p->scb->cmd_ruc);
1117 ni_attn586(); /* start cmd. */ 1114 ni_attn586(); /* start cmd. */
1118 WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */ 1115 wait_for_scb_cmd_ruc(dev);
1116 /* wait for accept cmd. (no timeout!!) */
1119} 1117}
1120 1118
1121static void ni52_timeout(struct net_device *dev) 1119static void ni52_timeout(struct net_device *dev)
1122{ 1120{
1123 struct priv *p = (struct priv *) dev->priv; 1121 struct priv *p = (struct priv *) dev->priv;
1124#ifndef NO_NOPCOMMANDS 1122#ifndef NO_NOPCOMMANDS
1125 if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */ 1123 if (readb(&p->scb->cus) & CU_ACTIVE) { /* COMMAND-UNIT active? */
1126 {
1127 netif_wake_queue(dev); 1124 netif_wake_queue(dev);
1128#ifdef DEBUG 1125#ifdef DEBUG
1129 printk("%s: strange ... timeout with CU active?!?\n",dev->name); 1126 printk(KERN_ERR "%s: strange ... timeout with CU active?!?\n",
1130 printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)p->xmit_cmds[0]->cmd_status,(int)p->nop_cmds[0]->cmd_status,(int)p->nop_cmds[1]->cmd_status,(int)p->nop_point); 1127 dev->name);
1128 printk(KERN_ERR "%s: X0: %04x N0: %04x N1: %04x %d\n",
1129 dev->name, (int)p->xmit_cmds[0]->cmd_status,
1130 readw(&p->nop_cmds[0]->cmd_status),
1131 readw(&p->nop_cmds[1]->cmd_status),
1132 p->nop_point);
1131#endif 1133#endif
1132 p->scb->cmd_cuc = CUC_ABORT; 1134 writeb(CUC_ABORT, &p->scb->cmd_cuc);
1133 ni_attn586(); 1135 ni_attn586();
1134 WAIT_4_SCB_CMD(); 1136 wait_for_scb_cmd(dev);
1135 p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]); 1137 writew(make16(p->nop_cmds[p->nop_point]), &p->scb->cbl_offset);
1136 p->scb->cmd_cuc = CUC_START; 1138 writeb(CUC_START, &p->scb->cmd_cuc);
1137 ni_attn586(); 1139 ni_attn586();
1138 WAIT_4_SCB_CMD(); 1140 wait_for_scb_cmd(dev);
1139 dev->trans_start = jiffies; 1141 dev->trans_start = jiffies;
1140 return 0; 1142 return 0;
1141 } 1143 }
1142#endif 1144#endif
1143 { 1145 {
1144#ifdef DEBUG 1146#ifdef DEBUG
1145 printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus); 1147 printk(KERN_ERR "%s: xmitter timed out, try to restart! stat: %02x\n",
1146 printk("%s: command-stats: %04x %04x\n",dev->name,p->xmit_cmds[0]->cmd_status,p->xmit_cmds[1]->cmd_status); 1148 dev->name, readb(&p->scb->cus));
1147 printk("%s: check, whether you set the right interrupt number!\n",dev->name); 1149 printk(KERN_ERR "%s: command-stats: %04x %04x\n",
1150 dev->name,
1151 readw(&p->xmit_cmds[0]->cmd_status),
1152 readw(&p->xmit_cmds[1]->cmd_status));
1153 printk(KERN_ERR "%s: check, whether you set the right interrupt number!\n",
1154 dev->name);
1148#endif 1155#endif
1149 ni52_close(dev); 1156 ni52_close(dev);
1150 ni52_open(dev); 1157 ni52_open(dev);
@@ -1158,110 +1165,99 @@ static void ni52_timeout(struct net_device *dev)
1158 1165
1159static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev) 1166static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
1160{ 1167{
1161 int len,i; 1168 int len, i;
1162#ifndef NO_NOPCOMMANDS 1169#ifndef NO_NOPCOMMANDS
1163 int next_nop; 1170 int next_nop;
1164#endif 1171#endif
1165 struct priv *p = (struct priv *) dev->priv; 1172 struct priv *p = (struct priv *) dev->priv;
1166 1173
1167 if(skb->len > XMIT_BUFF_SIZE) 1174 if (skb->len > XMIT_BUFF_SIZE) {
1168 { 1175 printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len);
1169 printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
1170 return 0; 1176 return 0;
1171 } 1177 }
1172 1178
1173 netif_stop_queue(dev); 1179 netif_stop_queue(dev);
1174 1180
1175#if(NUM_XMIT_BUFFS > 1) 1181 skb_copy_from_linear_data(skb, (char *)p->xmit_cbuffs[p->xmit_count],
1176 if(test_and_set_bit(0,(void *) &p->lock)) { 1182 skb->len);
1177 printk("%s: Queue was locked\n",dev->name); 1183 len = skb->len;
1178 return 1; 1184 if (len < ETH_ZLEN) {
1185 len = ETH_ZLEN;
1186 memset((char *)p->xmit_cbuffs[p->xmit_count]+skb->len, 0,
1187 len - skb->len);
1179 } 1188 }
1180 else
1181#endif
1182 {
1183 skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len);
1184 len = skb->len;
1185 if (len < ETH_ZLEN) {
1186 len = ETH_ZLEN;
1187 memset((char *)p->xmit_cbuffs[p->xmit_count]+skb->len, 0, len - skb->len);
1188 }
1189 1189
1190#if (NUM_XMIT_BUFFS == 1) 1190#if (NUM_XMIT_BUFFS == 1)
1191# ifdef NO_NOPCOMMANDS 1191# ifdef NO_NOPCOMMANDS
1192 1192
1193#ifdef DEBUG 1193#ifdef DEBUG
1194 if(p->scb->cus & CU_ACTIVE) 1194 if (p->scb->cus & CU_ACTIVE) {
1195 { 1195 printk(KERN_ERR "%s: Hmmm .. CU is still running and we wanna send a new packet.\n", dev->name);
1196 printk("%s: Hmmm .. CU is still running and we wanna send a new packet.\n",dev->name); 1196 printk(KERN_ERR "%s: stat: %04x %04x\n",
1197 printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,p->xmit_cmds[0]->cmd_status); 1197 dev->name, readb(&p->scb->cus),
1198 } 1198 readw(&p->xmit_cmds[0]->cmd_status));
1199 }
1199#endif 1200#endif
 1200 1201 writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
1201 p->xmit_buffs[0]->size = TBD_LAST | len; 1202 for (i = 0; i < 16; i++) {
1202 for(i=0;i<16;i++) 1203 writew(0, &p->xmit_cmds[0]->cmd_status);
1203 { 1204 wait_for_scb_cmd(dev);
1204 p->xmit_cmds[0]->cmd_status = 0; 1205 if ((readb(&p->scb->cus) & CU_STATUS) == CU_SUSPEND)
1205 WAIT_4_SCB_CMD(); 1206 writeb(CUC_RESUME, &p->scb->cmd_cuc);
1206 if( (p->scb->cus & CU_STATUS) == CU_SUSPEND) 1207 else {
1207 p->scb->cmd_cuc = CUC_RESUME; 1208 writew(make16(p->xmit_cmds[0]), &p->scb->cbl_offset);
1208 else 1209 writeb(CUC_START, &p->scb->cmd_cuc);
1209 {
1210 p->scb->cbl_offset = make16(p->xmit_cmds[0]);
1211 p->scb->cmd_cuc = CUC_START;
1212 }
1213
1214 ni_attn586();
1215 dev->trans_start = jiffies;
1216 if(!i)
1217 dev_kfree_skb(skb);
1218 WAIT_4_SCB_CMD();
1219 if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */
1220 break;
1221 if(p->xmit_cmds[0]->cmd_status)
1222 break;
1223 if(i==15)
1224 printk("%s: Can't start transmit-command.\n",dev->name);
1225 } 1210 }
1226# else 1211 ni_attn586();
1227 next_nop = (p->nop_point + 1) & 0x1;
1228 p->xmit_buffs[0]->size = TBD_LAST | len;
1229
1230 p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
1231 = make16((p->nop_cmds[next_nop]));
1232 p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
1233
1234 p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
1235 dev->trans_start = jiffies; 1212 dev->trans_start = jiffies;
1236 p->nop_point = next_nop; 1213 if (!i)
1237 dev_kfree_skb(skb); 1214 dev_kfree_skb(skb);
1215 wait_for_scb_cmd(dev);
1216 /* test it, because CU sometimes doesn't start immediately */
1217 if (readb(&p->scb->cus) & CU_ACTIVE)
1218 break;
1219 if (readw(&p->xmit_cmds[0]->cmd_status))
1220 break;
1221 if (i == 15)
1222 printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name);
1223 }
1224# else
1225 next_nop = (p->nop_point + 1) & 0x1;
1226 writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
1227 writew(make16(p->nop_cmds[next_nop]), &p->xmit_cmds[0]->cmd_link);
1228 writew(make16(p->nop_cmds[next_nop]),
1229 &p->nop_cmds[next_nop]->cmd_link);
1230 writew(0, &p->xmit_cmds[0]->cmd_status);
1231 writew(0, &p->nop_cmds[next_nop]->cmd_status);
1232
1233 writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link);
1234 dev->trans_start = jiffies;
1235 p->nop_point = next_nop;
1236 dev_kfree_skb(skb);
1238# endif 1237# endif
1239#else 1238#else
1240 p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len; 1239 writew(TBD_LAST | len, &p->xmit_buffs[p->xmit_count]->size);
 1241 if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS ) 1240 next_nop = p->xmit_count + 1;
1242 next_nop = 0; 1241 if (next_nop == NUM_XMIT_BUFFS)
1243 1242 next_nop = 0;
1244 p->xmit_cmds[p->xmit_count]->cmd_status = 0; 1243 writew(0, &p->xmit_cmds[p->xmit_count]->cmd_status);
1245 /* linkpointer of xmit-command already points to next nop cmd */ 1244 /* linkpointer of xmit-command already points to next nop cmd */
1246 p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop])); 1245 writew(make16(p->nop_cmds[next_nop]),
1247 p->nop_cmds[next_nop]->cmd_status = 0; 1246 &p->nop_cmds[next_nop]->cmd_link);
1248 1247 writew(0, &p->nop_cmds[next_nop]->cmd_status);
1249 p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count])); 1248 writew(make16(p->xmit_cmds[p->xmit_count]),
1250 dev->trans_start = jiffies; 1249 &p->nop_cmds[p->xmit_count]->cmd_link);
1251 p->xmit_count = next_nop; 1250 dev->trans_start = jiffies;
1252 1251 p->xmit_count = next_nop;
1253 { 1252 {
1254 unsigned long flags; 1253 unsigned long flags;
 1255 save_flags(flags); 1254 spin_lock_irqsave(&p->spinlock, flags);
1256 cli(); 1255 if (p->xmit_count != p->xmit_last)
1257 if(p->xmit_count != p->xmit_last) 1256 netif_wake_queue(dev);
 1258 netif_wake_queue(dev); 1257 spin_unlock_irqrestore(&p->spinlock, flags);
1259 p->lock = 0;
1260 restore_flags(flags);
1261 }
1262 dev_kfree_skb(skb);
1263#endif
1264 } 1258 }
1259 dev_kfree_skb(skb);
1260#endif
1265 return 0; 1261 return 0;
1266} 1262}
1267 1263
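
Illustrative sketch of the copy-and-pad step in ni52_send_packet() above: frames shorter than ETH_ZLEN (60 bytes) are zero-padded so the transmitted frame meets the 802.3 minimum. The helper name is hypothetical.

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/string.h>

static unsigned int copy_and_pad(void *txbuf, const struct sk_buff *skb)
{
	unsigned int len = skb->len;

	skb_copy_from_linear_data(skb, txbuf, len);
	if (len < ETH_ZLEN) {
		memset((char *)txbuf + len, 0, ETH_ZLEN - len);
		len = ETH_ZLEN;		/* pad up to the minimum frame size */
	}
	return len;
}
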
@@ -1272,16 +1268,17 @@ static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
1272static struct net_device_stats *ni52_get_stats(struct net_device *dev) 1268static struct net_device_stats *ni52_get_stats(struct net_device *dev)
1273{ 1269{
1274 struct priv *p = (struct priv *) dev->priv; 1270 struct priv *p = (struct priv *) dev->priv;
1275 unsigned short crc,aln,rsc,ovrn; 1271 unsigned short crc, aln, rsc, ovrn;
1276 1272
1277 crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */ 1273 /* Get error-statistics from the ni82586 */
1278 p->scb->crc_errs = 0; 1274 crc = readw(&p->scb->crc_errs);
1279 aln = p->scb->aln_errs; 1275 writew(0, &p->scb->crc_errs);
1280 p->scb->aln_errs = 0; 1276 aln = readw(&p->scb->aln_errs);
1281 rsc = p->scb->rsc_errs; 1277 writew(0, &p->scb->aln_errs);
1282 p->scb->rsc_errs = 0; 1278 rsc = readw(&p->scb->rsc_errs);
1283 ovrn = p->scb->ovrn_errs; 1279 writew(0, &p->scb->rsc_errs);
1284 p->scb->ovrn_errs = 0; 1280 ovrn = readw(&p->scb->ovrn_errs);
1281 writew(0, &p->scb->ovrn_errs);
1285 1282
1286 p->stats.rx_crc_errors += crc; 1283 p->stats.rx_crc_errors += crc;
1287 p->stats.rx_fifo_errors += ovrn; 1284 p->stats.rx_fifo_errors += ovrn;
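
Illustrative helper capturing the read-then-clear pattern used for the SCB error counters above (the chip keeps incrementing, so the driver latches the value and resets the counter in one step); the name is hypothetical.

#include <linux/io.h>

static u16 read_and_clear(u16 __iomem *counter)
{
	u16 val = readw(counter);

	writew(0, counter);
	return val;
}
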
@@ -1320,8 +1317,9 @@ MODULE_PARM_DESC(memend, "NI5210 memory end address,required");
1320 1317
1321int __init init_module(void) 1318int __init init_module(void)
1322{ 1319{
1323 if(io <= 0x0 || !memend || !memstart || irq < 2) { 1320 if (io <= 0x0 || !memend || !memstart || irq < 2) {
1324 printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n"); 1321 printk(KERN_ERR "ni52: Autoprobing not allowed for modules.\n");
1322 printk(KERN_ERR "ni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n");
1325 return -ENODEV; 1323 return -ENODEV;
1326 } 1324 }
1327 dev_ni52 = ni52_probe(-1); 1325 dev_ni52 = ni52_probe(-1);
@@ -1338,42 +1336,6 @@ void __exit cleanup_module(void)
1338} 1336}
1339#endif /* MODULE */ 1337#endif /* MODULE */
1340 1338
1341#if 0
1342/*
1343 * DUMP .. we expect a not running CMD unit and enough space
1344 */
1345void ni52_dump(struct net_device *dev,void *ptr)
1346{
1347 struct priv *p = (struct priv *) dev->priv;
1348 struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr;
1349 int i;
1350
1351 p->scb->cmd_cuc = CUC_ABORT;
1352 ni_attn586();
1353 WAIT_4_SCB_CMD();
1354 WAIT_4_SCB_CMD_RUC();
1355
1356 dump_cmd->cmd_status = 0;
1357 dump_cmd->cmd_cmd = CMD_DUMP | CMD_LAST;
1358 dump_cmd->dump_offset = make16((dump_cmd + 1));
1359 dump_cmd->cmd_link = 0xffff;
1360
1361 p->scb->cbl_offset = make16(dump_cmd);
1362 p->scb->cmd_cuc = CUC_START;
1363 ni_attn586();
1364 WAIT_4_STAT_COMPL(dump_cmd);
1365
1366 if( (dump_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) )
1367 printk("%s: Can't get dump information.\n",dev->name);
1368
1369 for(i=0;i<170;i++) {
1370 printk("%02x ",(int) ((unsigned char *) (dump_cmd + 1))[i]);
1371 if(i % 24 == 23)
1372 printk("\n");
1373 }
1374 printk("\n");
1375}
1376#endif
1377MODULE_LICENSE("GPL"); 1339MODULE_LICENSE("GPL");
1378 1340
1379/* 1341/*
diff --git a/drivers/net/ni52.h b/drivers/net/ni52.h
index a33ea0884aaf..1f28a4d1a319 100644
--- a/drivers/net/ni52.h
+++ b/drivers/net/ni52.h
@@ -36,12 +36,12 @@
36 36
37struct scp_struct 37struct scp_struct
38{ 38{
39 unsigned short zero_dum0; /* has to be zero */ 39 u16 zero_dum0; /* has to be zero */
40 unsigned char sysbus; /* 0=16Bit,1=8Bit */ 40 u8 sysbus; /* 0=16Bit,1=8Bit */
41 unsigned char zero_dum1; /* has to be zero for 586 */ 41 u8 zero_dum1; /* has to be zero for 586 */
42 unsigned short zero_dum2; 42 u8 zero_dum2;
43 unsigned short zero_dum3; 43 u8 zero_dum3;
44 char *iscp; /* pointer to the iscp-block */ 44 u32 iscp; /* pointer to the iscp-block */
45}; 45};
46 46
47 47
@@ -50,10 +50,10 @@ struct scp_struct
50 */ 50 */
51struct iscp_struct 51struct iscp_struct
52{ 52{
53 unsigned char busy; /* 586 clears after successful init */ 53 u8 busy; /* 586 clears after successful init */
54 unsigned char zero_dummy; /* has to be zero */ 54 u8 zero_dummy; /* has to be zero */
55 unsigned short scb_offset; /* pointeroffset to the scb_base */ 55 u16 scb_offset; /* pointeroffset to the scb_base */
56 char *scb_base; /* base-address of all 16-bit offsets */ 56 u32 scb_base; /* base-address of all 16-bit offsets */
57}; 57};
58 58
59/* 59/*
@@ -61,16 +61,16 @@ struct iscp_struct
61 */ 61 */
62struct scb_struct 62struct scb_struct
63{ 63{
64 unsigned char rus; 64 u8 rus;
65 unsigned char cus; 65 u8 cus;
66 unsigned char cmd_ruc; /* command word: RU part */ 66 u8 cmd_ruc; /* command word: RU part */
67 unsigned char cmd_cuc; /* command word: CU part & ACK */ 67 u8 cmd_cuc; /* command word: CU part & ACK */
68 unsigned short cbl_offset; /* pointeroffset, command block list */ 68 u16 cbl_offset; /* pointeroffset, command block list */
69 unsigned short rfa_offset; /* pointeroffset, receive frame area */ 69 u16 rfa_offset; /* pointeroffset, receive frame area */
70 unsigned short crc_errs; /* CRC-Error counter */ 70 u16 crc_errs; /* CRC-Error counter */
71 unsigned short aln_errs; /* alignmenterror counter */ 71 u16 aln_errs; /* alignmenterror counter */
72 unsigned short rsc_errs; /* Resourceerror counter */ 72 u16 rsc_errs; /* Resourceerror counter */
73 unsigned short ovrn_errs; /* OVerrunerror counter */ 73 u16 ovrn_errs; /* OVerrunerror counter */
74}; 74};
75 75
76/* 76/*
@@ -119,16 +119,16 @@ struct scb_struct
119 */ 119 */
120struct rfd_struct 120struct rfd_struct
121{ 121{
122 unsigned char stat_low; /* status word */ 122 u8 stat_low; /* status word */
123 unsigned char stat_high; /* status word */ 123 u8 stat_high; /* status word */
124 unsigned char rfd_sf; /* 82596 mode only */ 124 u8 rfd_sf; /* 82596 mode only */
125 unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */ 125 u8 last; /* Bit15,Last Frame on List / Bit14,suspend */
126 unsigned short next; /* linkoffset to next RFD */ 126 u16 next; /* linkoffset to next RFD */
127 unsigned short rbd_offset; /* pointeroffset to RBD-buffer */ 127 u16 rbd_offset; /* pointeroffset to RBD-buffer */
128 unsigned char dest[6]; /* ethernet-address, destination */ 128 u8 dest[6]; /* ethernet-address, destination */
129 unsigned char source[6]; /* ethernet-address, source */ 129 u8 source[6]; /* ethernet-address, source */
130 unsigned short length; /* 802.3 frame-length */ 130 u16 length; /* 802.3 frame-length */
131 unsigned short zero_dummy; /* dummy */ 131 u16 zero_dummy; /* dummy */
132}; 132};
133 133
134#define RFD_LAST 0x80 /* last: last rfd in the list */ 134#define RFD_LAST 0x80 /* last: last rfd in the list */
@@ -153,11 +153,11 @@ struct rfd_struct
153 */ 153 */
154struct rbd_struct 154struct rbd_struct
155{ 155{
156 unsigned short status; /* status word,number of used bytes in buff */ 156 u16 status; /* status word,number of used bytes in buff */
157 unsigned short next; /* pointeroffset to next RBD */ 157 u16 next; /* pointeroffset to next RBD */
158 char *buffer; /* receive buffer address pointer */ 158 u32 buffer; /* receive buffer address pointer */
159 unsigned short size; /* size of this buffer */ 159 u16 size; /* size of this buffer */
160 unsigned short zero_dummy; /* dummy */ 160 u16 zero_dummy; /* dummy */
161}; 161};
162 162
163#define RBD_LAST 0x8000 /* last buffer */ 163#define RBD_LAST 0x8000 /* last buffer */
@@ -195,9 +195,9 @@ struct rbd_struct
195 */ 195 */
196struct nop_cmd_struct 196struct nop_cmd_struct
197{ 197{
198 unsigned short cmd_status; /* status of this command */ 198 u16 cmd_status; /* status of this command */
199 unsigned short cmd_cmd; /* the command itself (+bits) */ 199 u16 cmd_cmd; /* the command itself (+bits) */
200 unsigned short cmd_link; /* offsetpointer to next command */ 200 u16 cmd_link; /* offsetpointer to next command */
201}; 201};
202 202
203/* 203/*
@@ -205,10 +205,10 @@ struct nop_cmd_struct
205 */ 205 */
206struct iasetup_cmd_struct 206struct iasetup_cmd_struct
207{ 207{
208 unsigned short cmd_status; 208 u16 cmd_status;
209 unsigned short cmd_cmd; 209 u16 cmd_cmd;
210 unsigned short cmd_link; 210 u16 cmd_link;
211 unsigned char iaddr[6]; 211 u8 iaddr[6];
212}; 212};
213 213
214/* 214/*
@@ -216,21 +216,21 @@ struct iasetup_cmd_struct
216 */ 216 */
217struct configure_cmd_struct 217struct configure_cmd_struct
218{ 218{
219 unsigned short cmd_status; 219 u16 cmd_status;
220 unsigned short cmd_cmd; 220 u16 cmd_cmd;
221 unsigned short cmd_link; 221 u16 cmd_link;
222 unsigned char byte_cnt; /* size of the config-cmd */ 222 u8 byte_cnt; /* size of the config-cmd */
223 unsigned char fifo; /* fifo/recv monitor */ 223 u8 fifo; /* fifo/recv monitor */
224 unsigned char sav_bf; /* save bad frames (bit7=1)*/ 224 u8 sav_bf; /* save bad frames (bit7=1)*/
225 unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/ 225 u8 adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
226 unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */ 226 u8 priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
227 unsigned char ifs; /* inter frame spacing */ 227 u8 ifs; /* inter frame spacing */
228 unsigned char time_low; /* slot time low */ 228 u8 time_low; /* slot time low */
229 unsigned char time_high; /* slot time high(0-2) and max. retries(4-7) */ 229 u8 time_high; /* slot time high(0-2) and max. retries(4-7) */
230 unsigned char promisc; /* promisc-mode(0) , et al (1-7) */ 230 u8 promisc; /* promisc-mode(0) , et al (1-7) */
231 unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */ 231 u8 carr_coll; /* carrier(0-3)/collision(4-7) stuff */
232 unsigned char fram_len; /* minimal frame len */ 232 u8 fram_len; /* minimal frame len */
233 unsigned char dummy; /* dummy */ 233 u8 dummy; /* dummy */
234}; 234};
235 235
236/* 236/*
@@ -238,11 +238,11 @@ struct configure_cmd_struct
238 */ 238 */
239struct mcsetup_cmd_struct 239struct mcsetup_cmd_struct
240{ 240{
241 unsigned short cmd_status; 241 u16 cmd_status;
242 unsigned short cmd_cmd; 242 u16 cmd_cmd;
243 unsigned short cmd_link; 243 u16 cmd_link;
244 unsigned short mc_cnt; /* number of bytes in the MC-List */ 244 u16 mc_cnt; /* number of bytes in the MC-List */
245 unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */ 245 u8 mc_list[0][6]; /* pointer to 6 bytes entries */
246}; 246};
247 247
248/* 248/*
@@ -250,10 +250,10 @@ struct mcsetup_cmd_struct
250 */ 250 */
251struct dump_cmd_struct 251struct dump_cmd_struct
252{ 252{
253 unsigned short cmd_status; 253 u16 cmd_status;
254 unsigned short cmd_cmd; 254 u16 cmd_cmd;
255 unsigned short cmd_link; 255 u16 cmd_link;
256 unsigned short dump_offset; /* pointeroffset to DUMP space */ 256 u16 dump_offset; /* pointeroffset to DUMP space */
257}; 257};
258 258
259/* 259/*
@@ -261,12 +261,12 @@ struct dump_cmd_struct
261 */ 261 */
262struct transmit_cmd_struct 262struct transmit_cmd_struct
263{ 263{
264 unsigned short cmd_status; 264 u16 cmd_status;
265 unsigned short cmd_cmd; 265 u16 cmd_cmd;
266 unsigned short cmd_link; 266 u16 cmd_link;
267 unsigned short tbd_offset; /* pointeroffset to TBD */ 267 u16 tbd_offset; /* pointeroffset to TBD */
268 unsigned char dest[6]; /* destination address of the frame */ 268 u8 dest[6]; /* destination address of the frame */
269 unsigned short length; /* user defined: 802.3 length / Ether type */ 269 u16 length; /* user defined: 802.3 length / Ether type */
270}; 270};
271 271
272#define TCMD_ERRMASK 0x0fa0 272#define TCMD_ERRMASK 0x0fa0
@@ -281,10 +281,10 @@ struct transmit_cmd_struct
281 281
282struct tdr_cmd_struct 282struct tdr_cmd_struct
283{ 283{
284 unsigned short cmd_status; 284 u16 cmd_status;
285 unsigned short cmd_cmd; 285 u16 cmd_cmd;
286 unsigned short cmd_link; 286 u16 cmd_link;
287 unsigned short status; 287 u16 status;
288}; 288};
289 289
290#define TDR_LNK_OK 0x8000 /* No link problem identified */ 290#define TDR_LNK_OK 0x8000 /* No link problem identified */
@@ -298,9 +298,9 @@ struct tdr_cmd_struct
298 */ 298 */
299struct tbd_struct 299struct tbd_struct
300{ 300{
301 unsigned short size; /* size + EOF-Flag(15) */ 301 u16 size; /* size + EOF-Flag(15) */
302 unsigned short next; /* pointeroffset to next TBD */ 302 u16 next; /* pointeroffset to next TBD */
303 char *buffer; /* pointer to buffer */ 303 u32 buffer; /* pointer to buffer */
304}; 304};
305 305
306#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */ 306#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
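The ni52 hunks above replace unsigned short/char * with the fixed-width u16/u32 because these descriptor structures are laid out for the 82586's DMA engine: the buffer fields must stay 32 bits wide whatever the host pointer size. A minimal userspace sketch (stdint.h types standing in for the kernel's u16/u32; the struct names are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* sketch of tbd_struct before and after the change above */
struct tbd_sketch_old { uint16_t size; uint16_t next; char *buffer; };
struct tbd_sketch_new { uint16_t size; uint16_t next; uint32_t buffer; };

int main(void)
{
	/* on a 64-bit host the pointer version grows and gets padded;
	 * the fixed-width version keeps the 8-byte layout the chip expects */
	printf("with pointer: %zu bytes, with u32: %zu bytes\n",
	       sizeof(struct tbd_sketch_old), sizeof(struct tbd_sketch_new));
	return 0;
}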
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2fe14b0c5c67..d11ba61baa4f 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -33,8 +33,8 @@
33 33
34#define DRV_MODULE_NAME "niu" 34#define DRV_MODULE_NAME "niu"
35#define PFX DRV_MODULE_NAME ": " 35#define PFX DRV_MODULE_NAME ": "
36#define DRV_MODULE_VERSION "0.6" 36#define DRV_MODULE_VERSION "0.7"
37#define DRV_MODULE_RELDATE "January 5, 2008" 37#define DRV_MODULE_RELDATE "February 18, 2008"
38 38
39static char version[] __devinitdata = 39static char version[] __devinitdata =
40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 40 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -1616,12 +1616,13 @@ static int niu_enable_alt_mac(struct niu *np, int index, int on)
1616 if (index >= niu_num_alt_addr(np)) 1616 if (index >= niu_num_alt_addr(np))
1617 return -EINVAL; 1617 return -EINVAL;
1618 1618
1619 if (np->flags & NIU_FLAGS_XMAC) 1619 if (np->flags & NIU_FLAGS_XMAC) {
1620 reg = XMAC_ADDR_CMPEN; 1620 reg = XMAC_ADDR_CMPEN;
1621 else 1621 mask = 1 << index;
1622 } else {
1622 reg = BMAC_ADDR_CMPEN; 1623 reg = BMAC_ADDR_CMPEN;
1623 1624 mask = 1 << (index + 1);
1624 mask = 1 << index; 1625 }
1625 1626
1626 val = nr64_mac(reg); 1627 val = nr64_mac(reg);
1627 if (on) 1628 if (on)
@@ -5147,7 +5148,12 @@ static void niu_set_rx_mode(struct net_device *dev)
5147 index++; 5148 index++;
5148 } 5149 }
5149 } else { 5150 } else {
5150 for (i = 0; i < niu_num_alt_addr(np); i++) { 5151 int alt_start;
5152 if (np->flags & NIU_FLAGS_XMAC)
5153 alt_start = 0;
5154 else
5155 alt_start = 1;
5156 for (i = alt_start; i < niu_num_alt_addr(np); i++) {
5151 err = niu_enable_alt_mac(np, i, 0); 5157 err = niu_enable_alt_mac(np, i, 0);
5152 if (err) 5158 if (err)
5153 printk(KERN_WARNING PFX "%s: Error %d " 5159 printk(KERN_WARNING PFX "%s: Error %d "
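Both niu.c hunks above encode the same rule: XMAC alternate address N is controlled by bit N of its compare-enable register, while on BMAC bit 0 belongs to the primary address, so alternate entries start one bit (and one index) higher. A standalone restatement of that bit selection, not the driver's code:

#include <stdbool.h>
#include <stdint.h>

/* sketch of the mask choice made in niu_enable_alt_mac() above */
static uint64_t alt_mac_enable_bit(bool is_xmac, int index)
{
	/* XMAC: alt entry N -> bit N; BMAC: bit 0 is the primary
	 * address, so alt entry N -> bit N + 1 */
	return is_xmac ? (1ULL << index) : (1ULL << (index + 1));
}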
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 0e8626adc573..59dc05fcd371 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -499,7 +499,7 @@
499#define BMAC_ADDR2 0x00110UL 499#define BMAC_ADDR2 0x00110UL
500#define BMAC_ADDR2_ADDR2 0x000000000000ffffULL 500#define BMAC_ADDR2_ADDR2 0x000000000000ffffULL
501 501
502#define BMAC_NUM_ALT_ADDR 7 502#define BMAC_NUM_ALT_ADDR 6
503 503
504#define BMAC_ALT_ADDR0(NUM) (0x00118UL + (NUM)*0x18UL) 504#define BMAC_ALT_ADDR0(NUM) (0x00118UL + (NUM)*0x18UL)
505#define BMAC_ALT_ADDR0_ADDR0 0x000000000000ffffULL 505#define BMAC_ALT_ADDR0_ADDR0 0x000000000000ffffULL
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index f18eca9831e8..250eb1954c34 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -559,8 +559,16 @@ static int mhz_setup(struct pcmcia_device *link)
559 559
560 /* Read the station address from the CIS. It is stored as the last 560 /* Read the station address from the CIS. It is stored as the last
561 (fourth) string in the Version 1 Version/ID tuple. */ 561 (fourth) string in the Version 1 Version/ID tuple. */
562 if (link->prod_id[3]) { 562 tuple->DesiredTuple = CISTPL_VERS_1;
563 station_addr = link->prod_id[3]; 563 if (first_tuple(link, tuple, parse) != CS_SUCCESS) {
564 rc = -1;
565 goto free_cfg_mem;
566 }
567 /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */
568 if (next_tuple(link, tuple, parse) != CS_SUCCESS)
569 first_tuple(link, tuple, parse);
570 if (parse->version_1.ns > 3) {
571 station_addr = parse->version_1.str + parse->version_1.ofs[3];
564 if (cvt_ascii_address(dev, station_addr) == 0) { 572 if (cvt_ascii_address(dev, station_addr) == 0) {
565 rc = 0; 573 rc = 0;
566 goto free_cfg_mem; 574 goto free_cfg_mem;
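The replacement code above walks the card's CIS for the VERS_1 tuple (tolerating the EM1144's duplicate), parses it, and takes the fourth string as the station address. The parsed tuple is essentially a string pool plus per-string offsets; a plain-C sketch of pulling the n-th string out of such a layout (field names here are illustrative, not the pcmcia structures):

#include <stddef.h>

/* rough analog of cistpl_vers_1_t's ns/ofs/str fields */
struct vers1_sketch {
	unsigned char ns;	/* number of strings present      */
	unsigned char ofs[4];	/* offset of each string in str[] */
	char str[254];		/* NUL-separated string pool      */
};

static const char *vers1_string(const struct vers1_sketch *v, int n)
{
	if (n >= v->ns)		/* e.g. no 4th string: no MAC in the CIS */
		return NULL;
	return v->str + v->ofs[n];
}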
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index c4b74e9fed20..4eb322e5273d 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -174,7 +174,11 @@ static int homepna[MAX_UNITS];
174#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS)) 174#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
175#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS)) 175#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
176 176
177#define PKT_BUF_SZ 1544 177#define PKT_BUF_SKB 1544
178/* actual buffer length after being aligned */
179#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN)
180/* chip wants twos complement of the (aligned) buffer length */
181#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB)
178 182
179/* Offsets from base I/O address. */ 183/* Offsets from base I/O address. */
180#define PCNET32_WIO_RDP 0x10 184#define PCNET32_WIO_RDP 0x10
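The three macros introduced above keep one number for the skb allocation and derive the two the hardware path needs from it; with the usual NET_IP_ALIGN of 2 the DMA length is 1542 and the ring's buf_length field gets its 16-bit two's complement. A standalone check of that arithmetic (not driver code):

#include <stdint.h>
#include <stdio.h>

#define NET_IP_ALIGN	2			/* assumed, as on most arches */
#define PKT_BUF_SKB	1544			/* skb allocation size        */
#define PKT_BUF_SIZE	(PKT_BUF_SKB - NET_IP_ALIGN)	/* DMA-mapped length    */
#define NEG_BUF_SIZE	(NET_IP_ALIGN - PKT_BUF_SKB)	/* value the chip wants */

int main(void)
{
	/* -1542 stored as a 16-bit two's complement value is 0xf9fa */
	printf("PKT_BUF_SIZE=%d NEG_BUF_SIZE=%d as u16=0x%04x\n",
	       PKT_BUF_SIZE, NEG_BUF_SIZE, (uint16_t)NEG_BUF_SIZE);
	return 0;
}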
@@ -604,7 +608,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
604 /* now allocate any new buffers needed */ 608 /* now allocate any new buffers needed */
605 for (; new < size; new++ ) { 609 for (; new < size; new++ ) {
606 struct sk_buff *rx_skbuff; 610 struct sk_buff *rx_skbuff;
607 new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ); 611 new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
608 if (!(rx_skbuff = new_skb_list[new])) { 612 if (!(rx_skbuff = new_skb_list[new])) {
609 /* keep the original lists and buffers */ 613 /* keep the original lists and buffers */
610 if (netif_msg_drv(lp)) 614 if (netif_msg_drv(lp))
@@ -613,20 +617,20 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
613 dev->name); 617 dev->name);
614 goto free_all_new; 618 goto free_all_new;
615 } 619 }
616 skb_reserve(rx_skbuff, 2); 620 skb_reserve(rx_skbuff, NET_IP_ALIGN);
617 621
618 new_dma_addr_list[new] = 622 new_dma_addr_list[new] =
619 pci_map_single(lp->pci_dev, rx_skbuff->data, 623 pci_map_single(lp->pci_dev, rx_skbuff->data,
620 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); 624 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
621 new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]); 625 new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
622 new_rx_ring[new].buf_length = cpu_to_le16(2 - PKT_BUF_SZ); 626 new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
623 new_rx_ring[new].status = cpu_to_le16(0x8000); 627 new_rx_ring[new].status = cpu_to_le16(0x8000);
624 } 628 }
625 /* and free any unneeded buffers */ 629 /* and free any unneeded buffers */
626 for (; new < lp->rx_ring_size; new++) { 630 for (; new < lp->rx_ring_size; new++) {
627 if (lp->rx_skbuff[new]) { 631 if (lp->rx_skbuff[new]) {
628 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new], 632 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
629 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); 633 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
630 dev_kfree_skb(lp->rx_skbuff[new]); 634 dev_kfree_skb(lp->rx_skbuff[new]);
631 } 635 }
632 } 636 }
@@ -651,7 +655,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
651 for (; --new >= lp->rx_ring_size; ) { 655 for (; --new >= lp->rx_ring_size; ) {
652 if (new_skb_list[new]) { 656 if (new_skb_list[new]) {
653 pci_unmap_single(lp->pci_dev, new_dma_addr_list[new], 657 pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
654 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); 658 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
655 dev_kfree_skb(new_skb_list[new]); 659 dev_kfree_skb(new_skb_list[new]);
656 } 660 }
657 } 661 }
@@ -678,7 +682,7 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
678 wmb(); /* Make sure adapter sees owner change */ 682 wmb(); /* Make sure adapter sees owner change */
679 if (lp->rx_skbuff[i]) { 683 if (lp->rx_skbuff[i]) {
680 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], 684 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
681 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); 685 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
682 dev_kfree_skb_any(lp->rx_skbuff[i]); 686 dev_kfree_skb_any(lp->rx_skbuff[i]);
683 } 687 }
684 lp->rx_skbuff[i] = NULL; 688 lp->rx_skbuff[i] = NULL;
@@ -1201,7 +1205,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
1201 pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4; 1205 pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;
1202 1206
1203 /* Discard oversize frames. */ 1207 /* Discard oversize frames. */
1204 if (unlikely(pkt_len > PKT_BUF_SZ - 2)) { 1208 if (unlikely(pkt_len > PKT_BUF_SIZE)) {
1205 if (netif_msg_drv(lp)) 1209 if (netif_msg_drv(lp))
1206 printk(KERN_ERR "%s: Impossible packet size %d!\n", 1210 printk(KERN_ERR "%s: Impossible packet size %d!\n",
1207 dev->name, pkt_len); 1211 dev->name, pkt_len);
@@ -1218,26 +1222,26 @@ static void pcnet32_rx_entry(struct net_device *dev,
1218 if (pkt_len > rx_copybreak) { 1222 if (pkt_len > rx_copybreak) {
1219 struct sk_buff *newskb; 1223 struct sk_buff *newskb;
1220 1224
1221 if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) { 1225 if ((newskb = dev_alloc_skb(PKT_BUF_SKB))) {
1222 skb_reserve(newskb, 2); 1226 skb_reserve(newskb, NET_IP_ALIGN);
1223 skb = lp->rx_skbuff[entry]; 1227 skb = lp->rx_skbuff[entry];
1224 pci_unmap_single(lp->pci_dev, 1228 pci_unmap_single(lp->pci_dev,
1225 lp->rx_dma_addr[entry], 1229 lp->rx_dma_addr[entry],
1226 PKT_BUF_SZ - 2, 1230 PKT_BUF_SIZE,
1227 PCI_DMA_FROMDEVICE); 1231 PCI_DMA_FROMDEVICE);
1228 skb_put(skb, pkt_len); 1232 skb_put(skb, pkt_len);
1229 lp->rx_skbuff[entry] = newskb; 1233 lp->rx_skbuff[entry] = newskb;
1230 lp->rx_dma_addr[entry] = 1234 lp->rx_dma_addr[entry] =
1231 pci_map_single(lp->pci_dev, 1235 pci_map_single(lp->pci_dev,
1232 newskb->data, 1236 newskb->data,
1233 PKT_BUF_SZ - 2, 1237 PKT_BUF_SIZE,
1234 PCI_DMA_FROMDEVICE); 1238 PCI_DMA_FROMDEVICE);
1235 rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]); 1239 rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
1236 rx_in_place = 1; 1240 rx_in_place = 1;
1237 } else 1241 } else
1238 skb = NULL; 1242 skb = NULL;
1239 } else { 1243 } else {
1240 skb = dev_alloc_skb(pkt_len + 2); 1244 skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
1241 } 1245 }
1242 1246
1243 if (skb == NULL) { 1247 if (skb == NULL) {
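The hunk above preserves pcnet32's rx_copybreak policy: for a large packet the mapped buffer itself is handed up and a fresh full-size skb is mapped in its place, while a small packet is copied into a right-sized skb so the original DMA buffer stays in the ring. A sketch of just that decision, with invented names:

#include <stddef.h>

enum rx_strategy { RX_SWAP_FULL_BUFFER, RX_COPY_INTO_SMALL_SKB };

/* sketch of the copybreak test used in pcnet32_rx_entry() above */
static enum rx_strategy rx_pick_strategy(size_t pkt_len, size_t rx_copybreak)
{
	return (pkt_len > rx_copybreak) ? RX_SWAP_FULL_BUFFER
					: RX_COPY_INTO_SMALL_SKB;
}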
@@ -1250,7 +1254,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
1250 } 1254 }
1251 skb->dev = dev; 1255 skb->dev = dev;
1252 if (!rx_in_place) { 1256 if (!rx_in_place) {
1253 skb_reserve(skb, 2); /* 16 byte align */ 1257 skb_reserve(skb, NET_IP_ALIGN);
1254 skb_put(skb, pkt_len); /* Make room */ 1258 skb_put(skb, pkt_len); /* Make room */
1255 pci_dma_sync_single_for_cpu(lp->pci_dev, 1259 pci_dma_sync_single_for_cpu(lp->pci_dev,
1256 lp->rx_dma_addr[entry], 1260 lp->rx_dma_addr[entry],
@@ -1291,7 +1295,7 @@ static int pcnet32_rx(struct net_device *dev, int budget)
1291 * The docs say that the buffer length isn't touched, but Andrew 1295 * The docs say that the buffer length isn't touched, but Andrew
1292 * Boyd of QNX reports that some revs of the 79C965 clear it. 1296 * Boyd of QNX reports that some revs of the 79C965 clear it.
1293 */ 1297 */
1294 rxp->buf_length = cpu_to_le16(2 - PKT_BUF_SZ); 1298 rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
1295 wmb(); /* Make sure owner changes after others are visible */ 1299 wmb(); /* Make sure owner changes after others are visible */
1296 rxp->status = cpu_to_le16(0x8000); 1300 rxp->status = cpu_to_le16(0x8000);
1297 entry = (++lp->cur_rx) & lp->rx_mod_mask; 1301 entry = (++lp->cur_rx) & lp->rx_mod_mask;
@@ -1774,8 +1778,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1774 memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); 1778 memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
1775 1779
1776 if (pcnet32_debug & NETIF_MSG_PROBE) { 1780 if (pcnet32_debug & NETIF_MSG_PROBE) {
1777 for (i = 0; i < 6; i++) 1781 DECLARE_MAC_BUF(mac);
1778 printk(" %2.2x", dev->dev_addr[i]); 1782 printk(" %s", print_mac(mac, dev->dev_addr));
1779 1783
1780 /* Version 0x2623 and 0x2624 */ 1784 /* Version 0x2623 and 0x2624 */
1781 if (((chip_version + 1) & 0xfffe) == 0x2624) { 1785 if (((chip_version + 1) & 0xfffe) == 0x2624) {
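The probe hunk above swaps a byte-by-byte printk loop for the DECLARE_MAC_BUF()/print_mac() pair, which formats the whole address into a stack buffer in one call. A userspace analog of that formatting, not the kernel helper itself:

#include <stdint.h>
#include <stdio.h>

/* 17 characters "xx:xx:xx:xx:xx:xx" plus the terminating NUL */
static const char *format_mac(char buf[18], const uint8_t addr[6])
{
	snprintf(buf, 18, "%02x:%02x:%02x:%02x:%02x:%02x",
		 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return buf;
}

int main(void)
{
	char buf[18];
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("MAC %s\n", format_mac(buf, mac));
	return 0;
}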
@@ -2396,7 +2400,7 @@ static int pcnet32_init_ring(struct net_device *dev)
2396 if (rx_skbuff == NULL) { 2400 if (rx_skbuff == NULL) {
2397 if (! 2401 if (!
2398 (rx_skbuff = lp->rx_skbuff[i] = 2402 (rx_skbuff = lp->rx_skbuff[i] =
2399 dev_alloc_skb(PKT_BUF_SZ))) { 2403 dev_alloc_skb(PKT_BUF_SKB))) {
2400 /* there is not much, we can do at this point */ 2404 /* there is not much, we can do at this point */
2401 if (netif_msg_drv(lp)) 2405 if (netif_msg_drv(lp))
2402 printk(KERN_ERR 2406 printk(KERN_ERR
@@ -2404,16 +2408,16 @@ static int pcnet32_init_ring(struct net_device *dev)
2404 dev->name); 2408 dev->name);
2405 return -1; 2409 return -1;
2406 } 2410 }
2407 skb_reserve(rx_skbuff, 2); 2411 skb_reserve(rx_skbuff, NET_IP_ALIGN);
2408 } 2412 }
2409 2413
2410 rmb(); 2414 rmb();
2411 if (lp->rx_dma_addr[i] == 0) 2415 if (lp->rx_dma_addr[i] == 0)
2412 lp->rx_dma_addr[i] = 2416 lp->rx_dma_addr[i] =
2413 pci_map_single(lp->pci_dev, rx_skbuff->data, 2417 pci_map_single(lp->pci_dev, rx_skbuff->data,
2414 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); 2418 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
2415 lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); 2419 lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
2416 lp->rx_ring[i].buf_length = cpu_to_le16(2 - PKT_BUF_SZ); 2420 lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
2417 wmb(); /* Make sure owner changes after all others are visible */ 2421 wmb(); /* Make sure owner changes after all others are visible */
2418 lp->rx_ring[i].status = cpu_to_le16(0x8000); 2422 lp->rx_ring[i].status = cpu_to_le16(0x8000);
2419 } 2423 }
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index 73b6d39ef6b0..ca9b040f9ad9 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -236,12 +236,12 @@ module_init(fixed_mdio_bus_init);
236static void __exit fixed_mdio_bus_exit(void) 236static void __exit fixed_mdio_bus_exit(void)
237{ 237{
238 struct fixed_mdio_bus *fmb = &platform_fmb; 238 struct fixed_mdio_bus *fmb = &platform_fmb;
239 struct fixed_phy *fp; 239 struct fixed_phy *fp, *tmp;
240 240
241 mdiobus_unregister(&fmb->mii_bus); 241 mdiobus_unregister(&fmb->mii_bus);
242 platform_device_unregister(pdev); 242 platform_device_unregister(pdev);
243 243
244 list_for_each_entry(fp, &fmb->phys, node) { 244 list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
245 list_del(&fp->node); 245 list_del(&fp->node);
246 kfree(fp); 246 kfree(fp);
247 } 247 }
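The fixed.c change above matters because the loop body deletes and frees the node it is standing on; the _safe variant caches the next element before the body runs. The same idea in plain C with ordinary next pointers (not the kernel list API):

#include <stdlib.h>

struct node { struct node *next; };

/* free a singly linked list without touching freed memory */
static void free_all(struct node *head)
{
	struct node *cur = head, *tmp;

	while (cur) {
		tmp = cur->next;	/* remember the successor first */
		free(cur);		/* now cur may safely go away   */
		cur = tmp;
	}
}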
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 055af081e027..7eb6e7e848f4 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -46,29 +46,25 @@
46#include <asm/lv1call.h> 46#include <asm/lv1call.h>
47 47
48#include "ps3_gelic_net.h" 48#include "ps3_gelic_net.h"
49#include "ps3_gelic_wireless.h"
49 50
50#define DRV_NAME "Gelic Network Driver" 51#define DRV_NAME "Gelic Network Driver"
51#define DRV_VERSION "1.0" 52#define DRV_VERSION "2.0"
52 53
53MODULE_AUTHOR("SCE Inc."); 54MODULE_AUTHOR("SCE Inc.");
54MODULE_DESCRIPTION("Gelic Network driver"); 55MODULE_DESCRIPTION("Gelic Network driver");
55MODULE_LICENSE("GPL"); 56MODULE_LICENSE("GPL");
56 57
57static inline struct device *ctodev(struct gelic_net_card *card) 58
58{ 59static inline void gelic_card_enable_rxdmac(struct gelic_card *card);
59 return &card->dev->core; 60static inline void gelic_card_disable_rxdmac(struct gelic_card *card);
60} 61static inline void gelic_card_disable_txdmac(struct gelic_card *card);
61static inline u64 bus_id(struct gelic_net_card *card) 62static inline void gelic_card_reset_chain(struct gelic_card *card,
62{ 63 struct gelic_descr_chain *chain,
63 return card->dev->bus_id; 64 struct gelic_descr *start_descr);
64}
65static inline u64 dev_id(struct gelic_net_card *card)
66{
67 return card->dev->dev_id;
68}
69 65
70/* set irq_mask */ 66/* set irq_mask */
71static int gelic_net_set_irq_mask(struct gelic_net_card *card, u64 mask) 67int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask)
72{ 68{
73 int status; 69 int status;
74 70
@@ -76,54 +72,110 @@ static int gelic_net_set_irq_mask(struct gelic_net_card *card, u64 mask)
76 mask, 0); 72 mask, 0);
77 if (status) 73 if (status)
78 dev_info(ctodev(card), 74 dev_info(ctodev(card),
79 "lv1_net_set_interrupt_mask failed %d\n", status); 75 "%s failed %d\n", __func__, status);
80 return status; 76 return status;
81} 77}
82static inline void gelic_net_rx_irq_on(struct gelic_net_card *card) 78
79static inline void gelic_card_rx_irq_on(struct gelic_card *card)
83{ 80{
84 gelic_net_set_irq_mask(card, card->ghiintmask | GELIC_NET_RXINT); 81 card->irq_mask |= GELIC_CARD_RXINT;
82 gelic_card_set_irq_mask(card, card->irq_mask);
85} 83}
86static inline void gelic_net_rx_irq_off(struct gelic_net_card *card) 84static inline void gelic_card_rx_irq_off(struct gelic_card *card)
87{ 85{
88 gelic_net_set_irq_mask(card, card->ghiintmask & ~GELIC_NET_RXINT); 86 card->irq_mask &= ~GELIC_CARD_RXINT;
87 gelic_card_set_irq_mask(card, card->irq_mask);
88}
89
90static void gelic_card_get_ether_port_status(struct gelic_card *card,
91 int inform)
92{
93 u64 v2;
94 struct net_device *ether_netdev;
95
96 lv1_net_control(bus_id(card), dev_id(card),
97 GELIC_LV1_GET_ETH_PORT_STATUS,
98 GELIC_LV1_VLAN_TX_ETHERNET, 0, 0,
99 &card->ether_port_status, &v2);
100
101 if (inform) {
102 ether_netdev = card->netdev[GELIC_PORT_ETHERNET];
103 if (card->ether_port_status & GELIC_LV1_ETHER_LINK_UP)
104 netif_carrier_on(ether_netdev);
105 else
106 netif_carrier_off(ether_netdev);
107 }
108}
109
110void gelic_card_up(struct gelic_card *card)
111{
112 pr_debug("%s: called\n", __func__);
113 down(&card->updown_lock);
114 if (atomic_inc_return(&card->users) == 1) {
115 pr_debug("%s: real do\n", __func__);
116 /* enable irq */
117 gelic_card_set_irq_mask(card, card->irq_mask);
118 /* start rx */
119 gelic_card_enable_rxdmac(card);
120
121 napi_enable(&card->napi);
122 }
123 up(&card->updown_lock);
124 pr_debug("%s: done\n", __func__);
89} 125}
126
127void gelic_card_down(struct gelic_card *card)
128{
129 u64 mask;
130 pr_debug("%s: called\n", __func__);
131 down(&card->updown_lock);
132 if (atomic_dec_if_positive(&card->users) == 0) {
133 pr_debug("%s: real do\n", __func__);
134 napi_disable(&card->napi);
135 /*
136 * Disable irq. Wireless interrupts will
137 * be disabled later if any
138 */
139 mask = card->irq_mask & (GELIC_CARD_WLAN_EVENT_RECEIVED |
140 GELIC_CARD_WLAN_COMMAND_COMPLETED);
141 gelic_card_set_irq_mask(card, mask);
142 /* stop rx */
143 gelic_card_disable_rxdmac(card);
144 gelic_card_reset_chain(card, &card->rx_chain,
145 card->descr + GELIC_NET_TX_DESCRIPTORS);
146 /* stop tx */
147 gelic_card_disable_txdmac(card);
148 }
149 up(&card->updown_lock);
150 pr_debug("%s: done\n", __func__);
151}
152
90/** 153/**
91 * gelic_net_get_descr_status -- returns the status of a descriptor 154 * gelic_descr_get_status -- returns the status of a descriptor
92 * @descr: descriptor to look at 155 * @descr: descriptor to look at
93 * 156 *
94 * returns the status as in the dmac_cmd_status field of the descriptor 157 * returns the status as in the dmac_cmd_status field of the descriptor
95 */ 158 */
96static enum gelic_net_descr_status 159static enum gelic_descr_dma_status
97gelic_net_get_descr_status(struct gelic_net_descr *descr) 160gelic_descr_get_status(struct gelic_descr *descr)
98{ 161{
99 u32 cmd_status; 162 return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK;
100
101 cmd_status = descr->dmac_cmd_status;
102 cmd_status >>= GELIC_NET_DESCR_IND_PROC_SHIFT;
103 return cmd_status;
104} 163}
105 164
106/** 165/**
107 * gelic_net_set_descr_status -- sets the status of a descriptor 166 * gelic_descr_set_status -- sets the status of a descriptor
108 * @descr: descriptor to change 167 * @descr: descriptor to change
109 * @status: status to set in the descriptor 168 * @status: status to set in the descriptor
110 * 169 *
111 * changes the status to the specified value. Doesn't change other bits 170 * changes the status to the specified value. Doesn't change other bits
112 * in the status 171 * in the status
113 */ 172 */
114static void gelic_net_set_descr_status(struct gelic_net_descr *descr, 173static void gelic_descr_set_status(struct gelic_descr *descr,
115 enum gelic_net_descr_status status) 174 enum gelic_descr_dma_status status)
116{ 175{
117 u32 cmd_status; 176 descr->dmac_cmd_status = cpu_to_be32(status |
118 177 (be32_to_cpu(descr->dmac_cmd_status) &
119 /* read the status */ 178 ~GELIC_DESCR_DMA_STAT_MASK));
120 cmd_status = descr->dmac_cmd_status;
121 /* clean the upper 4 bits */
122 cmd_status &= GELIC_NET_DESCR_IND_PROC_MASKO;
123 /* add the status to it */
124 cmd_status |= ((u32)status) << GELIC_NET_DESCR_IND_PROC_SHIFT;
125 /* and write it back */
126 descr->dmac_cmd_status = cmd_status;
127 /* 179 /*
128 * dma_cmd_status field is used to indicate whether the descriptor 180 * dma_cmd_status field is used to indicate whether the descriptor
129 * is valid or not. 181 * is valid or not.
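With the conversion above, dmac_cmd_status is kept big-endian in memory and the DMA status lives in a masked bit field inside it, so the accessors become a byte swap plus mask instead of the old shift-based pair. A standalone sketch of that read-modify-write; the mask value is assumed here, and htonl/ntohl stand in for cpu_to_be32/be32_to_cpu:

#include <stdint.h>
#include <arpa/inet.h>

#define DMA_STAT_MASK	0xf0000000U	/* assumed placement of the status bits */

static uint32_t descr_get_status(uint32_t cmd_status_be)
{
	return ntohl(cmd_status_be) & DMA_STAT_MASK;
}

static uint32_t descr_set_status(uint32_t cmd_status_be, uint32_t status)
{
	uint32_t host = ntohl(cmd_status_be) & ~DMA_STAT_MASK;

	/* only the status bits change; the command bits are preserved */
	return htonl(host | (status & DMA_STAT_MASK));
}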
@@ -134,24 +186,24 @@ static void gelic_net_set_descr_status(struct gelic_net_descr *descr,
134} 186}
135 187
136/** 188/**
137 * gelic_net_free_chain - free descriptor chain 189 * gelic_card_free_chain - free descriptor chain
138 * @card: card structure 190 * @card: card structure
139 * @descr_in: address of desc 191 * @descr_in: address of desc
140 */ 192 */
141static void gelic_net_free_chain(struct gelic_net_card *card, 193static void gelic_card_free_chain(struct gelic_card *card,
142 struct gelic_net_descr *descr_in) 194 struct gelic_descr *descr_in)
143{ 195{
144 struct gelic_net_descr *descr; 196 struct gelic_descr *descr;
145 197
146 for (descr = descr_in; descr && descr->bus_addr; descr = descr->next) { 198 for (descr = descr_in; descr && descr->bus_addr; descr = descr->next) {
147 dma_unmap_single(ctodev(card), descr->bus_addr, 199 dma_unmap_single(ctodev(card), descr->bus_addr,
148 GELIC_NET_DESCR_SIZE, DMA_BIDIRECTIONAL); 200 GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
149 descr->bus_addr = 0; 201 descr->bus_addr = 0;
150 } 202 }
151} 203}
152 204
153/** 205/**
154 * gelic_net_init_chain - links descriptor chain 206 * gelic_card_init_chain - links descriptor chain
155 * @card: card structure 207 * @card: card structure
156 * @chain: address of chain 208 * @chain: address of chain
157 * @start_descr: address of descriptor array 209 * @start_descr: address of descriptor array
@@ -162,22 +214,22 @@ static void gelic_net_free_chain(struct gelic_net_card *card,
162 * 214 *
163 * returns 0 on success, <0 on failure 215 * returns 0 on success, <0 on failure
164 */ 216 */
165static int gelic_net_init_chain(struct gelic_net_card *card, 217static int gelic_card_init_chain(struct gelic_card *card,
166 struct gelic_net_descr_chain *chain, 218 struct gelic_descr_chain *chain,
167 struct gelic_net_descr *start_descr, int no) 219 struct gelic_descr *start_descr, int no)
168{ 220{
169 int i; 221 int i;
170 struct gelic_net_descr *descr; 222 struct gelic_descr *descr;
171 223
172 descr = start_descr; 224 descr = start_descr;
173 memset(descr, 0, sizeof(*descr) * no); 225 memset(descr, 0, sizeof(*descr) * no);
174 226
175 /* set up the hardware pointers in each descriptor */ 227 /* set up the hardware pointers in each descriptor */
176 for (i = 0; i < no; i++, descr++) { 228 for (i = 0; i < no; i++, descr++) {
177 gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE); 229 gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
178 descr->bus_addr = 230 descr->bus_addr =
179 dma_map_single(ctodev(card), descr, 231 dma_map_single(ctodev(card), descr,
180 GELIC_NET_DESCR_SIZE, 232 GELIC_DESCR_SIZE,
181 DMA_BIDIRECTIONAL); 233 DMA_BIDIRECTIONAL);
182 234
183 if (!descr->bus_addr) 235 if (!descr->bus_addr)
@@ -193,7 +245,7 @@ static int gelic_net_init_chain(struct gelic_net_card *card,
193 /* chain bus addr of hw descriptor */ 245 /* chain bus addr of hw descriptor */
194 descr = start_descr; 246 descr = start_descr;
195 for (i = 0; i < no; i++, descr++) { 247 for (i = 0; i < no; i++, descr++) {
196 descr->next_descr_addr = descr->next->bus_addr; 248 descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
197 } 249 }
198 250
199 chain->head = start_descr; 251 chain->head = start_descr;
@@ -208,13 +260,38 @@ iommu_error:
208 for (i--, descr--; 0 <= i; i--, descr--) 260 for (i--, descr--; 0 <= i; i--, descr--)
209 if (descr->bus_addr) 261 if (descr->bus_addr)
210 dma_unmap_single(ctodev(card), descr->bus_addr, 262 dma_unmap_single(ctodev(card), descr->bus_addr,
211 GELIC_NET_DESCR_SIZE, 263 GELIC_DESCR_SIZE,
212 DMA_BIDIRECTIONAL); 264 DMA_BIDIRECTIONAL);
213 return -ENOMEM; 265 return -ENOMEM;
214} 266}
215 267
216/** 268/**
217 * gelic_net_prepare_rx_descr - reinitializes a rx descriptor 269 * gelic_card_reset_chain - reset status of a descriptor chain
270 * @card: card structure
271 * @chain: address of chain
272 * @start_descr: address of descriptor array
273 *
274 * Reset the status of dma descriptors to ready state
275 * and re-initialize the hardware chain for later use
276 */
277static void gelic_card_reset_chain(struct gelic_card *card,
278 struct gelic_descr_chain *chain,
279 struct gelic_descr *start_descr)
280{
281 struct gelic_descr *descr;
282
283 for (descr = start_descr; start_descr != descr->next; descr++) {
284 gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
285 descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
286 }
287
288 chain->head = start_descr;
289 chain->tail = (descr - 1);
290
291 (descr - 1)->next_descr_addr = 0;
292}
293/**
294 * gelic_descr_prepare_rx - reinitializes a rx descriptor
218 * @card: card structure 295 * @card: card structure
219 * @descr: descriptor to re-init 296 * @descr: descriptor to re-init
220 * 297 *
@@ -223,29 +300,27 @@ iommu_error:
223 * allocates a new rx skb, iommu-maps it and attaches it to the descriptor. 300 * allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
224 * Activate the descriptor state-wise 301 * Activate the descriptor state-wise
225 */ 302 */
226static int gelic_net_prepare_rx_descr(struct gelic_net_card *card, 303static int gelic_descr_prepare_rx(struct gelic_card *card,
227 struct gelic_net_descr *descr) 304 struct gelic_descr *descr)
228{ 305{
229 int offset; 306 int offset;
230 unsigned int bufsize; 307 unsigned int bufsize;
231 308
232 if (gelic_net_get_descr_status(descr) != GELIC_NET_DESCR_NOT_IN_USE) { 309 if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
233 dev_info(ctodev(card), "%s: ERROR status \n", __func__); 310 dev_info(ctodev(card), "%s: ERROR status \n", __func__);
234 }
235 /* we need to round up the buffer size to a multiple of 128 */ 311 /* we need to round up the buffer size to a multiple of 128 */
236 bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN); 312 bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
237 313
238 /* and we need to have it 128 byte aligned, therefore we allocate a 314 /* and we need to have it 128 byte aligned, therefore we allocate a
239 * bit more */ 315 * bit more */
240 descr->skb = netdev_alloc_skb(card->netdev, 316 descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
241 bufsize + GELIC_NET_RXBUF_ALIGN - 1);
242 if (!descr->skb) { 317 if (!descr->skb) {
243 descr->buf_addr = 0; /* tell DMAC don't touch memory */ 318 descr->buf_addr = 0; /* tell DMAC don't touch memory */
244 dev_info(ctodev(card), 319 dev_info(ctodev(card),
245 "%s:allocate skb failed !!\n", __func__); 320 "%s:allocate skb failed !!\n", __func__);
246 return -ENOMEM; 321 return -ENOMEM;
247 } 322 }
248 descr->buf_size = bufsize; 323 descr->buf_size = cpu_to_be32(bufsize);
249 descr->dmac_cmd_status = 0; 324 descr->dmac_cmd_status = 0;
250 descr->result_size = 0; 325 descr->result_size = 0;
251 descr->valid_size = 0; 326 descr->valid_size = 0;
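gelic_descr_prepare_rx() above over-allocates by GELIC_NET_RXBUF_ALIGN - 1 bytes and then, in the following hunk, skb_reserve()s up to the next 128-byte boundary, because the DMAC requires aligned receive buffers. The same trick with plain pointers, as a sketch:

#include <stdint.h>
#include <stdlib.h>

#define RXBUF_ALIGN 128		/* stands in for GELIC_NET_RXBUF_ALIGN */

/* returns an aligned data pointer; *raw_out keeps the address to free() */
static void *alloc_rx_aligned(size_t len, void **raw_out)
{
	uint8_t *raw = malloc(len + RXBUF_ALIGN - 1);
	uintptr_t off;

	if (!raw)
		return NULL;
	*raw_out = raw;
	off = (uintptr_t)raw % RXBUF_ALIGN;
	return off ? raw + (RXBUF_ALIGN - off) : raw;
}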
@@ -256,63 +331,64 @@ static int gelic_net_prepare_rx_descr(struct gelic_net_card *card,
256 if (offset) 331 if (offset)
257 skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset); 332 skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
258 /* io-mmu-map the skb */ 333 /* io-mmu-map the skb */
259 descr->buf_addr = dma_map_single(ctodev(card), descr->skb->data, 334 descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card),
260 GELIC_NET_MAX_MTU, 335 descr->skb->data,
261 DMA_FROM_DEVICE); 336 GELIC_NET_MAX_MTU,
337 DMA_FROM_DEVICE));
262 if (!descr->buf_addr) { 338 if (!descr->buf_addr) {
263 dev_kfree_skb_any(descr->skb); 339 dev_kfree_skb_any(descr->skb);
264 descr->skb = NULL; 340 descr->skb = NULL;
265 dev_info(ctodev(card), 341 dev_info(ctodev(card),
266 "%s:Could not iommu-map rx buffer\n", __func__); 342 "%s:Could not iommu-map rx buffer\n", __func__);
267 gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE); 343 gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
268 return -ENOMEM; 344 return -ENOMEM;
269 } else { 345 } else {
270 gelic_net_set_descr_status(descr, GELIC_NET_DESCR_CARDOWNED); 346 gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
271 return 0; 347 return 0;
272 } 348 }
273} 349}
274 350
275/** 351/**
276 * gelic_net_release_rx_chain - free all skb of rx descr 352 * gelic_card_release_rx_chain - free all skb of rx descr
277 * @card: card structure 353 * @card: card structure
278 * 354 *
279 */ 355 */
280static void gelic_net_release_rx_chain(struct gelic_net_card *card) 356static void gelic_card_release_rx_chain(struct gelic_card *card)
281{ 357{
282 struct gelic_net_descr *descr = card->rx_chain.head; 358 struct gelic_descr *descr = card->rx_chain.head;
283 359
284 do { 360 do {
285 if (descr->skb) { 361 if (descr->skb) {
286 dma_unmap_single(ctodev(card), 362 dma_unmap_single(ctodev(card),
287 descr->buf_addr, 363 be32_to_cpu(descr->buf_addr),
288 descr->skb->len, 364 descr->skb->len,
289 DMA_FROM_DEVICE); 365 DMA_FROM_DEVICE);
290 descr->buf_addr = 0; 366 descr->buf_addr = 0;
291 dev_kfree_skb_any(descr->skb); 367 dev_kfree_skb_any(descr->skb);
292 descr->skb = NULL; 368 descr->skb = NULL;
293 gelic_net_set_descr_status(descr, 369 gelic_descr_set_status(descr,
294 GELIC_NET_DESCR_NOT_IN_USE); 370 GELIC_DESCR_DMA_NOT_IN_USE);
295 } 371 }
296 descr = descr->next; 372 descr = descr->next;
297 } while (descr != card->rx_chain.head); 373 } while (descr != card->rx_chain.head);
298} 374}
299 375
300/** 376/**
301 * gelic_net_fill_rx_chain - fills descriptors/skbs in the rx chains 377 * gelic_card_fill_rx_chain - fills descriptors/skbs in the rx chains
302 * @card: card structure 378 * @card: card structure
303 * 379 *
304 * fills all descriptors in the rx chain: allocates skbs 380 * fills all descriptors in the rx chain: allocates skbs
305 * and iommu-maps them. 381 * and iommu-maps them.
306 * returns 0 on success, <0 on failure 382 * returns 0 on success, < 0 on failure
307 */ 383 */
308static int gelic_net_fill_rx_chain(struct gelic_net_card *card) 384static int gelic_card_fill_rx_chain(struct gelic_card *card)
309{ 385{
310 struct gelic_net_descr *descr = card->rx_chain.head; 386 struct gelic_descr *descr = card->rx_chain.head;
311 int ret; 387 int ret;
312 388
313 do { 389 do {
314 if (!descr->skb) { 390 if (!descr->skb) {
315 ret = gelic_net_prepare_rx_descr(card, descr); 391 ret = gelic_descr_prepare_rx(card, descr);
316 if (ret) 392 if (ret)
317 goto rewind; 393 goto rewind;
318 } 394 }
@@ -321,41 +397,41 @@ static int gelic_net_fill_rx_chain(struct gelic_net_card *card)
321 397
322 return 0; 398 return 0;
323rewind: 399rewind:
324 gelic_net_release_rx_chain(card); 400 gelic_card_release_rx_chain(card);
325 return ret; 401 return ret;
326} 402}
327 403
328/** 404/**
329 * gelic_net_alloc_rx_skbs - allocates rx skbs in rx descriptor chains 405 * gelic_card_alloc_rx_skbs - allocates rx skbs in rx descriptor chains
330 * @card: card structure 406 * @card: card structure
331 * 407 *
332 * returns 0 on success, <0 on failure 408 * returns 0 on success, < 0 on failure
333 */ 409 */
334static int gelic_net_alloc_rx_skbs(struct gelic_net_card *card) 410static int gelic_card_alloc_rx_skbs(struct gelic_card *card)
335{ 411{
336 struct gelic_net_descr_chain *chain; 412 struct gelic_descr_chain *chain;
337 int ret; 413 int ret;
338 chain = &card->rx_chain; 414 chain = &card->rx_chain;
339 ret = gelic_net_fill_rx_chain(card); 415 ret = gelic_card_fill_rx_chain(card);
340 chain->head = card->rx_top->prev; /* point to the last */ 416 chain->tail = card->rx_top->prev; /* point to the last */
341 return ret; 417 return ret;
342} 418}
343 419
344/** 420/**
345 * gelic_net_release_tx_descr - processes a used tx descriptor 421 * gelic_descr_release_tx - processes a used tx descriptor
346 * @card: card structure 422 * @card: card structure
347 * @descr: descriptor to release 423 * @descr: descriptor to release
348 * 424 *
349 * releases a used tx descriptor (unmapping, freeing of skb) 425 * releases a used tx descriptor (unmapping, freeing of skb)
350 */ 426 */
351static void gelic_net_release_tx_descr(struct gelic_net_card *card, 427static void gelic_descr_release_tx(struct gelic_card *card,
352 struct gelic_net_descr *descr) 428 struct gelic_descr *descr)
353{ 429{
354 struct sk_buff *skb = descr->skb; 430 struct sk_buff *skb = descr->skb;
355 431
356 BUG_ON(!(descr->data_status & (1 << GELIC_NET_TXDESC_TAIL))); 432 BUG_ON(!(be32_to_cpu(descr->data_status) & GELIC_DESCR_TX_TAIL));
357 433
358 dma_unmap_single(ctodev(card), descr->buf_addr, skb->len, 434 dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), skb->len,
359 DMA_TO_DEVICE); 435 DMA_TO_DEVICE);
360 dev_kfree_skb_any(skb); 436 dev_kfree_skb_any(skb);
361 437
@@ -369,59 +445,75 @@ static void gelic_net_release_tx_descr(struct gelic_net_card *card,
369 descr->skb = NULL; 445 descr->skb = NULL;
370 446
371 /* set descr status */ 447 /* set descr status */
372 gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE); 448 gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
449}
450
451static void gelic_card_stop_queues(struct gelic_card *card)
452{
453 netif_stop_queue(card->netdev[GELIC_PORT_ETHERNET]);
454
455 if (card->netdev[GELIC_PORT_WIRELESS])
456 netif_stop_queue(card->netdev[GELIC_PORT_WIRELESS]);
373} 457}
458static void gelic_card_wake_queues(struct gelic_card *card)
459{
460 netif_wake_queue(card->netdev[GELIC_PORT_ETHERNET]);
374 461
462 if (card->netdev[GELIC_PORT_WIRELESS])
463 netif_wake_queue(card->netdev[GELIC_PORT_WIRELESS]);
464}
375/** 465/**
376 * gelic_net_release_tx_chain - processes sent tx descriptors 466 * gelic_card_release_tx_chain - processes sent tx descriptors
377 * @card: adapter structure 467 * @card: adapter structure
378 * @stop: net_stop sequence 468 * @stop: net_stop sequence
379 * 469 *
380 * releases the tx descriptors that gelic has finished with 470 * releases the tx descriptors that gelic has finished with
381 */ 471 */
382static void gelic_net_release_tx_chain(struct gelic_net_card *card, int stop) 472static void gelic_card_release_tx_chain(struct gelic_card *card, int stop)
383{ 473{
384 struct gelic_net_descr_chain *tx_chain; 474 struct gelic_descr_chain *tx_chain;
385 enum gelic_net_descr_status status; 475 enum gelic_descr_dma_status status;
476 struct net_device *netdev;
386 int release = 0; 477 int release = 0;
387 478
388 for (tx_chain = &card->tx_chain; 479 for (tx_chain = &card->tx_chain;
389 tx_chain->head != tx_chain->tail && tx_chain->tail; 480 tx_chain->head != tx_chain->tail && tx_chain->tail;
390 tx_chain->tail = tx_chain->tail->next) { 481 tx_chain->tail = tx_chain->tail->next) {
391 status = gelic_net_get_descr_status(tx_chain->tail); 482 status = gelic_descr_get_status(tx_chain->tail);
483 netdev = tx_chain->tail->skb->dev;
392 switch (status) { 484 switch (status) {
393 case GELIC_NET_DESCR_RESPONSE_ERROR: 485 case GELIC_DESCR_DMA_RESPONSE_ERROR:
394 case GELIC_NET_DESCR_PROTECTION_ERROR: 486 case GELIC_DESCR_DMA_PROTECTION_ERROR:
395 case GELIC_NET_DESCR_FORCE_END: 487 case GELIC_DESCR_DMA_FORCE_END:
396 if (printk_ratelimit()) 488 if (printk_ratelimit())
397 dev_info(ctodev(card), 489 dev_info(ctodev(card),
398 "%s: forcing end of tx descriptor " \ 490 "%s: forcing end of tx descriptor " \
399 "with status %x\n", 491 "with status %x\n",
400 __func__, status); 492 __func__, status);
401 card->netdev->stats.tx_dropped++; 493 netdev->stats.tx_dropped++;
402 break; 494 break;
403 495
404 case GELIC_NET_DESCR_COMPLETE: 496 case GELIC_DESCR_DMA_COMPLETE:
405 if (tx_chain->tail->skb) { 497 if (tx_chain->tail->skb) {
406 card->netdev->stats.tx_packets++; 498 netdev->stats.tx_packets++;
407 card->netdev->stats.tx_bytes += 499 netdev->stats.tx_bytes +=
408 tx_chain->tail->skb->len; 500 tx_chain->tail->skb->len;
409 } 501 }
410 break; 502 break;
411 503
412 case GELIC_NET_DESCR_CARDOWNED: 504 case GELIC_DESCR_DMA_CARDOWNED:
413 /* pending tx request */ 505 /* pending tx request */
414 default: 506 default:
415 /* any other value (== GELIC_NET_DESCR_NOT_IN_USE) */ 507 /* any other value (== GELIC_DESCR_DMA_NOT_IN_USE) */
416 if (!stop) 508 if (!stop)
417 goto out; 509 goto out;
418 } 510 }
419 gelic_net_release_tx_descr(card, tx_chain->tail); 511 gelic_descr_release_tx(card, tx_chain->tail);
420 release ++; 512 release ++;
421 } 513 }
422out: 514out:
423 if (!stop && release) 515 if (!stop && release)
424 netif_wake_queue(card->netdev); 516 gelic_card_wake_queues(card);
425} 517}
426 518
427/** 519/**
@@ -432,9 +524,9 @@ out:
432 * netdev interface. It also sets up multicast, allmulti and promisc 524 * netdev interface. It also sets up multicast, allmulti and promisc
433 * flags appropriately 525 * flags appropriately
434 */ 526 */
435static void gelic_net_set_multi(struct net_device *netdev) 527void gelic_net_set_multi(struct net_device *netdev)
436{ 528{
437 struct gelic_net_card *card = netdev_priv(netdev); 529 struct gelic_card *card = netdev_card(netdev);
438 struct dev_mc_list *mc; 530 struct dev_mc_list *mc;
439 unsigned int i; 531 unsigned int i;
440 uint8_t *p; 532 uint8_t *p;
@@ -456,8 +548,8 @@ static void gelic_net_set_multi(struct net_device *netdev)
456 "lv1_net_add_multicast_address failed, %d\n", 548 "lv1_net_add_multicast_address failed, %d\n",
457 status); 549 status);
458 550
459 if (netdev->flags & IFF_ALLMULTI 551 if ((netdev->flags & IFF_ALLMULTI) ||
460 || netdev->mc_count > GELIC_NET_MC_COUNT_MAX) { /* list max */ 552 (netdev->mc_count > GELIC_NET_MC_COUNT_MAX)) {
461 status = lv1_net_add_multicast_address(bus_id(card), 553 status = lv1_net_add_multicast_address(bus_id(card),
462 dev_id(card), 554 dev_id(card),
463 0, 1); 555 0, 1);
@@ -468,7 +560,7 @@ static void gelic_net_set_multi(struct net_device *netdev)
468 return; 560 return;
469 } 561 }
470 562
471 /* set multicast address */ 563 /* set multicast addresses */
472 for (mc = netdev->mc_list; mc; mc = mc->next) { 564 for (mc = netdev->mc_list; mc; mc = mc->next) {
473 addr = 0; 565 addr = 0;
474 p = mc->dmi_addr; 566 p = mc->dmi_addr;
@@ -487,31 +579,42 @@ static void gelic_net_set_multi(struct net_device *netdev)
487} 579}
488 580
489/** 581/**
490 * gelic_net_enable_rxdmac - enables the receive DMA controller 582 * gelic_card_enable_rxdmac - enables the receive DMA controller
491 * @card: card structure 583 * @card: card structure
492 * 584 *
493 * gelic_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN 585 * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
494 * in the GDADMACCNTR register 586 * in the GDADMACCNTR register
495 */ 587 */
496static inline void gelic_net_enable_rxdmac(struct gelic_net_card *card) 588static inline void gelic_card_enable_rxdmac(struct gelic_card *card)
497{ 589{
498 int status; 590 int status;
499 591
592#ifdef DEBUG
593 if (gelic_descr_get_status(card->rx_chain.head) !=
594 GELIC_DESCR_DMA_CARDOWNED) {
595 printk(KERN_ERR "%s: status=%x\n", __func__,
596 be32_to_cpu(card->rx_chain.head->dmac_cmd_status));
597 printk(KERN_ERR "%s: nextphy=%x\n", __func__,
598 be32_to_cpu(card->rx_chain.head->next_descr_addr));
599 printk(KERN_ERR "%s: head=%p\n", __func__,
600 card->rx_chain.head);
601 }
602#endif
500 status = lv1_net_start_rx_dma(bus_id(card), dev_id(card), 603 status = lv1_net_start_rx_dma(bus_id(card), dev_id(card),
501 card->rx_chain.tail->bus_addr, 0); 604 card->rx_chain.head->bus_addr, 0);
502 if (status) 605 if (status)
503 dev_info(ctodev(card), 606 dev_info(ctodev(card),
504 "lv1_net_start_rx_dma failed, status=%d\n", status); 607 "lv1_net_start_rx_dma failed, status=%d\n", status);
505} 608}
506 609
507/** 610/**
508 * gelic_net_disable_rxdmac - disables the receive DMA controller 611 * gelic_card_disable_rxdmac - disables the receive DMA controller
509 * @card: card structure 612 * @card: card structure
510 * 613 *
511 * gelic_net_disable_rxdmac terminates processing on the DMA controller by 614 * gelic_card_disable_rxdmac terminates processing on the DMA controller by
 512 * turning off DMA and issuing a force end 615 * turning off DMA and issuing a force end
513 */ 616 */
514static inline void gelic_net_disable_rxdmac(struct gelic_net_card *card) 617static inline void gelic_card_disable_rxdmac(struct gelic_card *card)
515{ 618{
516 int status; 619 int status;
517 620
@@ -523,13 +626,13 @@ static inline void gelic_net_disable_rxdmac(struct gelic_net_card *card)
523} 626}
524 627
525/** 628/**
526 * gelic_net_disable_txdmac - disables the transmit DMA controller 629 * gelic_card_disable_txdmac - disables the transmit DMA controller
527 * @card: card structure 630 * @card: card structure
528 * 631 *
529 * gelic_net_disable_txdmac terminates processing on the DMA controller by 632 * gelic_card_disable_txdmac terminates processing on the DMA controller by
 530 * turning off DMA and issuing a force end 633 * turning off DMA and issuing a force end
531 */ 634 */
532static inline void gelic_net_disable_txdmac(struct gelic_net_card *card) 635static inline void gelic_card_disable_txdmac(struct gelic_card *card)
533{ 636{
534 int status; 637 int status;
535 638
@@ -546,51 +649,37 @@ static inline void gelic_net_disable_txdmac(struct gelic_net_card *card)
546 * 649 *
547 * always returns 0 650 * always returns 0
548 */ 651 */
549static int gelic_net_stop(struct net_device *netdev) 652int gelic_net_stop(struct net_device *netdev)
550{ 653{
551 struct gelic_net_card *card = netdev_priv(netdev); 654 struct gelic_card *card;
552
553 napi_disable(&card->napi);
554 netif_stop_queue(netdev);
555 655
556 /* turn off DMA, force end */ 656 pr_debug("%s: start\n", __func__);
557 gelic_net_disable_rxdmac(card);
558 gelic_net_disable_txdmac(card);
559
560 gelic_net_set_irq_mask(card, 0);
561
562 /* disconnect event port */
563 free_irq(card->netdev->irq, card->netdev);
564 ps3_sb_event_receive_port_destroy(card->dev, card->netdev->irq);
565 card->netdev->irq = NO_IRQ;
566 657
658 netif_stop_queue(netdev);
567 netif_carrier_off(netdev); 659 netif_carrier_off(netdev);
568 660
569 /* release chains */ 661 card = netdev_card(netdev);
570 gelic_net_release_tx_chain(card, 1); 662 gelic_card_down(card);
571 gelic_net_release_rx_chain(card);
572
573 gelic_net_free_chain(card, card->tx_top);
574 gelic_net_free_chain(card, card->rx_top);
575 663
664 pr_debug("%s: done\n", __func__);
576 return 0; 665 return 0;
577} 666}
578 667
579/** 668/**
580 * gelic_net_get_next_tx_descr - returns the next available tx descriptor 669 * gelic_card_get_next_tx_descr - returns the next available tx descriptor
581 * @card: device structure to get descriptor from 670 * @card: device structure to get descriptor from
582 * 671 *
583 * returns the address of the next descriptor, or NULL if not available. 672 * returns the address of the next descriptor, or NULL if not available.
584 */ 673 */
585static struct gelic_net_descr * 674static struct gelic_descr *
586gelic_net_get_next_tx_descr(struct gelic_net_card *card) 675gelic_card_get_next_tx_descr(struct gelic_card *card)
587{ 676{
588 if (!card->tx_chain.head) 677 if (!card->tx_chain.head)
589 return NULL; 678 return NULL;
590 /* see if the next descriptor is free */ 679 /* see if the next descriptor is free */
591 if (card->tx_chain.tail != card->tx_chain.head->next && 680 if (card->tx_chain.tail != card->tx_chain.head->next &&
592 gelic_net_get_descr_status(card->tx_chain.head) == 681 gelic_descr_get_status(card->tx_chain.head) ==
593 GELIC_NET_DESCR_NOT_IN_USE) 682 GELIC_DESCR_DMA_NOT_IN_USE)
594 return card->tx_chain.head; 683 return card->tx_chain.head;
595 else 684 else
596 return NULL; 685 return NULL;
@@ -606,32 +695,33 @@ gelic_net_get_next_tx_descr(struct gelic_net_card *card)
606 * depending on hardware checksum settings. This function assumes a wmb() 695 * depending on hardware checksum settings. This function assumes a wmb()
607 * has executed before. 696 * has executed before.
608 */ 697 */
609static void gelic_net_set_txdescr_cmdstat(struct gelic_net_descr *descr, 698static void gelic_descr_set_tx_cmdstat(struct gelic_descr *descr,
610 struct sk_buff *skb) 699 struct sk_buff *skb)
611{ 700{
612 if (skb->ip_summed != CHECKSUM_PARTIAL) 701 if (skb->ip_summed != CHECKSUM_PARTIAL)
613 descr->dmac_cmd_status = GELIC_NET_DMAC_CMDSTAT_NOCS | 702 descr->dmac_cmd_status =
614 GELIC_NET_DMAC_CMDSTAT_END_FRAME; 703 cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM |
704 GELIC_DESCR_TX_DMA_FRAME_TAIL);
615 else { 705 else {
616 /* is packet ip? 706 /* is packet ip?
617 * if yes: tcp? udp? */ 707 * if yes: tcp? udp? */
618 if (skb->protocol == htons(ETH_P_IP)) { 708 if (skb->protocol == htons(ETH_P_IP)) {
619 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 709 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
620 descr->dmac_cmd_status = 710 descr->dmac_cmd_status =
621 GELIC_NET_DMAC_CMDSTAT_TCPCS | 711 cpu_to_be32(GELIC_DESCR_DMA_CMD_TCP_CHKSUM |
622 GELIC_NET_DMAC_CMDSTAT_END_FRAME; 712 GELIC_DESCR_TX_DMA_FRAME_TAIL);
623 713
624 else if (ip_hdr(skb)->protocol == IPPROTO_UDP) 714 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
625 descr->dmac_cmd_status = 715 descr->dmac_cmd_status =
626 GELIC_NET_DMAC_CMDSTAT_UDPCS | 716 cpu_to_be32(GELIC_DESCR_DMA_CMD_UDP_CHKSUM |
627 GELIC_NET_DMAC_CMDSTAT_END_FRAME; 717 GELIC_DESCR_TX_DMA_FRAME_TAIL);
628 else /* 718 else /*
629 * the stack should checksum non-tcp and non-udp 719 * the stack should checksum non-tcp and non-udp
 630 * packets on its own: NETIF_F_IP_CSUM 720 * packets on its own: NETIF_F_IP_CSUM
631 */ 721 */
632 descr->dmac_cmd_status = 722 descr->dmac_cmd_status =
633 GELIC_NET_DMAC_CMDSTAT_NOCS | 723 cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM |
634 GELIC_NET_DMAC_CMDSTAT_END_FRAME; 724 GELIC_DESCR_TX_DMA_FRAME_TAIL);
635 } 725 }
636 } 726 }
637} 727}
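The checksum hunk above boils down to a small decision table: ask the DMAC to insert a TCP or UDP checksum only when the stack requested CHECKSUM_PARTIAL on an IPv4 frame, and mark everything else "no checksum" (with the frame-tail bit set in every case). A standalone approximation of that selection; the flag values are invented for illustration:

#include <stdint.h>
#include <stdbool.h>

#define CMD_NO_CHKSUM	0x1U	/* illustrative values only */
#define CMD_TCP_CHKSUM	0x2U
#define CMD_UDP_CHKSUM	0x4U

static uint32_t tx_csum_cmd(bool csum_partial, bool is_ipv4, uint8_t ip_proto)
{
	if (!csum_partial || !is_ipv4)
		return CMD_NO_CHKSUM;
	if (ip_proto == 6)		/* IPPROTO_TCP */
		return CMD_TCP_CHKSUM;
	if (ip_proto == 17)		/* IPPROTO_UDP */
		return CMD_UDP_CHKSUM;
	return CMD_NO_CHKSUM;		/* stack checksums everything else */
}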
@@ -662,7 +752,7 @@ static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb,
662} 752}
663 753
664/** 754/**
665 * gelic_net_prepare_tx_descr_v - get dma address of skb_data 755 * gelic_descr_prepare_tx - setup a descriptor for sending packets
666 * @card: card structure 756 * @card: card structure
667 * @descr: descriptor structure 757 * @descr: descriptor structure
668 * @skb: packet to use 758 * @skb: packet to use
@@ -670,16 +760,19 @@ static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb,
670 * returns 0 on success, <0 on failure. 760 * returns 0 on success, <0 on failure.
671 * 761 *
672 */ 762 */
673static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card, 763static int gelic_descr_prepare_tx(struct gelic_card *card,
674 struct gelic_net_descr *descr, 764 struct gelic_descr *descr,
675 struct sk_buff *skb) 765 struct sk_buff *skb)
676{ 766{
677 dma_addr_t buf; 767 dma_addr_t buf;
678 768
679 if (card->vlan_index != -1) { 769 if (card->vlan_required) {
680 struct sk_buff *skb_tmp; 770 struct sk_buff *skb_tmp;
771 enum gelic_port_type type;
772
773 type = netdev_port(skb->dev)->type;
681 skb_tmp = gelic_put_vlan_tag(skb, 774 skb_tmp = gelic_put_vlan_tag(skb,
682 card->vlan_id[card->vlan_index]); 775 card->vlan[type].tx);
683 if (!skb_tmp) 776 if (!skb_tmp)
684 return -ENOMEM; 777 return -ENOMEM;
685 skb = skb_tmp; 778 skb = skb_tmp;
@@ -694,12 +787,12 @@ static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card,
694 return -ENOMEM; 787 return -ENOMEM;
695 } 788 }
696 789
697 descr->buf_addr = buf; 790 descr->buf_addr = cpu_to_be32(buf);
698 descr->buf_size = skb->len; 791 descr->buf_size = cpu_to_be32(skb->len);
699 descr->skb = skb; 792 descr->skb = skb;
700 descr->data_status = 0; 793 descr->data_status = 0;
701 descr->next_descr_addr = 0; /* terminate hw descr */ 794 descr->next_descr_addr = 0; /* terminate hw descr */
702 gelic_net_set_txdescr_cmdstat(descr, skb); 795 gelic_descr_set_tx_cmdstat(descr, skb);
703 796
704 /* bump free descriptor pointer */ 797 /* bump free descriptor pointer */
705 card->tx_chain.head = descr->next; 798 card->tx_chain.head = descr->next;
@@ -707,20 +800,20 @@ static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card,
707} 800}
708 801
709/** 802/**
710 * gelic_net_kick_txdma - enables TX DMA processing 803 * gelic_card_kick_txdma - enables TX DMA processing
711 * @card: card structure 804 * @card: card structure
712 * @descr: descriptor address to enable TX processing at 805 * @descr: descriptor address to enable TX processing at
713 * 806 *
714 */ 807 */
715static int gelic_net_kick_txdma(struct gelic_net_card *card, 808static int gelic_card_kick_txdma(struct gelic_card *card,
716 struct gelic_net_descr *descr) 809 struct gelic_descr *descr)
717{ 810{
718 int status = 0; 811 int status = 0;
719 812
720 if (card->tx_dma_progress) 813 if (card->tx_dma_progress)
721 return 0; 814 return 0;
722 815
723 if (gelic_net_get_descr_status(descr) == GELIC_NET_DESCR_CARDOWNED) { 816 if (gelic_descr_get_status(descr) == GELIC_DESCR_DMA_CARDOWNED) {
724 card->tx_dma_progress = 1; 817 card->tx_dma_progress = 1;
725 status = lv1_net_start_tx_dma(bus_id(card), dev_id(card), 818 status = lv1_net_start_tx_dma(bus_id(card), dev_id(card),
726 descr->bus_addr, 0); 819 descr->bus_addr, 0);
@@ -738,56 +831,56 @@ static int gelic_net_kick_txdma(struct gelic_net_card *card,
738 * 831 *
739 * returns 0 on success, <0 on failure 832 * returns 0 on success, <0 on failure
740 */ 833 */
741static int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev) 834int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
742{ 835{
743 struct gelic_net_card *card = netdev_priv(netdev); 836 struct gelic_card *card = netdev_card(netdev);
744 struct gelic_net_descr *descr; 837 struct gelic_descr *descr;
745 int result; 838 int result;
746 unsigned long flags; 839 unsigned long flags;
747 840
748 spin_lock_irqsave(&card->tx_dma_lock, flags); 841 spin_lock_irqsave(&card->tx_lock, flags);
749 842
750 gelic_net_release_tx_chain(card, 0); 843 gelic_card_release_tx_chain(card, 0);
751 844
752 descr = gelic_net_get_next_tx_descr(card); 845 descr = gelic_card_get_next_tx_descr(card);
753 if (!descr) { 846 if (!descr) {
754 /* 847 /*
755 * no more descriptors free 848 * no more descriptors free
756 */ 849 */
757 netif_stop_queue(netdev); 850 gelic_card_stop_queues(card);
758 spin_unlock_irqrestore(&card->tx_dma_lock, flags); 851 spin_unlock_irqrestore(&card->tx_lock, flags);
759 return NETDEV_TX_BUSY; 852 return NETDEV_TX_BUSY;
760 } 853 }
761 854
762 result = gelic_net_prepare_tx_descr_v(card, descr, skb); 855 result = gelic_descr_prepare_tx(card, descr, skb);
763 if (result) { 856 if (result) {
764 /* 857 /*
 765 * DMA map failed. As chances are that failure 858 * DMA map failed. As chances are that failure
766 * would continue, just release skb and return 859 * would continue, just release skb and return
767 */ 860 */
768 card->netdev->stats.tx_dropped++; 861 netdev->stats.tx_dropped++;
769 dev_kfree_skb_any(skb); 862 dev_kfree_skb_any(skb);
770 spin_unlock_irqrestore(&card->tx_dma_lock, flags); 863 spin_unlock_irqrestore(&card->tx_lock, flags);
771 return NETDEV_TX_OK; 864 return NETDEV_TX_OK;
772 } 865 }
773 /* 866 /*
774 * link this prepared descriptor to previous one 867 * link this prepared descriptor to previous one
775 * to achieve high performance 868 * to achieve high performance
776 */ 869 */
777 descr->prev->next_descr_addr = descr->bus_addr; 870 descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr);
778 /* 871 /*
779 * as hardware descriptor is modified in the above lines, 872 * as hardware descriptor is modified in the above lines,
780 * ensure that the hardware sees it 873 * ensure that the hardware sees it
781 */ 874 */
782 wmb(); 875 wmb();
783 if (gelic_net_kick_txdma(card, descr)) { 876 if (gelic_card_kick_txdma(card, descr)) {
784 /* 877 /*
785 * kick failed. 878 * kick failed.
786 * release descriptors which were just prepared 879 * release descriptors which were just prepared
787 */ 880 */
788 card->netdev->stats.tx_dropped++; 881 netdev->stats.tx_dropped++;
789 gelic_net_release_tx_descr(card, descr); 882 gelic_descr_release_tx(card, descr);
790 gelic_net_release_tx_descr(card, descr->next); 883 gelic_descr_release_tx(card, descr->next);
791 card->tx_chain.tail = descr->next->next; 884 card->tx_chain.tail = descr->next->next;
792 dev_info(ctodev(card), "%s: kick failure\n", __func__); 885 dev_info(ctodev(card), "%s: kick failure\n", __func__);
793 } else { 886 } else {
@@ -795,7 +888,7 @@ static int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
795 netdev->trans_start = jiffies; 888 netdev->trans_start = jiffies;
796 } 889 }
797 890
798 spin_unlock_irqrestore(&card->tx_dma_lock, flags); 891 spin_unlock_irqrestore(&card->tx_lock, flags);
799 return NETDEV_TX_OK; 892 return NETDEV_TX_OK;
800} 893}
801 894
@@ -803,30 +896,34 @@ static int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
803 * gelic_net_pass_skb_up - takes an skb from a descriptor and passes it on 896 * gelic_net_pass_skb_up - takes an skb from a descriptor and passes it on
804 * @descr: descriptor to process 897 * @descr: descriptor to process
805 * @card: card structure 898 * @card: card structure
899 * @netdev: net_device structure to be passed packet
806 * 900 *
807 * iommu-unmaps the skb, fills out skb structure and passes the data to the 901 * iommu-unmaps the skb, fills out skb structure and passes the data to the
808 * stack. The descriptor state is not changed. 902 * stack. The descriptor state is not changed.
809 */ 903 */
810static void gelic_net_pass_skb_up(struct gelic_net_descr *descr, 904static void gelic_net_pass_skb_up(struct gelic_descr *descr,
811 struct gelic_net_card *card) 905 struct gelic_card *card,
906 struct net_device *netdev)
907
812{ 908{
813 struct sk_buff *skb; 909 struct sk_buff *skb = descr->skb;
814 struct net_device *netdev;
815 u32 data_status, data_error; 910 u32 data_status, data_error;
816 911
817 data_status = descr->data_status; 912 data_status = be32_to_cpu(descr->data_status);
818 data_error = descr->data_error; 913 data_error = be32_to_cpu(descr->data_error);
819 netdev = card->netdev;
820 /* unmap skb buffer */ 914 /* unmap skb buffer */
821 skb = descr->skb; 915 dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
822 dma_unmap_single(ctodev(card), descr->buf_addr, GELIC_NET_MAX_MTU, 916 GELIC_NET_MAX_MTU,
823 DMA_FROM_DEVICE); 917 DMA_FROM_DEVICE);
824 918
825 skb_put(skb, descr->valid_size? descr->valid_size : descr->result_size); 919 skb_put(skb, be32_to_cpu(descr->valid_size)?
920 be32_to_cpu(descr->valid_size) :
921 be32_to_cpu(descr->result_size));
826 if (!descr->valid_size) 922 if (!descr->valid_size)
827 dev_info(ctodev(card), "buffer full %x %x %x\n", 923 dev_info(ctodev(card), "buffer full %x %x %x\n",
828 descr->result_size, descr->buf_size, 924 be32_to_cpu(descr->result_size),
829 descr->dmac_cmd_status); 925 be32_to_cpu(descr->buf_size),
926 be32_to_cpu(descr->dmac_cmd_status));
830 927
831 descr->skb = NULL; 928 descr->skb = NULL;
832 /* 929 /*
@@ -838,8 +935,8 @@ static void gelic_net_pass_skb_up(struct gelic_net_descr *descr,
838 935
839 /* checksum offload */ 936 /* checksum offload */
840 if (card->rx_csum) { 937 if (card->rx_csum) {
841 if ((data_status & GELIC_NET_DATA_STATUS_CHK_MASK) && 938 if ((data_status & GELIC_DESCR_DATA_STATUS_CHK_MASK) &&
842 (!(data_error & GELIC_NET_DATA_ERROR_CHK_MASK))) 939 (!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK)))
843 skb->ip_summed = CHECKSUM_UNNECESSARY; 940 skb->ip_summed = CHECKSUM_UNNECESSARY;
844 else 941 else
845 skb->ip_summed = CHECKSUM_NONE; 942 skb->ip_summed = CHECKSUM_NONE;
@@ -847,15 +944,15 @@ static void gelic_net_pass_skb_up(struct gelic_net_descr *descr,
847 skb->ip_summed = CHECKSUM_NONE; 944 skb->ip_summed = CHECKSUM_NONE;
848 945
849 /* update netdevice statistics */ 946 /* update netdevice statistics */
850 card->netdev->stats.rx_packets++; 947 netdev->stats.rx_packets++;
851 card->netdev->stats.rx_bytes += skb->len; 948 netdev->stats.rx_bytes += skb->len;
852 949
853 /* pass skb up to stack */ 950 /* pass skb up to stack */
854 netif_receive_skb(skb); 951 netif_receive_skb(skb);
855} 952}
856 953
857/** 954/**
858 * gelic_net_decode_one_descr - processes an rx descriptor 955 * gelic_card_decode_one_descr - processes an rx descriptor
859 * @card: card structure 956 * @card: card structure
860 * 957 *
861 * returns 1 if a packet has been sent to the stack, otherwise 0 958 * returns 1 if a packet has been sent to the stack, otherwise 0
@@ -863,36 +960,56 @@ static void gelic_net_pass_skb_up(struct gelic_net_descr *descr,
863 * processes an rx descriptor by iommu-unmapping the data buffer and passing 960 * processes an rx descriptor by iommu-unmapping the data buffer and passing
864 * the packet up to the stack 961 * the packet up to the stack
865 */ 962 */
866static int gelic_net_decode_one_descr(struct gelic_net_card *card) 963static int gelic_card_decode_one_descr(struct gelic_card *card)
867{ 964{
868 enum gelic_net_descr_status status; 965 enum gelic_descr_dma_status status;
869 struct gelic_net_descr_chain *chain = &card->rx_chain; 966 struct gelic_descr_chain *chain = &card->rx_chain;
870 struct gelic_net_descr *descr = chain->tail; 967 struct gelic_descr *descr = chain->head;
968 struct net_device *netdev = NULL;
871 int dmac_chain_ended; 969 int dmac_chain_ended;
872 970
873 status = gelic_net_get_descr_status(descr); 971 status = gelic_descr_get_status(descr);
874 /* is this descriptor terminated with next_descr == NULL? */ 972 /* is this descriptor terminated with next_descr == NULL? */
875 dmac_chain_ended = 973 dmac_chain_ended =
876 descr->dmac_cmd_status & GELIC_NET_DMAC_CMDSTAT_RXDCEIS; 974 be32_to_cpu(descr->dmac_cmd_status) &
975 GELIC_DESCR_RX_DMA_CHAIN_END;
877 976
878 if (status == GELIC_NET_DESCR_CARDOWNED) 977 if (status == GELIC_DESCR_DMA_CARDOWNED)
879 return 0; 978 return 0;
880 979
881 if (status == GELIC_NET_DESCR_NOT_IN_USE) { 980 if (status == GELIC_DESCR_DMA_NOT_IN_USE) {
882 dev_dbg(ctodev(card), "dormant descr? %p\n", descr); 981 dev_dbg(ctodev(card), "dormant descr? %p\n", descr);
883 return 0; 982 return 0;
884 } 983 }
885 984
886 if ((status == GELIC_NET_DESCR_RESPONSE_ERROR) || 985 /* netdevice select */
887 (status == GELIC_NET_DESCR_PROTECTION_ERROR) || 986 if (card->vlan_required) {
888 (status == GELIC_NET_DESCR_FORCE_END)) { 987 unsigned int i;
988 u16 vid;
989 vid = *(u16 *)(descr->skb->data) & VLAN_VID_MASK;
990 for (i = 0; i < GELIC_PORT_MAX; i++) {
991 if (card->vlan[i].rx == vid) {
992 netdev = card->netdev[i];
993 break;
994 }
995 };
996 if (GELIC_PORT_MAX <= i) {
997 pr_info("%s: unknown packet vid=%x\n", __func__, vid);
998 goto refill;
999 }
1000 } else
1001 netdev = card->netdev[GELIC_PORT_ETHERNET];
1002
1003 if ((status == GELIC_DESCR_DMA_RESPONSE_ERROR) ||
1004 (status == GELIC_DESCR_DMA_PROTECTION_ERROR) ||
1005 (status == GELIC_DESCR_DMA_FORCE_END)) {
889 dev_info(ctodev(card), "dropping RX descriptor with state %x\n", 1006 dev_info(ctodev(card), "dropping RX descriptor with state %x\n",
890 status); 1007 status);
891 card->netdev->stats.rx_dropped++; 1008 netdev->stats.rx_dropped++;
892 goto refill; 1009 goto refill;
893 } 1010 }
894 1011
895 if (status == GELIC_NET_DESCR_BUFFER_FULL) { 1012 if (status == GELIC_DESCR_DMA_BUFFER_FULL) {
896 /* 1013 /*
897 * Buffer full would occur if and only if 1014 * Buffer full would occur if and only if
898 * the frame length was longer than the size of this 1015 * the frame length was longer than the size of this
@@ -909,14 +1026,14 @@ static int gelic_net_decode_one_descr(struct gelic_net_card *card)
909 * descriptors other than FRAME_END here should 1026 * descriptors other than FRAME_END here should
910 * be treated as error. 1027 * be treated as error.
911 */ 1028 */
912 if (status != GELIC_NET_DESCR_FRAME_END) { 1029 if (status != GELIC_DESCR_DMA_FRAME_END) {
913 dev_dbg(ctodev(card), "RX descriptor with state %x\n", 1030 dev_dbg(ctodev(card), "RX descriptor with state %x\n",
914 status); 1031 status);
915 goto refill; 1032 goto refill;
916 } 1033 }
917 1034
918 /* ok, we've got a packet in descr */ 1035 /* ok, we've got a packet in descr */
919 gelic_net_pass_skb_up(descr, card); 1036 gelic_net_pass_skb_up(descr, card, netdev);
920refill: 1037refill:
921 /* 1038 /*
922 * So that the DMAC can always see the end 1039 * So that the DMAC can always see the end
@@ -926,21 +1043,21 @@ refill:
926 descr->next_descr_addr = 0; 1043 descr->next_descr_addr = 0;
927 1044
928 /* change the descriptor state: */ 1045 /* change the descriptor state: */
929 gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE); 1046 gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
930 1047
931 /* 1048 /*
932 * this call can fail, but for now, just leave this 1049 * this call can fail, but for now, just leave this
933 * descriptor without skb 1050 * descriptor without skb
934 */ 1051 */
935 gelic_net_prepare_rx_descr(card, descr); 1052 gelic_descr_prepare_rx(card, descr);
936 1053
937 chain->head = descr; 1054 chain->tail = descr;
938 chain->tail = descr->next; 1055 chain->head = descr->next;
939 1056
940 /* 1057 /*
941 * Set this descriptor as the end of the chain. 1058 * Set this descriptor as the end of the chain.
942 */ 1059 */
943 descr->prev->next_descr_addr = descr->bus_addr; 1060 descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr);
944 1061
945 /* 1062 /*
946 * If dmac chain was met, DMAC stopped. 1063 * If dmac chain was met, DMAC stopped.
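The netdevice-selection block added earlier in this function reads the internal VLAN tag that the hypervisor places at the start of each received frame and maps it to one of the card's ports. The same lookup, pulled out into a helper for clarity (the helper name is illustrative; the patch keeps the loop inline):

/* sketch: resolve the rx VLAN id at the head of the frame to a port */
static struct net_device *vid_to_netdev(struct gelic_card *card,
					struct sk_buff *skb)
{
	u16 vid = *(u16 *)skb->data & VLAN_VID_MASK;
	unsigned int i;

	for (i = 0; i < GELIC_PORT_MAX; i++)
		if (card->vlan[i].rx == vid)
			return card->netdev[i];

	return NULL;	/* unknown tag: caller drops the frame and refills */
}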
@@ -956,29 +1073,27 @@ refill:
956 1073
957/** 1074/**
958 * gelic_net_poll - NAPI poll function called by the stack to return packets 1075 * gelic_net_poll - NAPI poll function called by the stack to return packets
959 * @netdev: interface device structure 1076 * @napi: napi structure
960 * @budget: number of packets we can pass to the stack at most 1077 * @budget: number of packets we can pass to the stack at most
961 * 1078 *
962 * returns 0 if no more packets available to the driver/stack. Returns 1, 1079 * returns the number of the processed packets
963 * if the quota is exceeded, but the driver has still packets.
964 * 1080 *
965 */ 1081 */
966static int gelic_net_poll(struct napi_struct *napi, int budget) 1082static int gelic_net_poll(struct napi_struct *napi, int budget)
967{ 1083{
968 struct gelic_net_card *card = container_of(napi, struct gelic_net_card, napi); 1084 struct gelic_card *card = container_of(napi, struct gelic_card, napi);
969 struct net_device *netdev = card->netdev;
970 int packets_done = 0; 1085 int packets_done = 0;
971 1086
972 while (packets_done < budget) { 1087 while (packets_done < budget) {
973 if (!gelic_net_decode_one_descr(card)) 1088 if (!gelic_card_decode_one_descr(card))
974 break; 1089 break;
975 1090
976 packets_done++; 1091 packets_done++;
977 } 1092 }
978 1093
979 if (packets_done < budget) { 1094 if (packets_done < budget) {
980 netif_rx_complete(netdev, napi); 1095 napi_complete(napi);
981 gelic_net_rx_irq_on(card); 1096 gelic_card_rx_irq_on(card);
982 } 1097 }
983 return packets_done; 1098 return packets_done;
984} 1099}
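gelic_net_poll() now follows the plain NAPI contract: consume at most budget rx descriptors, and if the ring drains before the budget is spent, call napi_complete() and re-arm the rx interrupt. Reduced to its control flow (a sketch relying only on the helpers shown above):

static int poll_skeleton(struct napi_struct *napi, int budget)
{
	struct gelic_card *card = container_of(napi, struct gelic_card, napi);
	int done = 0;

	/* each successful decode hands one packet to the stack */
	while (done < budget && gelic_card_decode_one_descr(card))
		done++;

	if (done < budget) {
		/* ring empty: stop polling, let the next irq reschedule us */
		napi_complete(napi);
		gelic_card_rx_irq_on(card);
	}
	return done;
}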
@@ -989,7 +1104,7 @@ static int gelic_net_poll(struct napi_struct *napi, int budget)
989 * 1104 *
990 * returns 0 on success, <0 on failure 1105 * returns 0 on success, <0 on failure
991 */ 1106 */
992static int gelic_net_change_mtu(struct net_device *netdev, int new_mtu) 1107int gelic_net_change_mtu(struct net_device *netdev, int new_mtu)
993{ 1108{
994 /* no need to re-alloc skbs or so -- the max mtu is about 2.3k 1109 /* no need to re-alloc skbs or so -- the max mtu is about 2.3k
995 * and mtu is outbound only anyway */ 1110 * and mtu is outbound only anyway */
@@ -1002,13 +1117,12 @@ static int gelic_net_change_mtu(struct net_device *netdev, int new_mtu)
1002} 1117}
1003 1118
1004/** 1119/**
1005 * gelic_net_interrupt - event handler for gelic_net 1120 * gelic_card_interrupt - event handler for gelic_net
1006 */ 1121 */
1007static irqreturn_t gelic_net_interrupt(int irq, void *ptr) 1122static irqreturn_t gelic_card_interrupt(int irq, void *ptr)
1008{ 1123{
1009 unsigned long flags; 1124 unsigned long flags;
1010 struct net_device *netdev = ptr; 1125 struct gelic_card *card = ptr;
1011 struct gelic_net_card *card = netdev_priv(netdev);
1012 u64 status; 1126 u64 status;
1013 1127
1014 status = card->irq_status; 1128 status = card->irq_status;
@@ -1016,24 +1130,37 @@ static irqreturn_t gelic_net_interrupt(int irq, void *ptr)
1016 if (!status) 1130 if (!status)
1017 return IRQ_NONE; 1131 return IRQ_NONE;
1018 1132
1133 status &= card->irq_mask;
1134
1019 if (card->rx_dma_restart_required) { 1135 if (card->rx_dma_restart_required) {
1020 card->rx_dma_restart_required = 0; 1136 card->rx_dma_restart_required = 0;
1021 gelic_net_enable_rxdmac(card); 1137 gelic_card_enable_rxdmac(card);
1022 } 1138 }
1023 1139
1024 if (status & GELIC_NET_RXINT) { 1140 if (status & GELIC_CARD_RXINT) {
1025 gelic_net_rx_irq_off(card); 1141 gelic_card_rx_irq_off(card);
1026 netif_rx_schedule(netdev, &card->napi); 1142 napi_schedule(&card->napi);
1027 } 1143 }
1028 1144
1029 if (status & GELIC_NET_TXINT) { 1145 if (status & GELIC_CARD_TXINT) {
1030 spin_lock_irqsave(&card->tx_dma_lock, flags); 1146 spin_lock_irqsave(&card->tx_lock, flags);
1031 card->tx_dma_progress = 0; 1147 card->tx_dma_progress = 0;
1032 gelic_net_release_tx_chain(card, 0); 1148 gelic_card_release_tx_chain(card, 0);
1033 /* kick outstanding tx descriptor if any */ 1149 /* kick outstanding tx descriptor if any */
1034 gelic_net_kick_txdma(card, card->tx_chain.tail); 1150 gelic_card_kick_txdma(card, card->tx_chain.tail);
1035 spin_unlock_irqrestore(&card->tx_dma_lock, flags); 1151 spin_unlock_irqrestore(&card->tx_lock, flags);
1036 } 1152 }
1153
1154 /* ether port status changed */
1155 if (status & GELIC_CARD_PORT_STATUS_CHANGED)
1156 gelic_card_get_ether_port_status(card, 1);
1157
1158#ifdef CONFIG_GELIC_WIRELESS
1159 if (status & (GELIC_CARD_WLAN_EVENT_RECEIVED |
1160 GELIC_CARD_WLAN_COMMAND_COMPLETED))
1161 gelic_wl_interrupt(card->netdev[GELIC_PORT_WIRELESS], status);
1162#endif
1163
1037 return IRQ_HANDLED; 1164 return IRQ_HANDLED;
1038} 1165}
1039 1166
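Two small but important changes in the handler: the status word is filtered with card->irq_mask so bits that are not currently enabled cannot trigger work, and the dev_id cookie passed to request_irq() is now the gelic_card itself (see the probe changes below), since one card may serve several net_devices. A condensed sketch of that pattern, with a hypothetical handler name:

/* sketch: the card pointer registered as dev_id comes back as ptr */
static irqreturn_t demo_interrupt(int irq, void *ptr)
{
	struct gelic_card *card = ptr;
	u64 status = card->irq_status & card->irq_mask;

	if (!status)
		return IRQ_NONE;	/* not ours, or nothing enabled */

	if (status & GELIC_CARD_RXINT) {
		gelic_card_rx_irq_off(card);	/* quiesce, then poll */
		napi_schedule(&card->napi);
	}
	return IRQ_HANDLED;
}

/* registration, as done in ps3_gelic_driver_probe():
 *	request_irq(card->irq, demo_interrupt, IRQF_DISABLED,
 *		    netdev->name, card);
 */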
@@ -1044,55 +1171,17 @@ static irqreturn_t gelic_net_interrupt(int irq, void *ptr)
1044 * 1171 *
1045 * see Documentation/networking/netconsole.txt 1172 * see Documentation/networking/netconsole.txt
1046 */ 1173 */
1047static void gelic_net_poll_controller(struct net_device *netdev) 1174void gelic_net_poll_controller(struct net_device *netdev)
1048{ 1175{
1049 struct gelic_net_card *card = netdev_priv(netdev); 1176 struct gelic_card *card = netdev_card(netdev);
1050 1177
1051 gelic_net_set_irq_mask(card, 0); 1178 gelic_card_set_irq_mask(card, 0);
1052 gelic_net_interrupt(netdev->irq, netdev); 1179 gelic_card_interrupt(netdev->irq, netdev);
1053 gelic_net_set_irq_mask(card, card->ghiintmask); 1180 gelic_card_set_irq_mask(card, card->irq_mask);
1054} 1181}
1055#endif /* CONFIG_NET_POLL_CONTROLLER */ 1182#endif /* CONFIG_NET_POLL_CONTROLLER */
1056 1183
1057/** 1184/**
1058 * gelic_net_open_device - open device and map dma region
1059 * @card: card structure
1060 */
1061static int gelic_net_open_device(struct gelic_net_card *card)
1062{
1063 int result;
1064
1065 result = ps3_sb_event_receive_port_setup(card->dev, PS3_BINDING_CPU_ANY,
1066 &card->netdev->irq);
1067
1068 if (result) {
1069 dev_info(ctodev(card),
1070 "%s:%d: gelic_net_open_device failed (%d)\n",
1071 __func__, __LINE__, result);
1072 result = -EPERM;
1073 goto fail_alloc_irq;
1074 }
1075
1076 result = request_irq(card->netdev->irq, gelic_net_interrupt,
1077 IRQF_DISABLED, card->netdev->name, card->netdev);
1078
1079 if (result) {
1080 dev_info(ctodev(card), "%s:%d: request_irq failed (%d)\n",
1081 __func__, __LINE__, result);
1082 goto fail_request_irq;
1083 }
1084
1085 return 0;
1086
1087fail_request_irq:
1088 ps3_sb_event_receive_port_destroy(card->dev, card->netdev->irq);
1089 card->netdev->irq = NO_IRQ;
1090fail_alloc_irq:
1091 return result;
1092}
1093
1094
1095/**
1096 * gelic_net_open - called upon ifconfig up 1185 * gelic_net_open - called upon ifconfig up
1097 * @netdev: interface device structure 1186 * @netdev: interface device structure
1098 * 1187 *
@@ -1101,169 +1190,88 @@ fail_alloc_irq:
1101 * gelic_net_open allocates all the descriptors and memory needed for 1190 * gelic_net_open allocates all the descriptors and memory needed for
1102 * operation, sets up multicast list and enables interrupts 1191 * operation, sets up multicast list and enables interrupts
1103 */ 1192 */
1104static int gelic_net_open(struct net_device *netdev) 1193int gelic_net_open(struct net_device *netdev)
1105{ 1194{
1106 struct gelic_net_card *card = netdev_priv(netdev); 1195 struct gelic_card *card = netdev_card(netdev);
1107
1108 dev_dbg(ctodev(card), " -> %s:%d\n", __func__, __LINE__);
1109
1110 gelic_net_open_device(card);
1111
1112 if (gelic_net_init_chain(card, &card->tx_chain,
1113 card->descr, GELIC_NET_TX_DESCRIPTORS))
1114 goto alloc_tx_failed;
1115 if (gelic_net_init_chain(card, &card->rx_chain,
1116 card->descr + GELIC_NET_TX_DESCRIPTORS,
1117 GELIC_NET_RX_DESCRIPTORS))
1118 goto alloc_rx_failed;
1119
1120 /* head of chain */
1121 card->tx_top = card->tx_chain.head;
1122 card->rx_top = card->rx_chain.head;
1123 dev_dbg(ctodev(card), "descr rx %p, tx %p, size %#lx, num %#x\n",
1124 card->rx_top, card->tx_top, sizeof(struct gelic_net_descr),
1125 GELIC_NET_RX_DESCRIPTORS);
1126 /* allocate rx skbs */
1127 if (gelic_net_alloc_rx_skbs(card))
1128 goto alloc_skbs_failed;
1129 1196
1130 napi_enable(&card->napi); 1197 dev_dbg(ctodev(card), " -> %s %p\n", __func__, netdev);
1131
1132 card->tx_dma_progress = 0;
1133 card->ghiintmask = GELIC_NET_RXINT | GELIC_NET_TXINT;
1134 1198
1135 gelic_net_set_irq_mask(card, card->ghiintmask); 1199 gelic_card_up(card);
1136 gelic_net_enable_rxdmac(card);
1137 1200
1138 netif_start_queue(netdev); 1201 netif_start_queue(netdev);
1139 netif_carrier_on(netdev); 1202 gelic_card_get_ether_port_status(card, 1);
1140 1203
1204 dev_dbg(ctodev(card), " <- %s\n", __func__);
1141 return 0; 1205 return 0;
1142
1143alloc_skbs_failed:
1144 gelic_net_free_chain(card, card->rx_top);
1145alloc_rx_failed:
1146 gelic_net_free_chain(card, card->tx_top);
1147alloc_tx_failed:
1148 return -ENOMEM;
1149} 1206}
1150 1207
1151static void gelic_net_get_drvinfo (struct net_device *netdev, 1208void gelic_net_get_drvinfo(struct net_device *netdev,
1152 struct ethtool_drvinfo *info) 1209 struct ethtool_drvinfo *info)
1153{ 1210{
1154 strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1); 1211 strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1);
1155 strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1); 1212 strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1);
1156} 1213}
1157 1214
1158static int gelic_net_get_settings(struct net_device *netdev, 1215static int gelic_ether_get_settings(struct net_device *netdev,
1159 struct ethtool_cmd *cmd) 1216 struct ethtool_cmd *cmd)
1160{ 1217{
1161 struct gelic_net_card *card = netdev_priv(netdev); 1218 struct gelic_card *card = netdev_card(netdev);
1162 int status;
1163 u64 v1, v2;
1164 int speed, duplex;
1165 1219
1166 speed = duplex = -1; 1220 gelic_card_get_ether_port_status(card, 0);
1167 status = lv1_net_control(bus_id(card), dev_id(card),
1168 GELIC_NET_GET_ETH_PORT_STATUS, GELIC_NET_PORT, 0, 0,
1169 &v1, &v2);
1170 if (status) {
1171 /* link down */
1172 } else {
1173 if (v1 & GELIC_NET_FULL_DUPLEX) {
1174 duplex = DUPLEX_FULL;
1175 } else {
1176 duplex = DUPLEX_HALF;
1177 }
1178 1221
1179 if (v1 & GELIC_NET_SPEED_10 ) { 1222 if (card->ether_port_status & GELIC_LV1_ETHER_FULL_DUPLEX)
1180 speed = SPEED_10; 1223 cmd->duplex = DUPLEX_FULL;
1181 } else if (v1 & GELIC_NET_SPEED_100) { 1224 else
1182 speed = SPEED_100; 1225 cmd->duplex = DUPLEX_HALF;
1183 } else if (v1 & GELIC_NET_SPEED_1000) { 1226
1184 speed = SPEED_1000; 1227 switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) {
1185 } 1228 case GELIC_LV1_ETHER_SPEED_10:
1229 cmd->speed = SPEED_10;
1230 break;
1231 case GELIC_LV1_ETHER_SPEED_100:
1232 cmd->speed = SPEED_100;
1233 break;
1234 case GELIC_LV1_ETHER_SPEED_1000:
1235 cmd->speed = SPEED_1000;
1236 break;
1237 default:
1238 pr_info("%s: speed unknown\n", __func__);
1239 cmd->speed = SPEED_10;
1240 break;
1186 } 1241 }
1242
1187 cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | 1243 cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg |
1188 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 1244 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
1189 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 1245 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1190 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; 1246 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
1191 cmd->advertising = cmd->supported; 1247 cmd->advertising = cmd->supported;
1192 cmd->speed = speed;
1193 cmd->duplex = duplex;
1194 cmd->autoneg = AUTONEG_ENABLE; /* always enabled */ 1248 cmd->autoneg = AUTONEG_ENABLE; /* always enabled */
1195 cmd->port = PORT_TP; 1249 cmd->port = PORT_TP;
1196 1250
1197 return 0; 1251 return 0;
1198} 1252}
1199 1253
1200static u32 gelic_net_get_link(struct net_device *netdev) 1254u32 gelic_net_get_rx_csum(struct net_device *netdev)
1201{ 1255{
1202 struct gelic_net_card *card = netdev_priv(netdev); 1256 struct gelic_card *card = netdev_card(netdev);
1203 int status;
1204 u64 v1, v2;
1205 int link;
1206
1207 status = lv1_net_control(bus_id(card), dev_id(card),
1208 GELIC_NET_GET_ETH_PORT_STATUS, GELIC_NET_PORT, 0, 0,
1209 &v1, &v2);
1210 if (status)
1211 return 0; /* link down */
1212
1213 if (v1 & GELIC_NET_LINK_UP)
1214 link = 1;
1215 else
1216 link = 0;
1217
1218 return link;
1219}
1220
1221static int gelic_net_nway_reset(struct net_device *netdev)
1222{
1223 if (netif_running(netdev)) {
1224 gelic_net_stop(netdev);
1225 gelic_net_open(netdev);
1226 }
1227 return 0;
1228}
1229
1230static u32 gelic_net_get_tx_csum(struct net_device *netdev)
1231{
1232 return (netdev->features & NETIF_F_IP_CSUM) != 0;
1233}
1234
1235static int gelic_net_set_tx_csum(struct net_device *netdev, u32 data)
1236{
1237 if (data)
1238 netdev->features |= NETIF_F_IP_CSUM;
1239 else
1240 netdev->features &= ~NETIF_F_IP_CSUM;
1241
1242 return 0;
1243}
1244
1245static u32 gelic_net_get_rx_csum(struct net_device *netdev)
1246{
1247 struct gelic_net_card *card = netdev_priv(netdev);
1248 1257
1249 return card->rx_csum; 1258 return card->rx_csum;
1250} 1259}
1251 1260
1252static int gelic_net_set_rx_csum(struct net_device *netdev, u32 data) 1261int gelic_net_set_rx_csum(struct net_device *netdev, u32 data)
1253{ 1262{
1254 struct gelic_net_card *card = netdev_priv(netdev); 1263 struct gelic_card *card = netdev_card(netdev);
1255 1264
1256 card->rx_csum = data; 1265 card->rx_csum = data;
1257 return 0; 1266 return 0;
1258} 1267}
1259 1268
1260static struct ethtool_ops gelic_net_ethtool_ops = { 1269static struct ethtool_ops gelic_ether_ethtool_ops = {
1261 .get_drvinfo = gelic_net_get_drvinfo, 1270 .get_drvinfo = gelic_net_get_drvinfo,
1262 .get_settings = gelic_net_get_settings, 1271 .get_settings = gelic_ether_get_settings,
1263 .get_link = gelic_net_get_link, 1272 .get_link = ethtool_op_get_link,
1264 .nway_reset = gelic_net_nway_reset, 1273 .get_tx_csum = ethtool_op_get_tx_csum,
1265 .get_tx_csum = gelic_net_get_tx_csum, 1274 .set_tx_csum = ethtool_op_set_tx_csum,
1266 .set_tx_csum = gelic_net_set_tx_csum,
1267 .get_rx_csum = gelic_net_get_rx_csum, 1275 .get_rx_csum = gelic_net_get_rx_csum,
1268 .set_rx_csum = gelic_net_set_rx_csum, 1276 .set_rx_csum = gelic_net_set_rx_csum,
1269}; 1277};
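The ops table above drops the hand-rolled link and tx-checksum accessors in favour of the generic ethtool_op_* helpers; ethtool_op_get_link() just reports netif_carrier_ok(), so the driver has to keep the carrier flag current whenever it re-reads the port status. That is assumed to happen inside gelic_card_get_ether_port_status(), which is not shown in this diff; a hedged sketch of the obligation:

/* sketch: keep netif_carrier_* in sync with the LV1 port status so that
 * ethtool_op_get_link() reports the right thing
 */
static void sync_carrier(struct gelic_card *card, u64 lv1_status)
{
	struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET];

	card->ether_port_status = lv1_status;
	if (lv1_status & GELIC_LV1_ETHER_LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}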
@@ -1277,9 +1285,9 @@ static struct ethtool_ops gelic_net_ethtool_ops = {
1277 */ 1285 */
1278static void gelic_net_tx_timeout_task(struct work_struct *work) 1286static void gelic_net_tx_timeout_task(struct work_struct *work)
1279{ 1287{
1280 struct gelic_net_card *card = 1288 struct gelic_card *card =
1281 container_of(work, struct gelic_net_card, tx_timeout_task); 1289 container_of(work, struct gelic_card, tx_timeout_task);
1282 struct net_device *netdev = card->netdev; 1290 struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET];
1283 1291
1284 dev_info(ctodev(card), "%s:Timed out. Restarting... \n", __func__); 1292 dev_info(ctodev(card), "%s:Timed out. Restarting... \n", __func__);
1285 1293
@@ -1302,11 +1310,11 @@ out:
1302 * 1310 *
1303 * called, if tx hangs. Schedules a task that resets the interface 1311 * called, if tx hangs. Schedules a task that resets the interface
1304 */ 1312 */
1305static void gelic_net_tx_timeout(struct net_device *netdev) 1313void gelic_net_tx_timeout(struct net_device *netdev)
1306{ 1314{
1307 struct gelic_net_card *card; 1315 struct gelic_card *card;
1308 1316
1309 card = netdev_priv(netdev); 1317 card = netdev_card(netdev);
1310 atomic_inc(&card->tx_timeout_task_counter); 1318 atomic_inc(&card->tx_timeout_task_counter);
1311 if (netdev->flags & IFF_UP) 1319 if (netdev->flags & IFF_UP)
1312 schedule_work(&card->tx_timeout_task); 1320 schedule_work(&card->tx_timeout_task);
@@ -1315,12 +1323,13 @@ static void gelic_net_tx_timeout(struct net_device *netdev)
1315} 1323}
1316 1324
1317/** 1325/**
1318 * gelic_net_setup_netdev_ops - initialization of net_device operations 1326 * gelic_ether_setup_netdev_ops - initialization of net_device operations
1319 * @netdev: net_device structure 1327 * @netdev: net_device structure
1320 * 1328 *
1321 * fills out function pointers in the net_device structure 1329 * fills out function pointers in the net_device structure
1322 */ 1330 */
1323static void gelic_net_setup_netdev_ops(struct net_device *netdev) 1331static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
1332 struct napi_struct *napi)
1324{ 1333{
1325 netdev->open = &gelic_net_open; 1334 netdev->open = &gelic_net_open;
1326 netdev->stop = &gelic_net_stop; 1335 netdev->stop = &gelic_net_stop;
@@ -1330,163 +1339,239 @@ static void gelic_net_setup_netdev_ops(struct net_device *netdev)
1330 /* tx watchdog */ 1339 /* tx watchdog */
1331 netdev->tx_timeout = &gelic_net_tx_timeout; 1340 netdev->tx_timeout = &gelic_net_tx_timeout;
1332 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; 1341 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
1333 netdev->ethtool_ops = &gelic_net_ethtool_ops; 1342 /* NAPI */
1343 netif_napi_add(netdev, napi,
1344 gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
1345 netdev->ethtool_ops = &gelic_ether_ethtool_ops;
1346#ifdef CONFIG_NET_POLL_CONTROLLER
1347 netdev->poll_controller = gelic_net_poll_controller;
1348#endif
1334} 1349}
1335 1350
1336/** 1351/**
1337 * gelic_net_setup_netdev - initialization of net_device 1352 * gelic_ether_setup_netdev - initialization of net_device
1353 * @netdev: net_device structure
1338 * @card: card structure 1354 * @card: card structure
1339 * 1355 *
1340 * Returns 0 on success or <0 on failure 1356 * Returns 0 on success or <0 on failure
1341 * 1357 *
1342 * gelic_net_setup_netdev initializes the net_device structure 1358 * gelic_ether_setup_netdev initializes the net_device structure
1359 * and register it.
1343 **/ 1360 **/
1344static int gelic_net_setup_netdev(struct gelic_net_card *card) 1361int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card)
1345{ 1362{
1346 struct net_device *netdev = card->netdev;
1347 struct sockaddr addr;
1348 unsigned int i;
1349 int status; 1363 int status;
1350 u64 v1, v2; 1364 u64 v1, v2;
1351 DECLARE_MAC_BUF(mac); 1365 DECLARE_MAC_BUF(mac);
1352 1366
1353 SET_NETDEV_DEV(netdev, &card->dev->core);
1354 spin_lock_init(&card->tx_dma_lock);
1355
1356 card->rx_csum = GELIC_NET_RX_CSUM_DEFAULT;
1357
1358 gelic_net_setup_netdev_ops(netdev);
1359
1360 netif_napi_add(netdev, &card->napi,
1361 gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
1362
1363 netdev->features = NETIF_F_IP_CSUM; 1367 netdev->features = NETIF_F_IP_CSUM;
1364 1368
1365 status = lv1_net_control(bus_id(card), dev_id(card), 1369 status = lv1_net_control(bus_id(card), dev_id(card),
1366 GELIC_NET_GET_MAC_ADDRESS, 1370 GELIC_LV1_GET_MAC_ADDRESS,
1367 0, 0, 0, &v1, &v2); 1371 0, 0, 0, &v1, &v2);
1372 v1 <<= 16;
1368 if (status || !is_valid_ether_addr((u8 *)&v1)) { 1373 if (status || !is_valid_ether_addr((u8 *)&v1)) {
1369 dev_info(ctodev(card), 1374 dev_info(ctodev(card),
1370 "%s:lv1_net_control GET_MAC_ADDR failed %d\n", 1375 "%s:lv1_net_control GET_MAC_ADDR failed %d\n",
1371 __func__, status); 1376 __func__, status);
1372 return -EINVAL; 1377 return -EINVAL;
1373 } 1378 }
1374 v1 <<= 16; 1379 memcpy(netdev->dev_addr, &v1, ETH_ALEN);
1375 memcpy(addr.sa_data, &v1, ETH_ALEN);
1376 memcpy(netdev->dev_addr, addr.sa_data, ETH_ALEN);
1377 dev_info(ctodev(card), "MAC addr %s\n",
1378 print_mac(mac, netdev->dev_addr));
1379 1380
1380 card->vlan_index = -1; /* no vlan */ 1381 if (card->vlan_required) {
1381 for (i = 0; i < GELIC_NET_VLAN_MAX; i++) {
1382 status = lv1_net_control(bus_id(card), dev_id(card),
1383 GELIC_NET_GET_VLAN_ID,
1384 i + 1, /* index; one based */
1385 0, 0, &v1, &v2);
1386 if (status == GELIC_NET_VLAN_NO_ENTRY) {
1387 dev_dbg(ctodev(card),
1388 "GELIC_VLAN_ID no entry:%d, VLAN disabled\n",
1389 status);
1390 card->vlan_id[i] = 0;
1391 } else if (status) {
1392 dev_dbg(ctodev(card),
1393 "%s:GELIC_NET_VLAN_ID faild, status=%d\n",
1394 __func__, status);
1395 card->vlan_id[i] = 0;
1396 } else {
1397 card->vlan_id[i] = (u32)v1;
1398 dev_dbg(ctodev(card), "vlan_id:%d, %lx\n", i, v1);
1399 }
1400 }
1401
1402 if (card->vlan_id[GELIC_NET_VLAN_WIRED - 1]) {
1403 card->vlan_index = GELIC_NET_VLAN_WIRED - 1;
1404 netdev->hard_header_len += VLAN_HLEN; 1382 netdev->hard_header_len += VLAN_HLEN;
1383 /*
1384 * As vlan is internally used,
1385 * we can not receive vlan packets
1386 */
1387 netdev->features |= NETIF_F_VLAN_CHALLENGED;
1405 } 1388 }
1406 1389
1407 status = register_netdev(netdev); 1390 status = register_netdev(netdev);
1408 if (status) { 1391 if (status) {
1409 dev_err(ctodev(card), "%s:Couldn't register net_device: %d\n", 1392 dev_err(ctodev(card), "%s:Couldn't register %s %d\n",
1410 __func__, status); 1393 __func__, netdev->name, status);
1411 return status; 1394 return status;
1412 } 1395 }
1396 dev_info(ctodev(card), "%s: MAC addr %s\n",
1397 netdev->name,
1398 print_mac(mac, netdev->dev_addr));
1413 1399
1414 return 0; 1400 return 0;
1415} 1401}
1416 1402
1417/** 1403/**
1418 * gelic_net_alloc_card - allocates net_device and card structure 1404 * gelic_alloc_card_net - allocates net_device and card structure
1419 * 1405 *
1420 * returns the card structure or NULL in case of errors 1406 * returns the card structure or NULL in case of errors
1421 * 1407 *
1422 * the card and net_device structures are linked to each other 1408 * the card and net_device structures are linked to each other
1423 */ 1409 */
1424static struct gelic_net_card *gelic_net_alloc_card(void) 1410#define GELIC_ALIGN (32)
1411static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
1425{ 1412{
1426 struct net_device *netdev; 1413 struct gelic_card *card;
1427 struct gelic_net_card *card; 1414 struct gelic_port *port;
1415 void *p;
1428 size_t alloc_size; 1416 size_t alloc_size;
1429
1430 alloc_size = sizeof (*card) +
1431 sizeof (struct gelic_net_descr) * GELIC_NET_RX_DESCRIPTORS +
1432 sizeof (struct gelic_net_descr) * GELIC_NET_TX_DESCRIPTORS;
1433 /* 1417 /*
1434 * we assume private data is allocated 32 bytes (or more) aligned 1418 * gelic requires dma descriptor is 32 bytes aligned and
1435 * so that gelic_net_descr should be 32 bytes aligned. 1419 * the hypervisor requires irq_status is 8 bytes aligned.
1436 * Current alloc_etherdev() does do it because NETDEV_ALIGN
1437 * is 32.
1438 * check this assumption here.
1439 */ 1420 */
1440 BUILD_BUG_ON(NETDEV_ALIGN < 32); 1421 BUILD_BUG_ON(offsetof(struct gelic_card, irq_status) % 8);
1441 BUILD_BUG_ON(offsetof(struct gelic_net_card, irq_status) % 8); 1422 BUILD_BUG_ON(offsetof(struct gelic_card, descr) % 32);
1442 BUILD_BUG_ON(offsetof(struct gelic_net_card, descr) % 32); 1423 alloc_size =
1424 sizeof(struct gelic_card) +
1425 sizeof(struct gelic_descr) * GELIC_NET_RX_DESCRIPTORS +
1426 sizeof(struct gelic_descr) * GELIC_NET_TX_DESCRIPTORS +
1427 GELIC_ALIGN - 1;
1428
1429 p = kzalloc(alloc_size, GFP_KERNEL);
1430 if (!p)
1431 return NULL;
1432 card = PTR_ALIGN(p, GELIC_ALIGN);
1433 card->unalign = p;
1443 1434
1444 netdev = alloc_etherdev(alloc_size); 1435 /*
1445 if (!netdev) 1436 * alloc netdev
1437 */
1438 *netdev = alloc_etherdev(sizeof(struct gelic_port));
1439 if (!netdev) {
1440 kfree(card->unalign);
1446 return NULL; 1441 return NULL;
1442 }
1443 port = netdev_priv(*netdev);
1444
1445 /* gelic_port */
1446 port->netdev = *netdev;
1447 port->card = card;
1448 port->type = GELIC_PORT_ETHERNET;
1449
1450 /* gelic_card */
1451 card->netdev[GELIC_PORT_ETHERNET] = *netdev;
1447 1452
1448 card = netdev_priv(netdev);
1449 card->netdev = netdev;
1450 INIT_WORK(&card->tx_timeout_task, gelic_net_tx_timeout_task); 1453 INIT_WORK(&card->tx_timeout_task, gelic_net_tx_timeout_task);
1451 init_waitqueue_head(&card->waitq); 1454 init_waitqueue_head(&card->waitq);
1452 atomic_set(&card->tx_timeout_task_counter, 0); 1455 atomic_set(&card->tx_timeout_task_counter, 0);
1456 init_MUTEX(&card->updown_lock);
1457 atomic_set(&card->users, 0);
1453 1458
1454 return card; 1459 return card;
1455} 1460}
1456 1461
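gelic_alloc_card_net() no longer relies on alloc_etherdev() returning 32-byte aligned private data; it over-allocates by GELIC_ALIGN - 1, rounds up with PTR_ALIGN() and stashes the raw pointer in card->unalign so the remove path can still kfree() it. The same idiom in isolation (structure and function names here are only illustrative):

/* sketch of the manual alignment idiom used above */
struct demo_blob {
	void *unalign;	/* raw kzalloc() pointer, needed for kfree() */
	/* aligned payload follows */
};

static struct demo_blob *demo_alloc_aligned(size_t payload, size_t align)
{
	void *p = kzalloc(sizeof(struct demo_blob) + payload + align - 1,
			  GFP_KERNEL);
	struct demo_blob *blob;

	if (!p)
		return NULL;
	blob = PTR_ALIGN(p, align);	/* round up to the boundary */
	blob->unalign = p;		/* remember what kfree() wants */
	return blob;
}

static void demo_free_aligned(struct demo_blob *blob)
{
	kfree(blob->unalign);
}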
1462static void gelic_card_get_vlan_info(struct gelic_card *card)
1463{
1464 u64 v1, v2;
1465 int status;
1466 unsigned int i;
1467 struct {
1468 int tx;
1469 int rx;
1470 } vlan_id_ix[2] = {
1471 [GELIC_PORT_ETHERNET] = {
1472 .tx = GELIC_LV1_VLAN_TX_ETHERNET,
1473 .rx = GELIC_LV1_VLAN_RX_ETHERNET
1474 },
1475 [GELIC_PORT_WIRELESS] = {
1476 .tx = GELIC_LV1_VLAN_TX_WIRELESS,
1477 .rx = GELIC_LV1_VLAN_RX_WIRELESS
1478 }
1479 };
1480
1481 for (i = 0; i < ARRAY_SIZE(vlan_id_ix); i++) {
1482 /* tx tag */
1483 status = lv1_net_control(bus_id(card), dev_id(card),
1484 GELIC_LV1_GET_VLAN_ID,
1485 vlan_id_ix[i].tx,
1486 0, 0, &v1, &v2);
1487 if (status || !v1) {
1488 if (status != LV1_NO_ENTRY)
1489 dev_dbg(ctodev(card),
1490 "get vlan id for tx(%d) failed(%d)\n",
1491 vlan_id_ix[i].tx, status);
1492 card->vlan[i].tx = 0;
1493 card->vlan[i].rx = 0;
1494 continue;
1495 }
1496 card->vlan[i].tx = (u16)v1;
1497
1498 /* rx tag */
1499 status = lv1_net_control(bus_id(card), dev_id(card),
1500 GELIC_LV1_GET_VLAN_ID,
1501 vlan_id_ix[i].rx,
1502 0, 0, &v1, &v2);
1503 if (status || !v1) {
1504 if (status != LV1_NO_ENTRY)
1505 dev_info(ctodev(card),
1506 "get vlan id for rx(%d) failed(%d)\n",
1507 vlan_id_ix[i].rx, status);
1508 card->vlan[i].tx = 0;
1509 card->vlan[i].rx = 0;
1510 continue;
1511 }
1512 card->vlan[i].rx = (u16)v1;
1513
1514 dev_dbg(ctodev(card), "vlan_id[%d] tx=%02x rx=%02x\n",
1515 i, card->vlan[i].tx, card->vlan[i].rx);
1516 }
1517
1518 if (card->vlan[GELIC_PORT_ETHERNET].tx) {
1519 BUG_ON(!card->vlan[GELIC_PORT_WIRELESS].tx);
1520 card->vlan_required = 1;
1521 } else
1522 card->vlan_required = 0;
1523
1524 /* check wireless capable firmware */
1525 if (ps3_compare_firmware_version(1, 6, 0) < 0) {
1526 card->vlan[GELIC_PORT_WIRELESS].tx = 0;
1527 card->vlan[GELIC_PORT_WIRELESS].rx = 0;
1528 }
1529
1530 dev_info(ctodev(card), "internal vlan %s\n",
1531 card->vlan_required? "enabled" : "disabled");
1532}
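gelic_card_get_vlan_info() asks the hypervisor for the fixed tx/rx tags of each port through GELIC_LV1_GET_VLAN_ID; LV1_NO_ENTRY simply means the firmware does not use the internal VLAN scheme, so the tag is recorded as zero. One such query, condensed into a sketch (the wrapper is illustrative, the lv1_net_control arguments mirror the function above):

/* sketch: fetch one internal VLAN tag from the hypervisor */
static u16 query_vlan_tag(struct gelic_card *card, u64 index)
{
	u64 v1, v2;
	int status;

	status = lv1_net_control(bus_id(card), dev_id(card),
				 GELIC_LV1_GET_VLAN_ID,
				 index,	/* e.g. GELIC_LV1_VLAN_TX_ETHERNET */
				 0, 0, &v1, &v2);
	if (status || !v1)
		return 0;	/* error or LV1_NO_ENTRY: no tag in use */

	return (u16)v1;
}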
1457/** 1533/**
1458 * ps3_gelic_driver_probe - add a device to the control of this driver 1534 * ps3_gelic_driver_probe - add a device to the control of this driver
1459 */ 1535 */
1460static int ps3_gelic_driver_probe (struct ps3_system_bus_device *dev) 1536static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
1461{ 1537{
1462 struct gelic_net_card *card = gelic_net_alloc_card(); 1538 struct gelic_card *card;
1539 struct net_device *netdev;
1463 int result; 1540 int result;
1464 1541
1465 if (!card) { 1542 pr_debug("%s: called\n", __func__);
1466 dev_info(&dev->core, "gelic_net_alloc_card failed\n");
1467 result = -ENOMEM;
1468 goto fail_alloc_card;
1469 }
1470
1471 ps3_system_bus_set_driver_data(dev, card);
1472 card->dev = dev;
1473
1474 result = ps3_open_hv_device(dev); 1543 result = ps3_open_hv_device(dev);
1475 1544
1476 if (result) { 1545 if (result) {
1477 dev_dbg(&dev->core, "ps3_open_hv_device failed\n"); 1546 dev_dbg(&dev->core, "%s:ps3_open_hv_device failed\n",
1547 __func__);
1478 goto fail_open; 1548 goto fail_open;
1479 } 1549 }
1480 1550
1481 result = ps3_dma_region_create(dev->d_region); 1551 result = ps3_dma_region_create(dev->d_region);
1482 1552
1483 if (result) { 1553 if (result) {
1484 dev_dbg(&dev->core, "ps3_dma_region_create failed(%d)\n", 1554 dev_dbg(&dev->core, "%s:ps3_dma_region_create failed(%d)\n",
1485 result); 1555 __func__, result);
1486 BUG_ON("check region type"); 1556 BUG_ON("check region type");
1487 goto fail_dma_region; 1557 goto fail_dma_region;
1488 } 1558 }
1489 1559
1560 /* alloc card/netdevice */
1561 card = gelic_alloc_card_net(&netdev);
1562 if (!card) {
1563 dev_info(&dev->core, "%s:gelic_net_alloc_card failed\n",
1564 __func__);
1565 result = -ENOMEM;
1566 goto fail_alloc_card;
1567 }
1568 ps3_system_bus_set_driver_data(dev, card);
1569 card->dev = dev;
1570
1571 /* get internal vlan info */
1572 gelic_card_get_vlan_info(card);
1573
1574 /* setup interrupt */
1490 result = lv1_net_set_interrupt_status_indicator(bus_id(card), 1575 result = lv1_net_set_interrupt_status_indicator(bus_id(card),
1491 dev_id(card), 1576 dev_id(card),
1492 ps3_mm_phys_to_lpar(__pa(&card->irq_status)), 1577 ps3_mm_phys_to_lpar(__pa(&card->irq_status)),
@@ -1494,34 +1579,101 @@ static int ps3_gelic_driver_probe (struct ps3_system_bus_device *dev)
1494 1579
1495 if (result) { 1580 if (result) {
1496 dev_dbg(&dev->core, 1581 dev_dbg(&dev->core,
1497 "lv1_net_set_interrupt_status_indicator failed: %s\n", 1582 "%s:set_interrupt_status_indicator failed: %s\n",
1498 ps3_result(result)); 1583 __func__, ps3_result(result));
1499 result = -EIO; 1584 result = -EIO;
1500 goto fail_status_indicator; 1585 goto fail_status_indicator;
1501 } 1586 }
1502 1587
1503 result = gelic_net_setup_netdev(card); 1588 result = ps3_sb_event_receive_port_setup(dev, PS3_BINDING_CPU_ANY,
1589 &card->irq);
1590
1591 if (result) {
1592 dev_info(ctodev(card),
1593 "%s:gelic_net_open_device failed (%d)\n",
1594 __func__, result);
1595 result = -EPERM;
1596 goto fail_alloc_irq;
1597 }
1598 result = request_irq(card->irq, gelic_card_interrupt,
1599 IRQF_DISABLED, netdev->name, card);
1600
1601 if (result) {
1602 dev_info(ctodev(card), "%s:request_irq failed (%d)\n",
1603 __func__, result);
1604 goto fail_request_irq;
1605 }
1606
1607 /* setup card structure */
1608 card->irq_mask = GELIC_CARD_RXINT | GELIC_CARD_TXINT |
1609 GELIC_CARD_PORT_STATUS_CHANGED;
1610 card->rx_csum = GELIC_CARD_RX_CSUM_DEFAULT;
1504 1611
1612
1613 if (gelic_card_init_chain(card, &card->tx_chain,
1614 card->descr, GELIC_NET_TX_DESCRIPTORS))
1615 goto fail_alloc_tx;
1616 if (gelic_card_init_chain(card, &card->rx_chain,
1617 card->descr + GELIC_NET_TX_DESCRIPTORS,
1618 GELIC_NET_RX_DESCRIPTORS))
1619 goto fail_alloc_rx;
1620
1621 /* head of chain */
1622 card->tx_top = card->tx_chain.head;
1623 card->rx_top = card->rx_chain.head;
1624 dev_dbg(ctodev(card), "descr rx %p, tx %p, size %#lx, num %#x\n",
1625 card->rx_top, card->tx_top, sizeof(struct gelic_descr),
1626 GELIC_NET_RX_DESCRIPTORS);
1627 /* allocate rx skbs */
1628 if (gelic_card_alloc_rx_skbs(card))
1629 goto fail_alloc_skbs;
1630
1631 spin_lock_init(&card->tx_lock);
1632 card->tx_dma_progress = 0;
1633
1634 /* setup net_device structure */
1635 netdev->irq = card->irq;
1636 SET_NETDEV_DEV(netdev, &card->dev->core);
1637 gelic_ether_setup_netdev_ops(netdev, &card->napi);
1638 result = gelic_net_setup_netdev(netdev, card);
1505 if (result) { 1639 if (result) {
1506 dev_dbg(&dev->core, "%s:%d: ps3_dma_region_create failed: " 1640 dev_dbg(&dev->core, "%s: setup_netdev failed %d",
1507 "(%d)\n", __func__, __LINE__, result); 1641 __func__, result);
1508 goto fail_setup_netdev; 1642 goto fail_setup_netdev;
1509 } 1643 }
1510 1644
1645#ifdef CONFIG_GELIC_WIRELESS
1646 if (gelic_wl_driver_probe(card)) {
1647 dev_dbg(&dev->core, "%s: WL init failed\n", __func__);
1648 goto fail_setup_netdev;
1649 }
1650#endif
1651 pr_debug("%s: done\n", __func__);
1511 return 0; 1652 return 0;
1512 1653
1513fail_setup_netdev: 1654fail_setup_netdev:
1655fail_alloc_skbs:
1656 gelic_card_free_chain(card, card->rx_chain.head);
1657fail_alloc_rx:
1658 gelic_card_free_chain(card, card->tx_chain.head);
1659fail_alloc_tx:
1660 free_irq(card->irq, card);
1661 netdev->irq = NO_IRQ;
1662fail_request_irq:
1663 ps3_sb_event_receive_port_destroy(dev, card->irq);
1664fail_alloc_irq:
1514 lv1_net_set_interrupt_status_indicator(bus_id(card), 1665 lv1_net_set_interrupt_status_indicator(bus_id(card),
1515 bus_id(card), 1666 bus_id(card),
1516 0 , 0); 1667 0, 0);
1517fail_status_indicator: 1668fail_status_indicator:
1669 ps3_system_bus_set_driver_data(dev, NULL);
1670 kfree(netdev_card(netdev)->unalign);
1671 free_netdev(netdev);
1672fail_alloc_card:
1518 ps3_dma_region_free(dev->d_region); 1673 ps3_dma_region_free(dev->d_region);
1519fail_dma_region: 1674fail_dma_region:
1520 ps3_close_hv_device(dev); 1675 ps3_close_hv_device(dev);
1521fail_open: 1676fail_open:
1522 ps3_system_bus_set_driver_data(dev, NULL);
1523 free_netdev(card->netdev);
1524fail_alloc_card:
1525 return result; 1677 return result;
1526} 1678}
1527 1679
@@ -1529,9 +1681,34 @@ fail_alloc_card:
1529 * ps3_gelic_driver_remove - remove a device from the control of this driver 1681 * ps3_gelic_driver_remove - remove a device from the control of this driver
1530 */ 1682 */
1531 1683
1532static int ps3_gelic_driver_remove (struct ps3_system_bus_device *dev) 1684static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
1533{ 1685{
1534 struct gelic_net_card *card = ps3_system_bus_get_driver_data(dev); 1686 struct gelic_card *card = ps3_system_bus_get_driver_data(dev);
1687 struct net_device *netdev0;
1688 pr_debug("%s: called\n", __func__);
1689
1690#ifdef CONFIG_GELIC_WIRELESS
1691 gelic_wl_driver_remove(card);
1692#endif
1693 /* stop interrupt */
1694 gelic_card_set_irq_mask(card, 0);
1695
1696 /* turn off DMA, force end */
1697 gelic_card_disable_rxdmac(card);
1698 gelic_card_disable_txdmac(card);
1699
1700 /* release chains */
1701 gelic_card_release_tx_chain(card, 1);
1702 gelic_card_release_rx_chain(card);
1703
1704 gelic_card_free_chain(card, card->tx_top);
1705 gelic_card_free_chain(card, card->rx_top);
1706
1707 netdev0 = card->netdev[GELIC_PORT_ETHERNET];
1708 /* disconnect event port */
1709 free_irq(card->irq, card);
1710 netdev0->irq = NO_IRQ;
1711 ps3_sb_event_receive_port_destroy(card->dev, card->irq);
1535 1712
1536 wait_event(card->waitq, 1713 wait_event(card->waitq,
1537 atomic_read(&card->tx_timeout_task_counter) == 0); 1714 atomic_read(&card->tx_timeout_task_counter) == 0);
@@ -1539,8 +1716,9 @@ static int ps3_gelic_driver_remove (struct ps3_system_bus_device *dev)
1539 lv1_net_set_interrupt_status_indicator(bus_id(card), dev_id(card), 1716 lv1_net_set_interrupt_status_indicator(bus_id(card), dev_id(card),
1540 0 , 0); 1717 0 , 0);
1541 1718
1542 unregister_netdev(card->netdev); 1719 unregister_netdev(netdev0);
1543 free_netdev(card->netdev); 1720 kfree(netdev_card(netdev0)->unalign);
1721 free_netdev(netdev0);
1544 1722
1545 ps3_system_bus_set_driver_data(dev, NULL); 1723 ps3_system_bus_set_driver_data(dev, NULL);
1546 1724
@@ -1548,6 +1726,7 @@ static int ps3_gelic_driver_remove (struct ps3_system_bus_device *dev)
1548 1726
1549 ps3_close_hv_device(dev); 1727 ps3_close_hv_device(dev);
1550 1728
1729 pr_debug("%s: done\n", __func__);
1551 return 0; 1730 return 0;
1552} 1731}
1553 1732
@@ -1572,8 +1751,8 @@ static void __exit ps3_gelic_driver_exit (void)
1572 ps3_system_bus_driver_unregister(&ps3_gelic_driver); 1751 ps3_system_bus_driver_unregister(&ps3_gelic_driver);
1573} 1752}
1574 1753
1575module_init (ps3_gelic_driver_init); 1754module_init(ps3_gelic_driver_init);
1576module_exit (ps3_gelic_driver_exit); 1755module_exit(ps3_gelic_driver_exit);
1577 1756
1578MODULE_ALIAS(PS3_MODULE_ALIAS_GELIC); 1757MODULE_ALIAS(PS3_MODULE_ALIAS_GELIC);
1579 1758
diff --git a/drivers/net/ps3_gelic_net.h b/drivers/net/ps3_gelic_net.h
index 968560269a3b..1d39d06797e4 100644
--- a/drivers/net/ps3_gelic_net.h
+++ b/drivers/net/ps3_gelic_net.h
@@ -35,198 +35,323 @@
35#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN 35#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN
36#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN 36#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN
37#define GELIC_NET_RXBUF_ALIGN 128 37#define GELIC_NET_RXBUF_ALIGN 128
38#define GELIC_NET_RX_CSUM_DEFAULT 1 /* hw chksum */ 38#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */
39#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ 39#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ
40#define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS) 40#define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS)
41#define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL 41#define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL
42#define GELIC_NET_VLAN_POS (VLAN_ETH_ALEN * 2) 42
43#define GELIC_NET_VLAN_MAX 4
44#define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ 43#define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */
45 44
46enum gelic_net_int0_status { 45/* virtual interrupt status register bits */
47 GELIC_NET_GDTDCEINT = 24, 46 /* INT1 */
48 GELIC_NET_GRFANMINT = 28, 47#define GELIC_CARD_TX_RAM_FULL_ERR 0x0000000000000001L
49}; 48#define GELIC_CARD_RX_RAM_FULL_ERR 0x0000000000000002L
49#define GELIC_CARD_TX_SHORT_FRAME_ERR 0x0000000000000004L
50#define GELIC_CARD_TX_INVALID_DESCR_ERR 0x0000000000000008L
51#define GELIC_CARD_RX_FIFO_FULL_ERR 0x0000000000002000L
52#define GELIC_CARD_RX_DESCR_CHAIN_END 0x0000000000004000L
53#define GELIC_CARD_RX_INVALID_DESCR_ERR 0x0000000000008000L
54#define GELIC_CARD_TX_RESPONCE_ERR 0x0000000000010000L
55#define GELIC_CARD_RX_RESPONCE_ERR 0x0000000000100000L
56#define GELIC_CARD_TX_PROTECTION_ERR 0x0000000000400000L
57#define GELIC_CARD_RX_PROTECTION_ERR 0x0000000004000000L
58#define GELIC_CARD_TX_TCP_UDP_CHECKSUM_ERR 0x0000000008000000L
59#define GELIC_CARD_PORT_STATUS_CHANGED 0x0000000020000000L
60#define GELIC_CARD_WLAN_EVENT_RECEIVED 0x0000000040000000L
61#define GELIC_CARD_WLAN_COMMAND_COMPLETED 0x0000000080000000L
62 /* INT 0 */
63#define GELIC_CARD_TX_FLAGGED_DESCR 0x0004000000000000L
64#define GELIC_CARD_RX_FLAGGED_DESCR 0x0040000000000000L
65#define GELIC_CARD_TX_TRANSFER_END 0x0080000000000000L
66#define GELIC_CARD_TX_DESCR_CHAIN_END 0x0100000000000000L
67#define GELIC_CARD_NUMBER_OF_RX_FRAME 0x1000000000000000L
68#define GELIC_CARD_ONE_TIME_COUNT_TIMER 0x4000000000000000L
69#define GELIC_CARD_FREE_RUN_COUNT_TIMER 0x8000000000000000L
70
71/* initial interrupt mask */
72#define GELIC_CARD_TXINT GELIC_CARD_TX_DESCR_CHAIN_END
50 73
51/* GHIINT1STS bits */ 74#define GELIC_CARD_RXINT (GELIC_CARD_RX_DESCR_CHAIN_END | \
52enum gelic_net_int1_status { 75 GELIC_CARD_NUMBER_OF_RX_FRAME)
53 GELIC_NET_GDADCEINT = 14, 76
77 /* RX descriptor data_status bits */
78enum gelic_descr_rx_status {
79 GELIC_DESCR_RXDMADU = 0x80000000, /* destination MAC addr unknown */
80 GELIC_DESCR_RXLSTFBF = 0x40000000, /* last frame buffer */
81 GELIC_DESCR_RXIPCHK = 0x20000000, /* IP checksum performed */
82 GELIC_DESCR_RXTCPCHK = 0x10000000, /* TCP/UDP checksum performed */
83 GELIC_DESCR_RXWTPKT = 0x00C00000, /*
84 * wakeup trigger packet
85 * 01: Magic Packet (TM)
86 * 10: ARP packet
87 * 11: Multicast MAC addr
88 */
89 GELIC_DESCR_RXVLNPKT = 0x00200000, /* VLAN packet */
90 /* bit 20..16 reserved */
91 GELIC_DESCR_RXRRECNUM = 0x0000ff00, /* reception receipt number */
92 /* bit 7..0 reserved */
54}; 93};
55 94
56/* interrupt mask */ 95#define GELIC_DESCR_DATA_STATUS_CHK_MASK \
57#define GELIC_NET_TXINT (1L << (GELIC_NET_GDTDCEINT + 32)) 96 (GELIC_DESCR_RXIPCHK | GELIC_DESCR_RXTCPCHK)
58 97
59#define GELIC_NET_RXINT0 (1L << (GELIC_NET_GRFANMINT + 32)) 98 /* TX descriptor data_status bits */
60#define GELIC_NET_RXINT1 (1L << GELIC_NET_GDADCEINT) 99enum gelic_descr_tx_status {
61#define GELIC_NET_RXINT (GELIC_NET_RXINT0 | GELIC_NET_RXINT1) 100 GELIC_DESCR_TX_TAIL = 0x00000001, /* gelic treated this
101 * descriptor was end of
102 * a tx frame
103 */
104};
62 105
63 /* RX descriptor data_status bits */ 106/* RX descriptor data error bits */
64#define GELIC_NET_RXDMADU 0x80000000 /* destination MAC addr unknown */ 107enum gelic_descr_rx_error {
65#define GELIC_NET_RXLSTFBF 0x40000000 /* last frame buffer */ 108 /* bit 31 reserved */
66#define GELIC_NET_RXIPCHK 0x20000000 /* IP checksum performed */ 109 GELIC_DESCR_RXALNERR = 0x40000000, /* alignment error 10/100M */
67#define GELIC_NET_RXTCPCHK 0x10000000 /* TCP/UDP checksum performed */ 110 GELIC_DESCR_RXOVERERR = 0x20000000, /* oversize error */
68#define GELIC_NET_RXIPSPKT 0x08000000 /* IPsec packet */ 111 GELIC_DESCR_RXRNTERR = 0x10000000, /* Runt error */
69#define GELIC_NET_RXIPSAHPRT 0x04000000 /* IPsec AH protocol performed */ 112 GELIC_DESCR_RXIPCHKERR = 0x08000000, /* IP checksum error */
70#define GELIC_NET_RXIPSESPPRT 0x02000000 /* IPsec ESP protocol performed */ 113 GELIC_DESCR_RXTCPCHKERR = 0x04000000, /* TCP/UDP checksum error */
71#define GELIC_NET_RXSESPAH 0x01000000 /* 114 GELIC_DESCR_RXDRPPKT = 0x00100000, /* drop packet */
72 * IPsec ESP protocol auth 115 GELIC_DESCR_RXIPFMTERR = 0x00080000, /* IP packet format error */
73 * performed 116 /* bit 18 reserved */
74 */ 117 GELIC_DESCR_RXDATAERR = 0x00020000, /* IP packet format error */
75 118 GELIC_DESCR_RXCALERR = 0x00010000, /* carrier extension length
76#define GELIC_NET_RXWTPKT 0x00C00000 /* 119 * error */
77 * wakeup trigger packet 120 GELIC_DESCR_RXCREXERR = 0x00008000, /* carrier extension error */
78 * 01: Magic Packet (TM) 121 GELIC_DESCR_RXMLTCST = 0x00004000, /* multicast address frame */
79 * 10: ARP packet 122 /* bit 13..0 reserved */
80 * 11: Multicast MAC addr 123};
81 */ 124#define GELIC_DESCR_DATA_ERROR_CHK_MASK \
82#define GELIC_NET_RXVLNPKT 0x00200000 /* VLAN packet */ 125 (GELIC_DESCR_RXIPCHKERR | GELIC_DESCR_RXTCPCHKERR)
83/* bit 20..16 reserved */
84#define GELIC_NET_RXRRECNUM 0x0000ff00 /* reception receipt number */
85#define GELIC_NET_RXRRECNUM_SHIFT 8
86/* bit 7..0 reserved */
87
88#define GELIC_NET_TXDESC_TAIL 0
89#define GELIC_NET_DATA_STATUS_CHK_MASK (GELIC_NET_RXIPCHK | GELIC_NET_RXTCPCHK)
90
91/* RX descriptor data_error bits */
92/* bit 31 reserved */
93#define GELIC_NET_RXALNERR 0x40000000 /* alignment error 10/100M */
94#define GELIC_NET_RXOVERERR 0x20000000 /* oversize error */
95#define GELIC_NET_RXRNTERR 0x10000000 /* Runt error */
96#define GELIC_NET_RXIPCHKERR 0x08000000 /* IP checksum error */
97#define GELIC_NET_RXTCPCHKERR 0x04000000 /* TCP/UDP checksum error */
98#define GELIC_NET_RXUMCHSP 0x02000000 /* unmatched sp on sp */
99#define GELIC_NET_RXUMCHSPI 0x01000000 /* unmatched SPI on SAD */
100#define GELIC_NET_RXUMCHSAD 0x00800000 /* unmatched SAD */
101#define GELIC_NET_RXIPSAHERR 0x00400000 /* auth error on AH protocol
102 * processing */
103#define GELIC_NET_RXIPSESPAHERR 0x00200000 /* auth error on ESP protocol
104 * processing */
105#define GELIC_NET_RXDRPPKT 0x00100000 /* drop packet */
106#define GELIC_NET_RXIPFMTERR 0x00080000 /* IP packet format error */
107/* bit 18 reserved */
108#define GELIC_NET_RXDATAERR 0x00020000 /* IP packet format error */
109#define GELIC_NET_RXCALERR 0x00010000 /* carrier extension length
110 * error */
111#define GELIC_NET_RXCREXERR 0x00008000 /* carrier extension error */
112#define GELIC_NET_RXMLTCST 0x00004000 /* multicast address frame */
113/* bit 13..0 reserved */
114#define GELIC_NET_DATA_ERROR_CHK_MASK \
115 (GELIC_NET_RXIPCHKERR | GELIC_NET_RXTCPCHKERR)
116 126
127/* DMA command and status (RX and TX)*/
128enum gelic_descr_dma_status {
129 GELIC_DESCR_DMA_COMPLETE = 0x00000000, /* used in tx */
130 GELIC_DESCR_DMA_BUFFER_FULL = 0x00000000, /* used in rx */
131 GELIC_DESCR_DMA_RESPONSE_ERROR = 0x10000000, /* used in rx, tx */
132 GELIC_DESCR_DMA_PROTECTION_ERROR = 0x20000000, /* used in rx, tx */
133 GELIC_DESCR_DMA_FRAME_END = 0x40000000, /* used in rx */
134 GELIC_DESCR_DMA_FORCE_END = 0x50000000, /* used in rx, tx */
135 GELIC_DESCR_DMA_CARDOWNED = 0xa0000000, /* used in rx, tx */
136 GELIC_DESCR_DMA_NOT_IN_USE = 0xb0000000, /* any other value */
137};
138
139#define GELIC_DESCR_DMA_STAT_MASK (0xf0000000)
117 140
118/* tx descriptor command and status */ 141/* tx descriptor command and status */
119#define GELIC_NET_DMAC_CMDSTAT_NOCS 0xa0080000 /* middle of frame */ 142enum gelic_descr_tx_dma_status {
120#define GELIC_NET_DMAC_CMDSTAT_TCPCS 0xa00a0000 143 /* [19] */
121#define GELIC_NET_DMAC_CMDSTAT_UDPCS 0xa00b0000 144 GELIC_DESCR_TX_DMA_IKE = 0x00080000, /* IPSEC off */
122#define GELIC_NET_DMAC_CMDSTAT_END_FRAME 0x00040000 /* end of frame */ 145 /* [18] */
123 146 GELIC_DESCR_TX_DMA_FRAME_TAIL = 0x00040000, /* last descriptor of
124#define GELIC_NET_DMAC_CMDSTAT_RXDCEIS 0x00000002 /* descriptor chain end 147 * the packet
125 * interrupt status */ 148 */
126 149 /* [17..16] */
127#define GELIC_NET_DMAC_CMDSTAT_CHAIN_END 0x00000002 /* RXDCEIS:DMA stopped */ 150 GELIC_DESCR_TX_DMA_TCP_CHKSUM = 0x00020000, /* TCP packet */
128#define GELIC_NET_DESCR_IND_PROC_SHIFT 28 151 GELIC_DESCR_TX_DMA_UDP_CHKSUM = 0x00030000, /* UDP packet */
129#define GELIC_NET_DESCR_IND_PROC_MASKO 0x0fffffff 152 GELIC_DESCR_TX_DMA_NO_CHKSUM = 0x00000000, /* no checksum */
130 153
131 154 /* [1] */
132enum gelic_net_descr_status { 155 GELIC_DESCR_TX_DMA_CHAIN_END = 0x00000002, /* DMA terminated
133 GELIC_NET_DESCR_COMPLETE = 0x00, /* used in tx */ 156 * due to chain end
134 GELIC_NET_DESCR_BUFFER_FULL = 0x00, /* used in rx */ 157 */
135 GELIC_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
136 GELIC_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */
137 GELIC_NET_DESCR_FRAME_END = 0x04, /* used in rx */
138 GELIC_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */
139 GELIC_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */
140 GELIC_NET_DESCR_NOT_IN_USE = 0x0b /* any other value */
141}; 158};
159
160#define GELIC_DESCR_DMA_CMD_NO_CHKSUM \
161 (GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
162 GELIC_DESCR_TX_DMA_NO_CHKSUM)
163
164#define GELIC_DESCR_DMA_CMD_TCP_CHKSUM \
165 (GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
166 GELIC_DESCR_TX_DMA_TCP_CHKSUM)
167
168#define GELIC_DESCR_DMA_CMD_UDP_CHKSUM \
169 (GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \
170 GELIC_DESCR_TX_DMA_UDP_CHKSUM)
171
172enum gelic_descr_rx_dma_status {
173 /* [ 1 ] */
174 GELIC_DESCR_RX_DMA_CHAIN_END = 0x00000002, /* DMA terminated
175 * due to chain end
176 */
177};
178
142/* for lv1_net_control */ 179/* for lv1_net_control */
143#define GELIC_NET_GET_MAC_ADDRESS 0x0000000000000001 180enum gelic_lv1_net_control_code {
144#define GELIC_NET_GET_ETH_PORT_STATUS 0x0000000000000002 181 GELIC_LV1_GET_MAC_ADDRESS = 1,
145#define GELIC_NET_SET_NEGOTIATION_MODE 0x0000000000000003 182 GELIC_LV1_GET_ETH_PORT_STATUS = 2,
146#define GELIC_NET_GET_VLAN_ID 0x0000000000000004 183 GELIC_LV1_SET_NEGOTIATION_MODE = 3,
147 184 GELIC_LV1_GET_VLAN_ID = 4,
148#define GELIC_NET_LINK_UP 0x0000000000000001 185 GELIC_LV1_GET_CHANNEL = 6,
149#define GELIC_NET_FULL_DUPLEX 0x0000000000000002 186 GELIC_LV1_POST_WLAN_CMD = 9,
150#define GELIC_NET_AUTO_NEG 0x0000000000000004 187 GELIC_LV1_GET_WLAN_CMD_RESULT = 10,
151#define GELIC_NET_SPEED_10 0x0000000000000010 188 GELIC_LV1_GET_WLAN_EVENT = 11
152#define GELIC_NET_SPEED_100 0x0000000000000020 189};
153#define GELIC_NET_SPEED_1000 0x0000000000000040 190
154 191/* status returned from GET_ETH_PORT_STATUS */
155#define GELIC_NET_VLAN_ALL 0x0000000000000001 192enum gelic_lv1_ether_port_status {
156#define GELIC_NET_VLAN_WIRED 0x0000000000000002 193 GELIC_LV1_ETHER_LINK_UP = 0x0000000000000001L,
157#define GELIC_NET_VLAN_WIRELESS 0x0000000000000003 194 GELIC_LV1_ETHER_FULL_DUPLEX = 0x0000000000000002L,
158#define GELIC_NET_VLAN_PSP 0x0000000000000004 195 GELIC_LV1_ETHER_AUTO_NEG = 0x0000000000000004L,
159#define GELIC_NET_VLAN_PORT0 0x0000000000000010 196
160#define GELIC_NET_VLAN_PORT1 0x0000000000000011 197 GELIC_LV1_ETHER_SPEED_10 = 0x0000000000000010L,
161#define GELIC_NET_VLAN_PORT2 0x0000000000000012 198 GELIC_LV1_ETHER_SPEED_100 = 0x0000000000000020L,
162#define GELIC_NET_VLAN_DAEMON_CLIENT_BSS 0x0000000000000013 199 GELIC_LV1_ETHER_SPEED_1000 = 0x0000000000000040L,
163#define GELIC_NET_VLAN_LIBERO_CLIENT_BSS 0x0000000000000014 200 GELIC_LV1_ETHER_SPEED_MASK = 0x0000000000000070L
164#define GELIC_NET_VLAN_NO_ENTRY -6 201};
165 202
166#define GELIC_NET_PORT 2 /* for port status */ 203enum gelic_lv1_vlan_index {
204 /* for outgoing packets */
205 GELIC_LV1_VLAN_TX_ETHERNET = 0x0000000000000002L,
206 GELIC_LV1_VLAN_TX_WIRELESS = 0x0000000000000003L,
207 /* for incoming packets */
208 GELIC_LV1_VLAN_RX_ETHERNET = 0x0000000000000012L,
209 GELIC_LV1_VLAN_RX_WIRELESS = 0x0000000000000013L
210};
167 211
168/* size of hardware part of gelic descriptor */ 212/* size of hardware part of gelic descriptor */
169#define GELIC_NET_DESCR_SIZE (32) 213#define GELIC_DESCR_SIZE (32)
170struct gelic_net_descr { 214
215enum gelic_port_type {
216 GELIC_PORT_ETHERNET = 0,
217 GELIC_PORT_WIRELESS = 1,
218 GELIC_PORT_MAX
219};
220
221struct gelic_descr {
171 /* as defined by the hardware */ 222 /* as defined by the hardware */
172 u32 buf_addr; 223 __be32 buf_addr;
173 u32 buf_size; 224 __be32 buf_size;
174 u32 next_descr_addr; 225 __be32 next_descr_addr;
175 u32 dmac_cmd_status; 226 __be32 dmac_cmd_status;
176 u32 result_size; 227 __be32 result_size;
177 u32 valid_size; /* all zeroes for tx */ 228 __be32 valid_size; /* all zeroes for tx */
178 u32 data_status; 229 __be32 data_status;
179 u32 data_error; /* all zeroes for tx */ 230 __be32 data_error; /* all zeroes for tx */
180 231
181 /* used in the driver */ 232 /* used in the driver */
182 struct sk_buff *skb; 233 struct sk_buff *skb;
183 dma_addr_t bus_addr; 234 dma_addr_t bus_addr;
184 struct gelic_net_descr *next; 235 struct gelic_descr *next;
185 struct gelic_net_descr *prev; 236 struct gelic_descr *prev;
186 struct vlan_ethhdr vlan;
187} __attribute__((aligned(32))); 237} __attribute__((aligned(32)));
188 238
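Typing the hardware half of gelic_descr as __be32 lets sparse flag any access that forgets the byte swap: reads go through be32_to_cpu() and writes through cpu_to_be32(), exactly as the .c hunks above now do. Two illustrative accessors (not part of the patch):

static inline u32 descr_result_size(const struct gelic_descr *descr)
{
	return be32_to_cpu(descr->result_size);
}

static inline void descr_link_next(struct gelic_descr *descr,
				   dma_addr_t next_bus_addr)
{
	descr->next_descr_addr = cpu_to_be32(next_bus_addr);
}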
189struct gelic_net_descr_chain { 239struct gelic_descr_chain {
190 /* we walk from tail to head */ 240 /* we walk from tail to head */
191 struct gelic_net_descr *head; 241 struct gelic_descr *head;
192 struct gelic_net_descr *tail; 242 struct gelic_descr *tail;
193}; 243};
194 244
195struct gelic_net_card { 245struct gelic_vlan_id {
196 struct net_device *netdev; 246 u16 tx;
247 u16 rx;
248};
249
250struct gelic_card {
197 struct napi_struct napi; 251 struct napi_struct napi;
252 struct net_device *netdev[GELIC_PORT_MAX];
198 /* 253 /*
199 * hypervisor requires irq_status should be 254 * hypervisor requires irq_status should be
200 * 8 bytes aligned, but u64 member is 255 * 8 bytes aligned, but u64 member is
201 * always disposed in that manner 256 * always disposed in that manner
202 */ 257 */
203 u64 irq_status; 258 u64 irq_status;
204 u64 ghiintmask; 259 u64 irq_mask;
205 260
206 struct ps3_system_bus_device *dev; 261 struct ps3_system_bus_device *dev;
207 u32 vlan_id[GELIC_NET_VLAN_MAX]; 262 struct gelic_vlan_id vlan[GELIC_PORT_MAX];
208 int vlan_index; 263 int vlan_required;
209 264
210 struct gelic_net_descr_chain tx_chain; 265 struct gelic_descr_chain tx_chain;
211 struct gelic_net_descr_chain rx_chain; 266 struct gelic_descr_chain rx_chain;
212 int rx_dma_restart_required; 267 int rx_dma_restart_required;
213 /* gurad dmac descriptor chain*/
214 spinlock_t chain_lock;
215
216 int rx_csum; 268 int rx_csum;
217 /* guard tx_dma_progress */ 269 /*
218 spinlock_t tx_dma_lock; 270 * tx_lock guards tx descriptor list and
271 * tx_dma_progress.
272 */
273 spinlock_t tx_lock;
219 int tx_dma_progress; 274 int tx_dma_progress;
220 275
221 struct work_struct tx_timeout_task; 276 struct work_struct tx_timeout_task;
222 atomic_t tx_timeout_task_counter; 277 atomic_t tx_timeout_task_counter;
223 wait_queue_head_t waitq; 278 wait_queue_head_t waitq;
224 279
225 struct gelic_net_descr *tx_top, *rx_top; 280 /* only first user should up the card */
226 struct gelic_net_descr descr[0]; 281 struct semaphore updown_lock;
282 atomic_t users;
283
284 u64 ether_port_status;
285 /* original address returned by kzalloc */
286 void *unalign;
287
288 /*
289 * each netdevice has copy of irq
290 */
291 unsigned int irq;
292 struct gelic_descr *tx_top, *rx_top;
293 struct gelic_descr descr[0]; /* must be the last */
294};
295
296struct gelic_port {
297 struct gelic_card *card;
298 struct net_device *netdev;
299 enum gelic_port_type type;
300 long priv[0]; /* long for alignment */
227}; 301};
228 302
303static inline struct gelic_card *port_to_card(struct gelic_port *p)
304{
305 return p->card;
306}
307static inline struct net_device *port_to_netdev(struct gelic_port *p)
308{
309 return p->netdev;
310}
311static inline struct gelic_card *netdev_card(struct net_device *d)
312{
313 return ((struct gelic_port *)netdev_priv(d))->card;
314}
315static inline struct gelic_port *netdev_port(struct net_device *d)
316{
317 return (struct gelic_port *)netdev_priv(d);
318}
319static inline struct device *ctodev(struct gelic_card *card)
320{
321 return &card->dev->core;
322}
323static inline u64 bus_id(struct gelic_card *card)
324{
325 return card->dev->bus_id;
326}
327static inline u64 dev_id(struct gelic_card *card)
328{
329 return card->dev->dev_id;
330}
331
332static inline void *port_priv(struct gelic_port *port)
333{
334 return port->priv;
335}
336
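/*
 * Editorial illustration, not part of this patch: how a port-specific
 * state structure hangs off gelic_port::priv.  netdev_priv() yields the
 * struct gelic_port embedded in the net_device, and port_priv() returns
 * the trailing priv[] area.  The wireless code reaches its state as
 * port_wl(netdev_port(netdev)); wl_port()/port_wl() are defined in
 * ps3_gelic_wireless.h (not shown in this hunk), so struct example_state
 * and example_netdev_state() below are hypothetical names used only for
 * this sketch.
 */
struct example_state {
	int dummy;	/* stand-in for real per-port state */
};

static inline struct example_state *example_netdev_state(struct net_device *netdev)
{
	/* net_device -> embedded gelic_port -> trailing priv[] area */
	return port_priv(netdev_port(netdev));
}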
337extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
338/* shared netdev ops */
339extern void gelic_card_up(struct gelic_card *card);
340extern void gelic_card_down(struct gelic_card *card);
341extern int gelic_net_open(struct net_device *netdev);
342extern int gelic_net_stop(struct net_device *netdev);
343extern int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
344extern void gelic_net_set_multi(struct net_device *netdev);
345extern void gelic_net_tx_timeout(struct net_device *netdev);
346extern int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
347extern int gelic_net_setup_netdev(struct net_device *netdev,
348 struct gelic_card *card);
229 349
230extern unsigned long p_to_lp(long pa); 350/* shared ethtool ops */
351extern void gelic_net_get_drvinfo(struct net_device *netdev,
352 struct ethtool_drvinfo *info);
353extern u32 gelic_net_get_rx_csum(struct net_device *netdev);
354extern int gelic_net_set_rx_csum(struct net_device *netdev, u32 data);
355extern void gelic_net_poll_controller(struct net_device *netdev);
231 356
232#endif /* _GELIC_NET_H */ 357#endif /* _GELIC_NET_H */
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
new file mode 100644
index 000000000000..750d2a99cb4f
--- /dev/null
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -0,0 +1,2753 @@
1/*
2 * PS3 gelic network driver.
3 *
4 * Copyright (C) 2007 Sony Computer Entertainment Inc.
5 * Copyright 2007 Sony Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#undef DEBUG
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24
25#include <linux/etherdevice.h>
26#include <linux/ethtool.h>
27#include <linux/if_vlan.h>
28
29#include <linux/in.h>
30#include <linux/ip.h>
31#include <linux/tcp.h>
32#include <linux/wireless.h>
33#include <linux/ctype.h>
34#include <linux/string.h>
35#include <net/iw_handler.h>
36#include <net/ieee80211.h>
37
38#include <linux/dma-mapping.h>
39#include <net/checksum.h>
40#include <asm/firmware.h>
41#include <asm/ps3.h>
42#include <asm/lv1call.h>
43
44#include "ps3_gelic_net.h"
45#include "ps3_gelic_wireless.h"
46
47
48static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan);
49static int gelic_wl_try_associate(struct net_device *netdev);
50
51/*
52 * tables
53 */
54
55/* 802.11b/g channel to freq in MHz */
56static const int channel_freq[] = {
57 2412, 2417, 2422, 2427, 2432,
58 2437, 2442, 2447, 2452, 2457,
59 2462, 2467, 2472, 2484
60};
61#define NUM_CHANNELS ARRAY_SIZE(channel_freq)
62
63/* in bps */
64static const int bitrate_list[] = {
65 1000000,
66 2000000,
67 5500000,
68 11000000,
69 6000000,
70 9000000,
71 12000000,
72 18000000,
73 24000000,
74 36000000,
75 48000000,
76 54000000
77};
78#define NUM_BITRATES ARRAY_SIZE(bitrate_list)
79
80/*
81 * wpa2 support requires the hypervisor version 2.0 or later
82 */
83static inline int wpa2_capable(void)
84{
85 return (0 <= ps3_compare_firmware_version(2, 0, 0));
86}
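/*
 * Editorial illustration, not part of the driver: the same
 * firmware-version gate can be written for any minimum hypervisor
 * release.  ps3_compare_firmware_version() compares the running
 * firmware against the given version and returns <0, 0 or >0
 * (strcmp-style), so ">= 2.0.0" is the test wpa2_capable() performs.
 * example_fw_at_least() is a hypothetical helper for this sketch only.
 */
static inline int example_fw_at_least(int major, int minor, int rev)
{
	return 0 <= ps3_compare_firmware_version(major, minor, rev);
}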
87
88static inline int precise_ie(void)
89{
90 return 0; /* FIXME */
91}
92/*
93 * post_eurus_cmd helpers
94 */
95struct eurus_cmd_arg_info {
 96	int pre_arg; /* command requires arg1, arg2 at POST COMMAND */
97 int post_arg; /* command requires arg1, arg2 at GET_RESULT */
98};
99
100static const struct eurus_cmd_arg_info cmd_info[GELIC_EURUS_CMD_MAX_INDEX] = {
101 [GELIC_EURUS_CMD_SET_COMMON_CFG] = { .pre_arg = 1},
102 [GELIC_EURUS_CMD_SET_WEP_CFG] = { .pre_arg = 1},
103 [GELIC_EURUS_CMD_SET_WPA_CFG] = { .pre_arg = 1},
104 [GELIC_EURUS_CMD_GET_COMMON_CFG] = { .post_arg = 1},
105 [GELIC_EURUS_CMD_GET_WEP_CFG] = { .post_arg = 1},
106 [GELIC_EURUS_CMD_GET_WPA_CFG] = { .post_arg = 1},
107 [GELIC_EURUS_CMD_GET_RSSI_CFG] = { .post_arg = 1},
108 [GELIC_EURUS_CMD_GET_SCAN] = { .post_arg = 1},
109};
110
111#ifdef DEBUG
112static const char *cmdstr(enum gelic_eurus_command ix)
113{
114 switch (ix) {
115 case GELIC_EURUS_CMD_ASSOC:
116 return "ASSOC";
117 case GELIC_EURUS_CMD_DISASSOC:
118 return "DISASSOC";
119 case GELIC_EURUS_CMD_START_SCAN:
120 return "SCAN";
121 case GELIC_EURUS_CMD_GET_SCAN:
122 return "GET SCAN";
123 case GELIC_EURUS_CMD_SET_COMMON_CFG:
124 return "SET_COMMON_CFG";
125 case GELIC_EURUS_CMD_GET_COMMON_CFG:
126 return "GET_COMMON_CFG";
127 case GELIC_EURUS_CMD_SET_WEP_CFG:
128 return "SET_WEP_CFG";
129 case GELIC_EURUS_CMD_GET_WEP_CFG:
130 return "GET_WEP_CFG";
131 case GELIC_EURUS_CMD_SET_WPA_CFG:
132 return "SET_WPA_CFG";
133 case GELIC_EURUS_CMD_GET_WPA_CFG:
134 return "GET_WPA_CFG";
135 case GELIC_EURUS_CMD_GET_RSSI_CFG:
136 return "GET_RSSI";
137 default:
138 break;
139 }
140 return "";
141};
142#else
143static inline const char *cmdstr(enum gelic_eurus_command ix)
144{
145 return "";
146}
147#endif
148
149/* synchronously do eurus commands */
150static void gelic_eurus_sync_cmd_worker(struct work_struct *work)
151{
152 struct gelic_eurus_cmd *cmd;
153 struct gelic_card *card;
154 struct gelic_wl_info *wl;
155
156 u64 arg1, arg2;
157
158 pr_debug("%s: <-\n", __func__);
159 cmd = container_of(work, struct gelic_eurus_cmd, work);
160 BUG_ON(cmd_info[cmd->cmd].pre_arg &&
161 cmd_info[cmd->cmd].post_arg);
162 wl = cmd->wl;
163 card = port_to_card(wl_port(wl));
164
165 if (cmd_info[cmd->cmd].pre_arg) {
166 arg1 = ps3_mm_phys_to_lpar(__pa(cmd->buffer));
167 arg2 = cmd->buf_size;
168 } else {
169 arg1 = 0;
170 arg2 = 0;
171 }
172 init_completion(&wl->cmd_done_intr);
173 pr_debug("%s: cmd='%s' start\n", __func__, cmdstr(cmd->cmd));
174 cmd->status = lv1_net_control(bus_id(card), dev_id(card),
175 GELIC_LV1_POST_WLAN_CMD,
176 cmd->cmd, arg1, arg2,
177 &cmd->tag, &cmd->size);
178 if (cmd->status) {
179 complete(&cmd->done);
180 pr_info("%s: cmd issue failed\n", __func__);
181 return;
182 }
183
184 wait_for_completion(&wl->cmd_done_intr);
185
186 if (cmd_info[cmd->cmd].post_arg) {
187 arg1 = ps3_mm_phys_to_lpar(__pa(cmd->buffer));
188 arg2 = cmd->buf_size;
189 } else {
190 arg1 = 0;
191 arg2 = 0;
192 }
193
194 cmd->status = lv1_net_control(bus_id(card), dev_id(card),
195 GELIC_LV1_GET_WLAN_CMD_RESULT,
196 cmd->tag, arg1, arg2,
197 &cmd->cmd_status, &cmd->size);
198#ifdef DEBUG
199 if (cmd->status || cmd->cmd_status) {
200 pr_debug("%s: cmd done tag=%#lx arg1=%#lx, arg2=%#lx\n", __func__,
201 cmd->tag, arg1, arg2);
202 pr_debug("%s: cmd done status=%#x cmd_status=%#lx size=%#lx\n",
203 __func__, cmd->status, cmd->cmd_status, cmd->size);
204 }
205#endif
206 complete(&cmd->done);
207 pr_debug("%s: cmd='%s' done\n", __func__, cmdstr(cmd->cmd));
208}
209
210static struct gelic_eurus_cmd *gelic_eurus_sync_cmd(struct gelic_wl_info *wl,
211 unsigned int eurus_cmd,
212 void *buffer,
213 unsigned int buf_size)
214{
215 struct gelic_eurus_cmd *cmd;
216
217 /* allocate cmd */
218 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
219 if (!cmd)
220 return NULL;
221
222 /* initialize members */
223 cmd->cmd = eurus_cmd;
224 cmd->buffer = buffer;
225 cmd->buf_size = buf_size;
226 cmd->wl = wl;
227 INIT_WORK(&cmd->work, gelic_eurus_sync_cmd_worker);
228 init_completion(&cmd->done);
229 queue_work(wl->eurus_cmd_queue, &cmd->work);
230
231 /* wait for command completion */
232 wait_for_completion(&cmd->done);
233
234 return cmd;
235}
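/*
 * Editorial illustration, not part of the driver: the calling
 * convention for gelic_eurus_sync_cmd().  The caller owns the returned
 * command, must check both status (result of the lv1_net_control call)
 * and cmd_status (result reported by the wireless firmware), and must
 * kfree() it; gelic_wl_get_wireless_stats() below is a real user of
 * this pattern.  example_get_rssi() is a hypothetical helper written
 * only for this sketch.
 */
static int example_get_rssi(struct gelic_wl_info *wl, u16 *rssi_out)
{
	struct gelic_eurus_cmd *cmd;
	struct gelic_eurus_rssi_info *rssi;
	int ret = 0;

	cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_RSSI_CFG,
				   wl->buf, sizeof(*rssi));
	if (!cmd)
		return -ENOMEM;		/* allocation failed */

	if (cmd->status || cmd->cmd_status) {
		ret = -EIO;		/* hypervisor or firmware rejected it */
	} else {
		rssi = wl->buf;
		*rssi_out = be16_to_cpu(rssi->rssi);
	}
	kfree(cmd);			/* the caller always frees the command */
	return ret;
}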
236
237static u32 gelic_wl_get_link(struct net_device *netdev)
238{
239 struct gelic_wl_info *wl = port_wl(netdev_port(netdev));
240 u32 ret;
241
242 pr_debug("%s: <-\n", __func__);
243 down(&wl->assoc_stat_lock);
244 if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED)
245 ret = 1;
246 else
247 ret = 0;
248 up(&wl->assoc_stat_lock);
249 pr_debug("%s: ->\n", __func__);
250 return ret;
251}
252
253static void gelic_wl_send_iwap_event(struct gelic_wl_info *wl, u8 *bssid)
254{
255 union iwreq_data data;
256
257 memset(&data, 0, sizeof(data));
258 if (bssid)
259 memcpy(data.ap_addr.sa_data, bssid, ETH_ALEN);
260 data.ap_addr.sa_family = ARPHRD_ETHER;
261 wireless_send_event(port_to_netdev(wl_port(wl)), SIOCGIWAP,
262 &data, NULL);
263}
264
265/*
266 * wireless extension handlers and helpers
267 */
268
269/* SIOGIWNAME */
270static int gelic_wl_get_name(struct net_device *dev,
271 struct iw_request_info *info,
272 union iwreq_data *iwreq, char *extra)
273{
274 strcpy(iwreq->name, "IEEE 802.11bg");
275 return 0;
276}
277
278static void gelic_wl_get_ch_info(struct gelic_wl_info *wl)
279{
280 struct gelic_card *card = port_to_card(wl_port(wl));
281 u64 ch_info_raw, tmp;
282 int status;
283
284 if (!test_and_set_bit(GELIC_WL_STAT_CH_INFO, &wl->stat)) {
285 status = lv1_net_control(bus_id(card), dev_id(card),
286 GELIC_LV1_GET_CHANNEL, 0, 0, 0,
287 &ch_info_raw,
288 &tmp);
289 /* some fw versions may return error */
290 if (status) {
291 if (status != LV1_NO_ENTRY)
292 pr_info("%s: available ch unknown\n", __func__);
293 wl->ch_info = 0x07ff;/* 11 ch */
294 } else
 295			/* the upper 16 bits hold the available channels */
296 wl->ch_info = ch_info_raw >> 48;
297 }
298 return;
299}
300
301/* SIOGIWRANGE */
302static int gelic_wl_get_range(struct net_device *netdev,
303 struct iw_request_info *info,
304 union iwreq_data *iwreq, char *extra)
305{
306 struct iw_point *point = &iwreq->data;
307 struct iw_range *range = (struct iw_range *)extra;
308 struct gelic_wl_info *wl = port_wl(netdev_port(netdev));
309 unsigned int i, chs;
310
311 pr_debug("%s: <-\n", __func__);
312 point->length = sizeof(struct iw_range);
313 memset(range, 0, sizeof(struct iw_range));
314
315 range->we_version_compiled = WIRELESS_EXT;
316 range->we_version_source = 22;
317
318 /* available channels and frequencies */
319 gelic_wl_get_ch_info(wl);
320
321 for (i = 0, chs = 0;
322 i < NUM_CHANNELS && chs < IW_MAX_FREQUENCIES; i++)
323 if (wl->ch_info & (1 << i)) {
324 range->freq[chs].i = i + 1;
325 range->freq[chs].m = channel_freq[i];
326 range->freq[chs].e = 6;
327 chs++;
328 }
329 range->num_frequency = chs;
330 range->old_num_frequency = chs;
331 range->num_channels = chs;
332 range->old_num_channels = chs;
333
334 /* bitrates */
335 for (i = 0; i < NUM_BITRATES; i++)
336 range->bitrate[i] = bitrate_list[i];
337 range->num_bitrates = i;
338
339 /* signal levels */
340 range->max_qual.qual = 100; /* relative value */
341 range->max_qual.level = 100;
342 range->avg_qual.qual = 50;
343 range->avg_qual.level = 50;
344 range->sensitivity = 0;
345
346 /* Event capability */
347 IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
348 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
349 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
350
351 /* encryption capability */
352 range->enc_capa = IW_ENC_CAPA_WPA |
353 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
354 if (wpa2_capable())
355 range->enc_capa |= IW_ENC_CAPA_WPA2;
356 range->encoding_size[0] = 5; /* 40bit WEP */
357 range->encoding_size[1] = 13; /* 104bit WEP */
358 range->encoding_size[2] = 32; /* WPA-PSK */
359 range->num_encoding_sizes = 3;
360 range->max_encoding_tokens = GELIC_WEP_KEYS;
361
362 pr_debug("%s: ->\n", __func__);
363 return 0;
364
365}
366
367/* SIOC{G,S}IWSCAN */
368static int gelic_wl_set_scan(struct net_device *netdev,
369 struct iw_request_info *info,
370 union iwreq_data *wrqu, char *extra)
371{
372 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
373
374 return gelic_wl_start_scan(wl, 1);
375}
376
377#define OUI_LEN 3
378static const u8 rsn_oui[OUI_LEN] = { 0x00, 0x0f, 0xac };
379static const u8 wpa_oui[OUI_LEN] = { 0x00, 0x50, 0xf2 };
380
381/*
382 * synthesize WPA/RSN IE data
383 * See WiFi WPA specification and IEEE 802.11-2007 7.3.2.25
384 * for the format
385 */
386static size_t gelic_wl_synthesize_ie(u8 *buf,
387 struct gelic_eurus_scan_info *scan)
388{
389
390 const u8 *oui_header;
391 u8 *start = buf;
392 int rsn;
393 int ccmp;
394
395 pr_debug("%s: <- sec=%16x\n", __func__, scan->security);
396 switch (be16_to_cpu(scan->security) & GELIC_EURUS_SCAN_SEC_MASK) {
397 case GELIC_EURUS_SCAN_SEC_WPA:
398 rsn = 0;
399 break;
400 case GELIC_EURUS_SCAN_SEC_WPA2:
401 rsn = 1;
402 break;
403 default:
404 /* WEP or none. No IE returned */
405 return 0;
406 }
407
408 switch (be16_to_cpu(scan->security) & GELIC_EURUS_SCAN_SEC_WPA_MASK) {
409 case GELIC_EURUS_SCAN_SEC_WPA_TKIP:
410 ccmp = 0;
411 break;
412 case GELIC_EURUS_SCAN_SEC_WPA_AES:
413 ccmp = 1;
414 break;
415 default:
416 if (rsn) {
417 ccmp = 1;
418 pr_info("%s: no cipher info. defaulted to CCMP\n",
419 __func__);
420 } else {
421 ccmp = 0;
422 pr_info("%s: no cipher info. defaulted to TKIP\n",
423 __func__);
424 }
425 }
426
427 if (rsn)
428 oui_header = rsn_oui;
429 else
430 oui_header = wpa_oui;
431
432 /* element id */
433 if (rsn)
434 *buf++ = MFIE_TYPE_RSN;
435 else
436 *buf++ = MFIE_TYPE_GENERIC;
437
 438	/* length field; set later */
439 buf++;
440
441 /* wpa special header */
442 if (!rsn) {
443 memcpy(buf, wpa_oui, OUI_LEN);
444 buf += OUI_LEN;
445 *buf++ = 0x01;
446 }
447
448 /* version */
449 *buf++ = 0x01; /* version 1.0 */
450 *buf++ = 0x00;
451
452 /* group cipher */
453 memcpy(buf, oui_header, OUI_LEN);
454 buf += OUI_LEN;
455
456 if (ccmp)
457 *buf++ = 0x04; /* CCMP */
458 else
459 *buf++ = 0x02; /* TKIP */
460
461 /* pairwise key count always 1 */
462 *buf++ = 0x01;
463 *buf++ = 0x00;
464
 465	/* pairwise key suite */
466 memcpy(buf, oui_header, OUI_LEN);
467 buf += OUI_LEN;
468 if (ccmp)
469 *buf++ = 0x04; /* CCMP */
470 else
471 *buf++ = 0x02; /* TKIP */
472
473 /* AKM count is 1 */
474 *buf++ = 0x01;
475 *buf++ = 0x00;
476
 477	/* AKM suite is assumed to be PSK */
478 memcpy(buf, oui_header, OUI_LEN);
479 buf += OUI_LEN;
480 *buf++ = 0x02; /* PSK */
481
 482	/* RSN capabilities field is 0 */
483 *buf++ = 0x00;
484 *buf++ = 0x00;
485
486 /* set length field */
487 start[1] = (buf - start - 2);
488
489 pr_debug("%s: ->\n", __func__);
490 return (buf - start);
491}
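/*
 * Editorial illustration, not part of the driver: the exact bytes
 * gelic_wl_synthesize_ie() emits for a WPA(TKIP) network.  It is a
 * vendor-specific element (MFIE_TYPE_GENERIC, element ID 0xdd)
 * carrying the WPA OUI 00:50:f2, TKIP (0x02) as both group and
 * pairwise cipher, and PSK as the AKM.  Note that this builder also
 * appends the two capability bytes, so the length field is 24 rather
 * than the 22 bytes of a minimal WPA IE.  The array name is
 * hypothetical and used only for this sketch.
 */
static const u8 example_wpa_tkip_ie[] = {
	0xdd, 0x18,			/* element ID, length 24        */
	0x00, 0x50, 0xf2, 0x01,		/* WPA OUI + OUI type 1         */
	0x01, 0x00,			/* version 1.0                  */
	0x00, 0x50, 0xf2, 0x02,		/* group cipher: TKIP           */
	0x01, 0x00,			/* pairwise suite count = 1     */
	0x00, 0x50, 0xf2, 0x02,		/* pairwise cipher: TKIP        */
	0x01, 0x00,			/* AKM suite count = 1          */
	0x00, 0x50, 0xf2, 0x02,		/* AKM: PSK                     */
	0x00, 0x00,			/* capabilities                 */
};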
492
493struct ie_item {
494 u8 *data;
495 u8 len;
496};
497
498struct ie_info {
499 struct ie_item wpa;
500 struct ie_item rsn;
501};
502
503static void gelic_wl_parse_ie(u8 *data, size_t len,
504 struct ie_info *ie_info)
505{
506 size_t data_left = len;
507 u8 *pos = data;
508 u8 item_len;
509 u8 item_id;
510
 511	pr_debug("%s: data=%p len=%zu\n", __func__,
512 data, len);
513 memset(ie_info, 0, sizeof(struct ie_info));
514
515 while (0 < data_left) {
516 item_id = *pos++;
517 item_len = *pos++;
518
519 switch (item_id) {
520 case MFIE_TYPE_GENERIC:
521 if (!memcmp(pos, wpa_oui, OUI_LEN) &&
522 pos[OUI_LEN] == 0x01) {
523 ie_info->wpa.data = pos - 2;
524 ie_info->wpa.len = item_len + 2;
525 }
526 break;
527 case MFIE_TYPE_RSN:
528 ie_info->rsn.data = pos - 2;
529 /* length includes the header */
530 ie_info->rsn.len = item_len + 2;
531 break;
532 default:
533 pr_debug("%s: ignore %#x,%d\n", __func__,
534 item_id, item_len);
535 break;
536 }
537 pos += item_len;
538 data_left -= item_len + 2;
539 }
540 pr_debug("%s: wpa=%p,%d wpa2=%p,%d\n", __func__,
541 ie_info->wpa.data, ie_info->wpa.len,
542 ie_info->rsn.data, ie_info->rsn.len);
543}
544
545
546/*
 547 * translate the scan information from the hypervisor into an
 548 * independent format
549 */
550static char *gelic_wl_translate_scan(struct net_device *netdev,
551 char *ev,
552 char *stop,
553 struct gelic_wl_scan_info *network)
554{
555 struct iw_event iwe;
556 struct gelic_eurus_scan_info *scan = network->hwinfo;
557 char *tmp;
558 u8 rate;
559 unsigned int i, j, len;
560 u8 buf[MAX_WPA_IE_LEN];
561
562 pr_debug("%s: <-\n", __func__);
563
564 /* first entry should be AP's mac address */
565 iwe.cmd = SIOCGIWAP;
566 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
567 memcpy(iwe.u.ap_addr.sa_data, &scan->bssid[2], ETH_ALEN);
568 ev = iwe_stream_add_event(ev, stop, &iwe, IW_EV_ADDR_LEN);
569
570 /* ESSID */
571 iwe.cmd = SIOCGIWESSID;
572 iwe.u.data.flags = 1;
573 iwe.u.data.length = strnlen(scan->essid, 32);
574 ev = iwe_stream_add_point(ev, stop, &iwe, scan->essid);
575
576 /* FREQUENCY */
577 iwe.cmd = SIOCGIWFREQ;
578 iwe.u.freq.m = be16_to_cpu(scan->channel);
579 iwe.u.freq.e = 0; /* table value in MHz */
580 iwe.u.freq.i = 0;
581 ev = iwe_stream_add_event(ev, stop, &iwe, IW_EV_FREQ_LEN);
582
583 /* RATES */
584 iwe.cmd = SIOCGIWRATE;
585 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
586 /* to stuff multiple values in one event */
587 tmp = ev + IW_EV_LCP_LEN;
 588	/* put them in ascending order (slowest first) */
589 i = 0;
590 j = 0;
591 pr_debug("%s: rates=%d rate=%d\n", __func__,
592 network->rate_len, network->rate_ext_len);
593 while (i < network->rate_len) {
594 if (j < network->rate_ext_len &&
595 ((scan->ext_rate[j] & 0x7f) < (scan->rate[i] & 0x7f)))
596 rate = scan->ext_rate[j++] & 0x7f;
597 else
598 rate = scan->rate[i++] & 0x7f;
599 iwe.u.bitrate.value = rate * 500000; /* 500kbps unit */
600 tmp = iwe_stream_add_value(ev, tmp, stop, &iwe,
601 IW_EV_PARAM_LEN);
602 }
603 while (j < network->rate_ext_len) {
604 iwe.u.bitrate.value = (scan->ext_rate[j++] & 0x7f) * 500000;
605 tmp = iwe_stream_add_value(ev, tmp, stop, &iwe,
606 IW_EV_PARAM_LEN);
607 }
608 /* Check if we added any rate */
609 if (IW_EV_LCP_LEN < (tmp - ev))
610 ev = tmp;
611
612 /* ENCODE */
613 iwe.cmd = SIOCGIWENCODE;
614 if (be16_to_cpu(scan->capability) & WLAN_CAPABILITY_PRIVACY)
615 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
616 else
617 iwe.u.data.flags = IW_ENCODE_DISABLED;
618 iwe.u.data.length = 0;
619 ev = iwe_stream_add_point(ev, stop, &iwe, scan->essid);
620
621 /* MODE */
622 iwe.cmd = SIOCGIWMODE;
623 if (be16_to_cpu(scan->capability) &
624 (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
625 if (be16_to_cpu(scan->capability) & WLAN_CAPABILITY_ESS)
626 iwe.u.mode = IW_MODE_MASTER;
627 else
628 iwe.u.mode = IW_MODE_ADHOC;
629 ev = iwe_stream_add_event(ev, stop, &iwe, IW_EV_UINT_LEN);
630 }
631
632 /* QUAL */
633 iwe.cmd = IWEVQUAL;
634 iwe.u.qual.updated = IW_QUAL_ALL_UPDATED |
635 IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID;
636 iwe.u.qual.level = be16_to_cpu(scan->rssi);
637 iwe.u.qual.qual = be16_to_cpu(scan->rssi);
638 iwe.u.qual.noise = 0;
639 ev = iwe_stream_add_event(ev, stop, &iwe, IW_EV_QUAL_LEN);
640
641 /* RSN */
642 memset(&iwe, 0, sizeof(iwe));
643 if (be16_to_cpu(scan->size) <= sizeof(*scan)) {
 644		/* If the AP is WPA[2] capable, synthesize an IE and add it */
645 len = gelic_wl_synthesize_ie(buf, scan);
646 if (len) {
647 iwe.cmd = IWEVGENIE;
648 iwe.u.data.length = len;
649 ev = iwe_stream_add_point(ev, stop, &iwe, buf);
650 }
651 } else {
652 /* this scan info has IE data */
653 struct ie_info ie_info;
654 size_t data_len;
655
656 data_len = be16_to_cpu(scan->size) - sizeof(*scan);
657
658 gelic_wl_parse_ie(scan->elements, data_len, &ie_info);
659
660 if (ie_info.wpa.len && (ie_info.wpa.len <= sizeof(buf))) {
661 memcpy(buf, ie_info.wpa.data, ie_info.wpa.len);
662 iwe.cmd = IWEVGENIE;
663 iwe.u.data.length = ie_info.wpa.len;
664 ev = iwe_stream_add_point(ev, stop, &iwe, buf);
665 }
666
667 if (ie_info.rsn.len && (ie_info.rsn.len <= sizeof(buf))) {
668 memset(&iwe, 0, sizeof(iwe));
669 memcpy(buf, ie_info.rsn.data, ie_info.rsn.len);
670 iwe.cmd = IWEVGENIE;
671 iwe.u.data.length = ie_info.rsn.len;
672 ev = iwe_stream_add_point(ev, stop, &iwe, buf);
673 }
674 }
675
676 pr_debug("%s: ->\n", __func__);
677 return ev;
678}
679
680
681static int gelic_wl_get_scan(struct net_device *netdev,
682 struct iw_request_info *info,
683 union iwreq_data *wrqu, char *extra)
684{
685 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
686 struct gelic_wl_scan_info *scan_info;
687 char *ev = extra;
688 char *stop = ev + wrqu->data.length;
689 int ret = 0;
690 unsigned long this_time = jiffies;
691
692 pr_debug("%s: <-\n", __func__);
693 if (down_interruptible(&wl->scan_lock))
694 return -EAGAIN;
695
696 switch (wl->scan_stat) {
697 case GELIC_WL_SCAN_STAT_SCANNING:
 698		/* If a scan is in progress, the caller should call again */
699 ret = -EAGAIN;
700 goto out;
701 break;
702
703 case GELIC_WL_SCAN_STAT_INIT:
704 /* last scan request failed or never issued */
705 ret = -ENODEV;
706 goto out;
707 break;
708 case GELIC_WL_SCAN_STAT_GOT_LIST:
709 /* ok, use current list */
710 break;
711 }
712
713 list_for_each_entry(scan_info, &wl->network_list, list) {
714 if (wl->scan_age == 0 ||
715 time_after(scan_info->last_scanned + wl->scan_age,
716 this_time))
717 ev = gelic_wl_translate_scan(netdev, ev, stop,
718 scan_info);
719 else
720 pr_debug("%s:entry too old\n", __func__);
721
722 if (stop - ev <= IW_EV_ADDR_LEN) {
723 ret = -E2BIG;
724 goto out;
725 }
726 }
727
728 wrqu->data.length = ev - extra;
729 wrqu->data.flags = 0;
730out:
731 up(&wl->scan_lock);
732 pr_debug("%s: -> %d %d\n", __func__, ret, wrqu->data.length);
733 return ret;
734}
735
736#ifdef DEBUG
737static void scan_list_dump(struct gelic_wl_info *wl)
738{
739 struct gelic_wl_scan_info *scan_info;
740 int i;
741 DECLARE_MAC_BUF(mac);
742
743 i = 0;
744 list_for_each_entry(scan_info, &wl->network_list, list) {
745 pr_debug("%s: item %d\n", __func__, i++);
746 pr_debug("valid=%d eurusindex=%d last=%lx\n",
747 scan_info->valid, scan_info->eurus_index,
748 scan_info->last_scanned);
749 pr_debug("r_len=%d r_ext_len=%d essid_len=%d\n",
750 scan_info->rate_len, scan_info->rate_ext_len,
751 scan_info->essid_len);
752 /* -- */
753 pr_debug("bssid=%s\n",
754 print_mac(mac, &scan_info->hwinfo->bssid[2]));
755 pr_debug("essid=%s\n", scan_info->hwinfo->essid);
756 }
757}
758#endif
759
760static int gelic_wl_set_auth(struct net_device *netdev,
761 struct iw_request_info *info,
762 union iwreq_data *data, char *extra)
763{
764 struct iw_param *param = &data->param;
765 struct gelic_wl_info *wl = port_wl(netdev_port(netdev));
766 unsigned long irqflag;
767 int ret = 0;
768
769 pr_debug("%s: <- %d\n", __func__, param->flags & IW_AUTH_INDEX);
770 spin_lock_irqsave(&wl->lock, irqflag);
771 switch (param->flags & IW_AUTH_INDEX) {
772 case IW_AUTH_WPA_VERSION:
773 if (param->value & IW_AUTH_WPA_VERSION_DISABLED) {
774 pr_debug("%s: NO WPA selected\n", __func__);
775 wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE;
776 wl->group_cipher_method = GELIC_WL_CIPHER_WEP;
777 wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP;
778 }
779 if (param->value & IW_AUTH_WPA_VERSION_WPA) {
780 pr_debug("%s: WPA version 1 selected\n", __func__);
781 wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA;
782 wl->group_cipher_method = GELIC_WL_CIPHER_TKIP;
783 wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP;
784 wl->auth_method = GELIC_EURUS_AUTH_OPEN;
785 }
786 if (param->value & IW_AUTH_WPA_VERSION_WPA2) {
787 /*
 788			 * As the hypervisor may not report the
 789			 * cipher information of the AP if it is WPA2,
 790			 * a suitable cipher cannot be chosen from
 791			 * its beacon alone.
 792			 * The AP's cipher information must be
 793			 * obtained by other means prior to
 794			 * the association.
795 */
796 if (!precise_ie())
797 pr_info("%s: WPA2 may not work\n", __func__);
798 if (wpa2_capable()) {
799 wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA2;
800 wl->group_cipher_method = GELIC_WL_CIPHER_AES;
801 wl->pairwise_cipher_method =
802 GELIC_WL_CIPHER_AES;
803 wl->auth_method = GELIC_EURUS_AUTH_OPEN;
804 } else
805 ret = -EINVAL;
806 }
807 break;
808
809 case IW_AUTH_CIPHER_PAIRWISE:
810 if (param->value &
811 (IW_AUTH_CIPHER_WEP104 | IW_AUTH_CIPHER_WEP40)) {
812 pr_debug("%s: WEP selected\n", __func__);
813 wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP;
814 }
815 if (param->value & IW_AUTH_CIPHER_TKIP) {
816 pr_debug("%s: TKIP selected\n", __func__);
817 wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP;
818 }
819 if (param->value & IW_AUTH_CIPHER_CCMP) {
820 pr_debug("%s: CCMP selected\n", __func__);
821 wl->pairwise_cipher_method = GELIC_WL_CIPHER_AES;
822 }
823 if (param->value & IW_AUTH_CIPHER_NONE) {
824 pr_debug("%s: no auth selected\n", __func__);
825 wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE;
826 }
827 break;
828 case IW_AUTH_CIPHER_GROUP:
829 if (param->value &
830 (IW_AUTH_CIPHER_WEP104 | IW_AUTH_CIPHER_WEP40)) {
831 pr_debug("%s: WEP selected\n", __func__);
832 wl->group_cipher_method = GELIC_WL_CIPHER_WEP;
833 }
834 if (param->value & IW_AUTH_CIPHER_TKIP) {
835 pr_debug("%s: TKIP selected\n", __func__);
836 wl->group_cipher_method = GELIC_WL_CIPHER_TKIP;
837 }
838 if (param->value & IW_AUTH_CIPHER_CCMP) {
839 pr_debug("%s: CCMP selected\n", __func__);
840 wl->group_cipher_method = GELIC_WL_CIPHER_AES;
841 }
842 if (param->value & IW_AUTH_CIPHER_NONE) {
843 pr_debug("%s: no auth selected\n", __func__);
844 wl->group_cipher_method = GELIC_WL_CIPHER_NONE;
845 }
846 break;
847 case IW_AUTH_80211_AUTH_ALG:
848 if (param->value & IW_AUTH_ALG_SHARED_KEY) {
849 pr_debug("%s: shared key specified\n", __func__);
850 wl->auth_method = GELIC_EURUS_AUTH_SHARED;
851 } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
852 pr_debug("%s: open system specified\n", __func__);
853 wl->auth_method = GELIC_EURUS_AUTH_OPEN;
854 } else
855 ret = -EINVAL;
856 break;
857
858 case IW_AUTH_WPA_ENABLED:
859 if (param->value) {
860 pr_debug("%s: WPA enabled\n", __func__);
861 wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA;
862 } else {
863 pr_debug("%s: WPA disabled\n", __func__);
864 wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE;
865 }
866 break;
867
868 case IW_AUTH_KEY_MGMT:
869 if (param->value & IW_AUTH_KEY_MGMT_PSK)
870 break;
871 /* intentionally fall through */
872 default:
873 ret = -EOPNOTSUPP;
874 break;
875 };
876
877 if (!ret)
878 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
879
880 spin_unlock_irqrestore(&wl->lock, irqflag);
881 pr_debug("%s: -> %d\n", __func__, ret);
882 return ret;
883}
884
885static int gelic_wl_get_auth(struct net_device *netdev,
886 struct iw_request_info *info,
887 union iwreq_data *iwreq, char *extra)
888{
889 struct iw_param *param = &iwreq->param;
890 struct gelic_wl_info *wl = port_wl(netdev_port(netdev));
891 unsigned long irqflag;
892 int ret = 0;
893
894 pr_debug("%s: <- %d\n", __func__, param->flags & IW_AUTH_INDEX);
895 spin_lock_irqsave(&wl->lock, irqflag);
896 switch (param->flags & IW_AUTH_INDEX) {
897 case IW_AUTH_WPA_VERSION:
898 switch (wl->wpa_level) {
899 case GELIC_WL_WPA_LEVEL_WPA:
900 param->value |= IW_AUTH_WPA_VERSION_WPA;
901 break;
902 case GELIC_WL_WPA_LEVEL_WPA2:
903 param->value |= IW_AUTH_WPA_VERSION_WPA2;
904 break;
905 default:
906 param->value |= IW_AUTH_WPA_VERSION_DISABLED;
907 }
908 break;
909
910 case IW_AUTH_80211_AUTH_ALG:
911 if (wl->auth_method == GELIC_EURUS_AUTH_SHARED)
912 param->value = IW_AUTH_ALG_SHARED_KEY;
913 else if (wl->auth_method == GELIC_EURUS_AUTH_OPEN)
914 param->value = IW_AUTH_ALG_OPEN_SYSTEM;
915 break;
916
917 case IW_AUTH_WPA_ENABLED:
918 switch (wl->wpa_level) {
919 case GELIC_WL_WPA_LEVEL_WPA:
920 case GELIC_WL_WPA_LEVEL_WPA2:
921 param->value = 1;
922 break;
923 default:
924 param->value = 0;
925 break;
926 }
927 break;
928 default:
929 ret = -EOPNOTSUPP;
930 }
931
932 spin_unlock_irqrestore(&wl->lock, irqflag);
933 pr_debug("%s: -> %d\n", __func__, ret);
934 return ret;
935}
936
937/* SIOC{S,G}IWESSID */
938static int gelic_wl_set_essid(struct net_device *netdev,
939 struct iw_request_info *info,
940 union iwreq_data *data, char *extra)
941{
942 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
943 unsigned long irqflag;
944
945 pr_debug("%s: <- l=%d f=%d\n", __func__,
946 data->essid.length, data->essid.flags);
947 if (IW_ESSID_MAX_SIZE < data->essid.length)
948 return -EINVAL;
949
950 spin_lock_irqsave(&wl->lock, irqflag);
951 if (data->essid.flags) {
952 wl->essid_len = data->essid.length;
953 memcpy(wl->essid, extra, wl->essid_len);
954 pr_debug("%s: essid = '%s'\n", __func__, extra);
955 set_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
956 } else {
957 pr_debug("%s: ESSID any \n", __func__);
958 clear_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
959 }
960 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
961 spin_unlock_irqrestore(&wl->lock, irqflag);
962
963
964 gelic_wl_try_associate(netdev); /* FIXME */
965 pr_debug("%s: -> \n", __func__);
966 return 0;
967}
968
969static int gelic_wl_get_essid(struct net_device *netdev,
970 struct iw_request_info *info,
971 union iwreq_data *data, char *extra)
972{
973 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
974 unsigned long irqflag;
975
976 pr_debug("%s: <- \n", __func__);
977 down(&wl->assoc_stat_lock);
978 spin_lock_irqsave(&wl->lock, irqflag);
979 if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) ||
980 wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) {
981 memcpy(extra, wl->essid, wl->essid_len);
982 data->essid.length = wl->essid_len;
983 data->essid.flags = 1;
984 } else
985 data->essid.flags = 0;
986
987 up(&wl->assoc_stat_lock);
988 spin_unlock_irqrestore(&wl->lock, irqflag);
989 pr_debug("%s: -> len=%d \n", __func__, data->essid.length);
990
991 return 0;
992}
993
994/* SIO{S,G}IWENCODE */
995static int gelic_wl_set_encode(struct net_device *netdev,
996 struct iw_request_info *info,
997 union iwreq_data *data, char *extra)
998{
999 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1000 struct iw_point *enc = &data->encoding;
1001 __u16 flags;
1002 unsigned int irqflag;
1003 int key_index, index_specified;
1004 int ret = 0;
1005
1006 pr_debug("%s: <- \n", __func__);
1007 flags = enc->flags & IW_ENCODE_FLAGS;
1008 key_index = enc->flags & IW_ENCODE_INDEX;
1009
1010 pr_debug("%s: key_index = %d\n", __func__, key_index);
1011 pr_debug("%s: key_len = %d\n", __func__, enc->length);
1012 pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS);
1013
1014 if (GELIC_WEP_KEYS < key_index)
1015 return -EINVAL;
1016
1017 spin_lock_irqsave(&wl->lock, irqflag);
1018 if (key_index) {
1019 index_specified = 1;
1020 key_index--;
1021 } else {
1022 index_specified = 0;
1023 key_index = wl->current_key;
1024 }
1025
1026 if (flags & IW_ENCODE_NOKEY) {
1027 /* if just IW_ENCODE_NOKEY, change current key index */
1028 if (!flags && index_specified) {
1029 wl->current_key = key_index;
1030 goto done;
1031 }
1032
1033 if (flags & IW_ENCODE_DISABLED) {
1034 if (!index_specified) {
1035 /* disable encryption */
1036 wl->group_cipher_method = GELIC_WL_CIPHER_NONE;
1037 wl->pairwise_cipher_method =
1038 GELIC_WL_CIPHER_NONE;
1039 /* invalidate all key */
1040 wl->key_enabled = 0;
1041 } else
1042 clear_bit(key_index, &wl->key_enabled);
1043 }
1044
1045 if (flags & IW_ENCODE_OPEN)
1046 wl->auth_method = GELIC_EURUS_AUTH_OPEN;
1047 if (flags & IW_ENCODE_RESTRICTED) {
1048 pr_info("%s: shared key mode enabled\n", __func__);
1049 wl->auth_method = GELIC_EURUS_AUTH_SHARED;
1050 }
1051 } else {
1052 if (IW_ENCODING_TOKEN_MAX < enc->length) {
1053 ret = -EINVAL;
1054 goto done;
1055 }
1056 wl->key_len[key_index] = enc->length;
1057 memcpy(wl->key[key_index], extra, enc->length);
1058 set_bit(key_index, &wl->key_enabled);
1059 wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP;
1060 wl->group_cipher_method = GELIC_WL_CIPHER_WEP;
1061 }
1062 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
1063done:
1064 spin_unlock_irqrestore(&wl->lock, irqflag);
1065 pr_debug("%s: -> \n", __func__);
1066 return ret;
1067}
1068
1069static int gelic_wl_get_encode(struct net_device *netdev,
1070 struct iw_request_info *info,
1071 union iwreq_data *data, char *extra)
1072{
1073 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1074 struct iw_point *enc = &data->encoding;
1075 unsigned int irqflag;
1076 unsigned int key_index, index_specified;
1077 int ret = 0;
1078
1079 pr_debug("%s: <- \n", __func__);
1080 key_index = enc->flags & IW_ENCODE_INDEX;
1081 pr_debug("%s: flag=%#x point=%p len=%d extra=%p\n", __func__,
1082 enc->flags, enc->pointer, enc->length, extra);
1083 if (GELIC_WEP_KEYS < key_index)
1084 return -EINVAL;
1085
1086 spin_lock_irqsave(&wl->lock, irqflag);
1087 if (key_index) {
1088 index_specified = 1;
1089 key_index--;
1090 } else {
1091 index_specified = 0;
1092 key_index = wl->current_key;
1093 }
1094
1095 if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
1096 switch (wl->auth_method) {
1097 case GELIC_EURUS_AUTH_OPEN:
1098 enc->flags = IW_ENCODE_OPEN;
1099 break;
1100 case GELIC_EURUS_AUTH_SHARED:
1101 enc->flags = IW_ENCODE_RESTRICTED;
1102 break;
1103 }
1104 } else
1105 enc->flags = IW_ENCODE_DISABLED;
1106
1107 if (test_bit(key_index, &wl->key_enabled)) {
1108 if (enc->length < wl->key_len[key_index]) {
1109 ret = -EINVAL;
1110 goto done;
1111 }
1112 enc->length = wl->key_len[key_index];
1113 memcpy(extra, wl->key[key_index], wl->key_len[key_index]);
1114 } else {
1115 enc->length = 0;
1116 enc->flags |= IW_ENCODE_NOKEY;
1117 }
1118 enc->flags |= key_index + 1;
1119 pr_debug("%s: -> flag=%x len=%d\n", __func__,
1120 enc->flags, enc->length);
1121
1122done:
1123 spin_unlock_irqrestore(&wl->lock, irqflag);
1124 return ret;
1125}
1126
1127/* SIOC{S,G}IWAP */
1128static int gelic_wl_set_ap(struct net_device *netdev,
1129 struct iw_request_info *info,
1130 union iwreq_data *data, char *extra)
1131{
1132 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1133 unsigned long irqflag;
1134
1135 pr_debug("%s: <-\n", __func__);
1136 if (data->ap_addr.sa_family != ARPHRD_ETHER)
1137 return -EINVAL;
1138
1139 spin_lock_irqsave(&wl->lock, irqflag);
1140 if (is_valid_ether_addr(data->ap_addr.sa_data)) {
1141 memcpy(wl->bssid, data->ap_addr.sa_data,
1142 ETH_ALEN);
1143 set_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat);
1144 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
1145 pr_debug("%s: bss=%02x:%02x:%02x:%02x:%02x:%02x\n",
1146 __func__,
1147 wl->bssid[0], wl->bssid[1],
1148 wl->bssid[2], wl->bssid[3],
1149 wl->bssid[4], wl->bssid[5]);
1150 } else {
1151 pr_debug("%s: clear bssid\n", __func__);
1152 clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat);
1153 memset(wl->bssid, 0, ETH_ALEN);
1154 }
1155 spin_unlock_irqrestore(&wl->lock, irqflag);
1156 pr_debug("%s: ->\n", __func__);
1157 return 0;
1158}
1159
1160static int gelic_wl_get_ap(struct net_device *netdev,
1161 struct iw_request_info *info,
1162 union iwreq_data *data, char *extra)
1163{
1164 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1165 unsigned long irqflag;
1166
1167 pr_debug("%s: <-\n", __func__);
1168 down(&wl->assoc_stat_lock);
1169 spin_lock_irqsave(&wl->lock, irqflag);
1170 if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) {
1171 data->ap_addr.sa_family = ARPHRD_ETHER;
1172 memcpy(data->ap_addr.sa_data, wl->active_bssid,
1173 ETH_ALEN);
1174 } else
1175 memset(data->ap_addr.sa_data, 0, ETH_ALEN);
1176
1177 spin_unlock_irqrestore(&wl->lock, irqflag);
1178 up(&wl->assoc_stat_lock);
1179 pr_debug("%s: ->\n", __func__);
1180 return 0;
1181}
1182
1183/* SIOC{S,G}IWENCODEEXT */
1184static int gelic_wl_set_encodeext(struct net_device *netdev,
1185 struct iw_request_info *info,
1186 union iwreq_data *data, char *extra)
1187{
1188 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1189 struct iw_point *enc = &data->encoding;
1190 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1191 __u16 alg;
1192 __u16 flags;
1193 unsigned int irqflag;
1194 int key_index;
1195 int ret = 0;
1196
1197 pr_debug("%s: <- \n", __func__);
1198 flags = enc->flags & IW_ENCODE_FLAGS;
1199 alg = ext->alg;
1200 key_index = enc->flags & IW_ENCODE_INDEX;
1201
1202 pr_debug("%s: key_index = %d\n", __func__, key_index);
1203 pr_debug("%s: key_len = %d\n", __func__, enc->length);
1204 pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS);
1205 pr_debug("%s: ext_flag=%x\n", __func__, ext->ext_flags);
1206 pr_debug("%s: ext_key_len=%x\n", __func__, ext->key_len);
1207
1208 if (GELIC_WEP_KEYS < key_index)
1209 return -EINVAL;
1210
1211 spin_lock_irqsave(&wl->lock, irqflag);
1212 if (key_index)
1213 key_index--;
1214 else
1215 key_index = wl->current_key;
1216
1217 if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) {
 1218		/* request to change the default key index */
1219 pr_debug("%s: request to change default key to %d\n",
1220 __func__, key_index);
1221 wl->current_key = key_index;
1222 goto done;
1223 }
1224
1225 if (alg == IW_ENCODE_ALG_NONE || (flags & IW_ENCODE_DISABLED)) {
1226 pr_debug("%s: alg disabled\n", __func__);
1227 wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE;
1228 wl->group_cipher_method = GELIC_WL_CIPHER_NONE;
1229 wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE;
1230 wl->auth_method = GELIC_EURUS_AUTH_OPEN; /* should be open */
1231 } else if (alg == IW_ENCODE_ALG_WEP) {
1232 pr_debug("%s: WEP requested\n", __func__);
1233 if (flags & IW_ENCODE_OPEN) {
1234 pr_debug("%s: open key mode\n", __func__);
1235 wl->auth_method = GELIC_EURUS_AUTH_OPEN;
1236 }
1237 if (flags & IW_ENCODE_RESTRICTED) {
1238 pr_debug("%s: shared key mode\n", __func__);
1239 wl->auth_method = GELIC_EURUS_AUTH_SHARED;
1240 }
1241 if (IW_ENCODING_TOKEN_MAX < ext->key_len) {
1242 pr_info("%s: key is too long %d\n", __func__,
1243 ext->key_len);
1244 ret = -EINVAL;
1245 goto done;
1246 }
1247 /* OK, update the key */
1248 wl->key_len[key_index] = ext->key_len;
1249 memset(wl->key[key_index], 0, IW_ENCODING_TOKEN_MAX);
1250 memcpy(wl->key[key_index], ext->key, ext->key_len);
1251 set_bit(key_index, &wl->key_enabled);
1252 /* remember wep info changed */
1253 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
1254 } else if ((alg == IW_ENCODE_ALG_TKIP) || (alg == IW_ENCODE_ALG_CCMP)) {
1255 pr_debug("%s: TKIP/CCMP requested alg=%d\n", __func__, alg);
1256 /* check key length */
1257 if (IW_ENCODING_TOKEN_MAX < ext->key_len) {
1258 pr_info("%s: key is too long %d\n", __func__,
1259 ext->key_len);
1260 ret = -EINVAL;
1261 goto done;
1262 }
1263 if (alg == IW_ENCODE_ALG_CCMP) {
1264 pr_debug("%s: AES selected\n", __func__);
1265 wl->group_cipher_method = GELIC_WL_CIPHER_AES;
1266 wl->pairwise_cipher_method = GELIC_WL_CIPHER_AES;
1267 wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA2;
1268 } else {
1269 pr_debug("%s: TKIP selected, WPA forced\n", __func__);
1270 wl->group_cipher_method = GELIC_WL_CIPHER_TKIP;
1271 wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP;
1272 /* FIXME: how do we do if WPA2 + TKIP? */
1273 wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA;
1274 }
1275 if (flags & IW_ENCODE_RESTRICTED)
1276 BUG();
1277 wl->auth_method = GELIC_EURUS_AUTH_OPEN;
 1278		/* We should use the same key for both group and unicast */
1279 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
1280 pr_debug("%s: group key \n", __func__);
1281 else
1282 pr_debug("%s: unicast key \n", __func__);
1283 /* OK, update the key */
1284 wl->key_len[key_index] = ext->key_len;
1285 memset(wl->key[key_index], 0, IW_ENCODING_TOKEN_MAX);
1286 memcpy(wl->key[key_index], ext->key, ext->key_len);
1287 set_bit(key_index, &wl->key_enabled);
1288 /* remember info changed */
1289 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
1290 }
1291done:
1292 spin_unlock_irqrestore(&wl->lock, irqflag);
1293 pr_debug("%s: -> \n", __func__);
1294 return ret;
1295}
1296
1297static int gelic_wl_get_encodeext(struct net_device *netdev,
1298 struct iw_request_info *info,
1299 union iwreq_data *data, char *extra)
1300{
1301 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1302 struct iw_point *enc = &data->encoding;
1303 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1304 unsigned int irqflag;
1305 int key_index;
1306 int ret = 0;
1307 int max_key_len;
1308
1309 pr_debug("%s: <- \n", __func__);
1310
1311 max_key_len = enc->length - sizeof(struct iw_encode_ext);
1312 if (max_key_len < 0)
1313 return -EINVAL;
1314 key_index = enc->flags & IW_ENCODE_INDEX;
1315
1316 pr_debug("%s: key_index = %d\n", __func__, key_index);
1317 pr_debug("%s: key_len = %d\n", __func__, enc->length);
1318 pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS);
1319
1320 if (GELIC_WEP_KEYS < key_index)
1321 return -EINVAL;
1322
1323 spin_lock_irqsave(&wl->lock, irqflag);
1324 if (key_index)
1325 key_index--;
1326 else
1327 key_index = wl->current_key;
1328
1329 memset(ext, 0, sizeof(struct iw_encode_ext));
1330 switch (wl->group_cipher_method) {
1331 case GELIC_WL_CIPHER_WEP:
1332 ext->alg = IW_ENCODE_ALG_WEP;
1333 enc->flags |= IW_ENCODE_ENABLED;
1334 break;
1335 case GELIC_WL_CIPHER_TKIP:
1336 ext->alg = IW_ENCODE_ALG_TKIP;
1337 enc->flags |= IW_ENCODE_ENABLED;
1338 break;
1339 case GELIC_WL_CIPHER_AES:
1340 ext->alg = IW_ENCODE_ALG_CCMP;
1341 enc->flags |= IW_ENCODE_ENABLED;
1342 break;
1343 case GELIC_WL_CIPHER_NONE:
1344 default:
1345 ext->alg = IW_ENCODE_ALG_NONE;
1346 enc->flags |= IW_ENCODE_NOKEY;
1347 break;
1348 }
1349
1350 if (!(enc->flags & IW_ENCODE_NOKEY)) {
1351 if (max_key_len < wl->key_len[key_index]) {
1352 ret = -E2BIG;
1353 goto out;
1354 }
1355 if (test_bit(key_index, &wl->key_enabled))
1356 memcpy(ext->key, wl->key[key_index],
1357 wl->key_len[key_index]);
1358 else
1359 pr_debug("%s: disabled key requested ix=%d\n",
1360 __func__, key_index);
1361 }
1362out:
1363 spin_unlock_irqrestore(&wl->lock, irqflag);
1364 pr_debug("%s: -> \n", __func__);
1365 return ret;
1366}
1367/* SIOC{S,G}IWMODE */
1368static int gelic_wl_set_mode(struct net_device *netdev,
1369 struct iw_request_info *info,
1370 union iwreq_data *data, char *extra)
1371{
1372 __u32 mode = data->mode;
1373 int ret;
1374
1375 pr_debug("%s: <- \n", __func__);
1376 if (mode == IW_MODE_INFRA)
1377 ret = 0;
1378 else
1379 ret = -EOPNOTSUPP;
1380 pr_debug("%s: -> %d\n", __func__, ret);
1381 return ret;
1382}
1383
1384static int gelic_wl_get_mode(struct net_device *netdev,
1385 struct iw_request_info *info,
1386 union iwreq_data *data, char *extra)
1387{
1388 __u32 *mode = &data->mode;
1389 pr_debug("%s: <- \n", __func__);
1390 *mode = IW_MODE_INFRA;
1391 pr_debug("%s: ->\n", __func__);
1392 return 0;
1393}
1394
1395/* SIOCIWFIRSTPRIV */
1396static int hex2bin(u8 *str, u8 *bin, unsigned int len)
1397{
1398 unsigned int i;
 1399	static const char *hex = "0123456789ABCDEF";
 1400	const char *p, *q;
1401 u8 tmp;
1402
1403 if (len != WPA_PSK_LEN * 2)
1404 return -EINVAL;
1405
1406 for (i = 0; i < WPA_PSK_LEN * 2; i += 2) {
1407 p = strchr(hex, toupper(str[i]));
1408 q = strchr(hex, toupper(str[i + 1]));
1409 if (!p || !q) {
1410 pr_info("%s: unconvertible PSK digit=%d\n",
1411 __func__, i);
1412 return -EINVAL;
1413 }
1414 tmp = ((p - hex) << 4) + (q - hex);
1415 *bin++ = tmp;
1416 }
1417 return 0;
1418};
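/*
 * Editorial illustration, not part of the driver: feeding hex2bin()
 * above a raw hexadecimal PSK.  WPA_PSK_LEN comes from
 * ps3_gelic_wireless.h (not shown in this hunk); a binary WPA PSK is
 * 32 bytes, so the string must be exactly 2 * WPA_PSK_LEN hex digits
 * with no separators.  The real handler, gelic_wl_priv_set_psk()
 * below, additionally takes wl->lock and sets the
 * GELIC_WL_STAT_WPA_PSK_SET bit.  example_parse_psk() is a
 * hypothetical helper written only for this sketch.
 */
static int example_parse_psk(struct gelic_wl_info *wl, char *hex_str)
{
	/* hex2bin() rejects any length other than WPA_PSK_LEN * 2 */
	int ret = hex2bin((u8 *)hex_str, wl->psk, strlen(hex_str));

	if (!ret) {
		wl->psk_len = WPA_PSK_LEN;
		wl->psk_type = GELIC_EURUS_WPA_PSK_BIN;
	}
	return ret;
}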
1419
1420static int gelic_wl_priv_set_psk(struct net_device *net_dev,
1421 struct iw_request_info *info,
1422 union iwreq_data *data, char *extra)
1423{
1424 struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
1425 unsigned int len;
1426 unsigned int irqflag;
1427 int ret = 0;
1428
1429 pr_debug("%s:<- len=%d\n", __func__, data->data.length);
1430 len = data->data.length - 1;
1431 if (len <= 2)
1432 return -EINVAL;
1433
1434 spin_lock_irqsave(&wl->lock, irqflag);
1435 if (extra[0] == '"' && extra[len - 1] == '"') {
1436 pr_debug("%s: passphrase mode\n", __func__);
1437 /* pass phrase */
1438 if (GELIC_WL_EURUS_PSK_MAX_LEN < (len - 2)) {
1439 pr_info("%s: passphrase too long\n", __func__);
1440 ret = -E2BIG;
1441 goto out;
1442 }
1443 memset(wl->psk, 0, sizeof(wl->psk));
1444 wl->psk_len = len - 2;
1445 memcpy(wl->psk, &(extra[1]), wl->psk_len);
1446 wl->psk_type = GELIC_EURUS_WPA_PSK_PASSPHRASE;
1447 } else {
1448 ret = hex2bin(extra, wl->psk, len);
1449 if (ret)
1450 goto out;
1451 wl->psk_len = WPA_PSK_LEN;
1452 wl->psk_type = GELIC_EURUS_WPA_PSK_BIN;
1453 }
1454 set_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat);
1455out:
1456 spin_unlock_irqrestore(&wl->lock, irqflag);
1457 pr_debug("%s:->\n", __func__);
1458 return ret;
1459}
1460
1461static int gelic_wl_priv_get_psk(struct net_device *net_dev,
1462 struct iw_request_info *info,
1463 union iwreq_data *data, char *extra)
1464{
1465 struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
1466 char *p;
1467 unsigned int irqflag;
1468 unsigned int i;
1469
1470 pr_debug("%s:<-\n", __func__);
1471 if (!capable(CAP_NET_ADMIN))
1472 return -EPERM;
1473
1474 spin_lock_irqsave(&wl->lock, irqflag);
1475 p = extra;
1476 if (test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat)) {
1477 if (wl->psk_type == GELIC_EURUS_WPA_PSK_BIN) {
1478 for (i = 0; i < wl->psk_len; i++) {
1479 sprintf(p, "%02xu", wl->psk[i]);
1480 p += 2;
1481 }
1482 *p = '\0';
1483 data->data.length = wl->psk_len * 2;
1484 } else {
1485 *p++ = '"';
1486 memcpy(p, wl->psk, wl->psk_len);
1487 p += wl->psk_len;
1488 *p++ = '"';
1489 *p = '\0';
1490 data->data.length = wl->psk_len + 2;
1491 }
1492 } else
1493 /* no psk set */
1494 data->data.length = 0;
1495 spin_unlock_irqrestore(&wl->lock, irqflag);
1496 pr_debug("%s:-> %d\n", __func__, data->data.length);
1497 return 0;
1498}
1499
1500/* SIOCGIWNICKN */
1501static int gelic_wl_get_nick(struct net_device *net_dev,
1502 struct iw_request_info *info,
1503 union iwreq_data *data, char *extra)
1504{
1505 strcpy(extra, "gelic_wl");
1506 data->data.length = strlen(extra);
1507 data->data.flags = 1;
1508 return 0;
1509}
1510
1511
1512/* --- */
1513
1514static struct iw_statistics *gelic_wl_get_wireless_stats(
1515 struct net_device *netdev)
1516{
1517
1518 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
1519 struct gelic_eurus_cmd *cmd;
1520 struct iw_statistics *is;
1521 struct gelic_eurus_rssi_info *rssi;
1522
1523 pr_debug("%s: <-\n", __func__);
1524
1525 is = &wl->iwstat;
1526 memset(is, 0, sizeof(*is));
1527 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_RSSI_CFG,
1528 wl->buf, sizeof(*rssi));
1529 if (cmd && !cmd->status && !cmd->cmd_status) {
1530 rssi = wl->buf;
1531 is->qual.level = be16_to_cpu(rssi->rssi);
1532 is->qual.updated = IW_QUAL_LEVEL_UPDATED |
1533 IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID;
1534 } else
1535 /* not associated */
1536 is->qual.updated = IW_QUAL_ALL_INVALID;
1537
1538 kfree(cmd);
1539 pr_debug("%s: ->\n", __func__);
1540 return is;
1541}
1542
1543/*
1544 * scanning helpers
1545 */
1546static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan)
1547{
1548 struct gelic_eurus_cmd *cmd;
1549 int ret = 0;
1550
1551 pr_debug("%s: <- always=%d\n", __func__, always_scan);
1552 if (down_interruptible(&wl->scan_lock))
1553 return -ERESTARTSYS;
1554
1555 /*
 1556	 * If a scan is already in progress, do not trigger another.
1557 */
1558 if (wl->scan_stat == GELIC_WL_SCAN_STAT_SCANNING) {
1559 pr_debug("%s: scanning now\n", __func__);
1560 goto out;
1561 }
1562
1563 init_completion(&wl->scan_done);
1564 /*
 1565	 * If we already have a bss list, don't try to get a new one.
1566 */
1567 if (!always_scan && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) {
1568 pr_debug("%s: already has the list\n", __func__);
1569 complete(&wl->scan_done);
1570 goto out;
1571 }
1572 /*
1573 * issue start scan request
1574 */
1575 wl->scan_stat = GELIC_WL_SCAN_STAT_SCANNING;
1576 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_START_SCAN,
1577 NULL, 0);
1578 if (!cmd || cmd->status || cmd->cmd_status) {
1579 wl->scan_stat = GELIC_WL_SCAN_STAT_INIT;
1580 complete(&wl->scan_done);
1581 ret = -ENOMEM;
1582 goto out;
1583 }
1584 kfree(cmd);
1585out:
1586 up(&wl->scan_lock);
1587 pr_debug("%s: ->\n", __func__);
1588 return ret;
1589}
1590
1591/*
 1592 * retrieve the scan result from the chip (hypervisor).
 1593 * This function is invoked via scheduled work.
1594 */
1595static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
1596{
1597 struct gelic_eurus_cmd *cmd = NULL;
1598 struct gelic_wl_scan_info *target, *tmp;
1599 struct gelic_wl_scan_info *oldest = NULL;
1600 struct gelic_eurus_scan_info *scan_info;
1601 unsigned int scan_info_size;
1602 union iwreq_data data;
1603 unsigned long this_time = jiffies;
1604 unsigned int data_len, i, found, r;
1605 DECLARE_MAC_BUF(mac);
1606
1607 pr_debug("%s:start\n", __func__);
1608 down(&wl->scan_lock);
1609
1610 if (wl->scan_stat != GELIC_WL_SCAN_STAT_SCANNING) {
1611 /*
1612 * stop() may be called while scanning, ignore result
1613 */
1614 pr_debug("%s: scan complete when stat != scanning(%d)\n",
1615 __func__, wl->scan_stat);
1616 goto out;
1617 }
1618
1619 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_SCAN,
1620 wl->buf, PAGE_SIZE);
1621 if (!cmd || cmd->status || cmd->cmd_status) {
1622 wl->scan_stat = GELIC_WL_SCAN_STAT_INIT;
1623 pr_info("%s:cmd failed\n", __func__);
1624 kfree(cmd);
1625 goto out;
1626 }
1627 data_len = cmd->size;
1628 pr_debug("%s: data_len = %d\n", __func__, data_len);
1629 kfree(cmd);
1630
1631 /* OK, bss list retrieved */
1632 wl->scan_stat = GELIC_WL_SCAN_STAT_GOT_LIST;
1633
 1634	/* mark all entries as old */
1635 list_for_each_entry_safe(target, tmp, &wl->network_list, list) {
1636 target->valid = 0;
1637 /* expire too old entries */
1638 if (time_before(target->last_scanned + wl->scan_age,
1639 this_time)) {
1640 kfree(target->hwinfo);
1641 target->hwinfo = NULL;
1642 list_move_tail(&target->list, &wl->network_free_list);
1643 }
1644 }
1645
 1646	/* put them in the network_list */
1647 scan_info = wl->buf;
1648 scan_info_size = 0;
1649 i = 0;
1650 while (scan_info_size < data_len) {
1651 pr_debug("%s:size=%d bssid=%s scan_info=%p\n", __func__,
1652 be16_to_cpu(scan_info->size),
1653 print_mac(mac, &scan_info->bssid[2]), scan_info);
1654 found = 0;
1655 oldest = NULL;
1656 list_for_each_entry(target, &wl->network_list, list) {
1657 if (!compare_ether_addr(&target->hwinfo->bssid[2],
1658 &scan_info->bssid[2])) {
1659 found = 1;
 1660				pr_debug("%s: same BSS found in scanned list\n",
1661 __func__);
1662 break;
1663 }
1664 if (!oldest ||
1665 (target->last_scanned < oldest->last_scanned))
1666 oldest = target;
1667 }
1668
1669 if (!found) {
1670 /* not found in the list */
1671 if (list_empty(&wl->network_free_list)) {
1672 /* expire oldest */
1673 target = oldest;
1674 } else {
1675 target = list_entry(wl->network_free_list.next,
1676 struct gelic_wl_scan_info,
1677 list);
1678 }
1679 }
1680
1681 /* update the item */
1682 target->last_scanned = this_time;
1683 target->valid = 1;
1684 target->eurus_index = i;
1685 kfree(target->hwinfo);
1686 target->hwinfo = kzalloc(be16_to_cpu(scan_info->size),
1687 GFP_KERNEL);
1688 if (!target->hwinfo) {
1689 pr_info("%s: kzalloc failed\n", __func__);
1690 i++;
1691 scan_info_size += be16_to_cpu(scan_info->size);
1692 scan_info = (void *)scan_info +
1693 be16_to_cpu(scan_info->size);
1694 continue;
1695 }
1696 /* copy hw scan info */
1697 memcpy(target->hwinfo, scan_info, scan_info->size);
1698 target->essid_len = strnlen(scan_info->essid,
1699 sizeof(scan_info->essid));
1700 target->rate_len = 0;
1701 for (r = 0; r < MAX_RATES_LENGTH; r++)
1702 if (scan_info->rate[r])
1703 target->rate_len++;
1704 if (8 < target->rate_len)
1705 pr_info("%s: AP returns %d rates\n", __func__,
1706 target->rate_len);
1707 target->rate_ext_len = 0;
1708 for (r = 0; r < MAX_RATES_EX_LENGTH; r++)
1709 if (scan_info->ext_rate[r])
1710 target->rate_ext_len++;
1711 list_move_tail(&target->list, &wl->network_list);
1712 /* bump pointer */
1713 i++;
1714 scan_info_size += be16_to_cpu(scan_info->size);
1715 scan_info = (void *)scan_info + be16_to_cpu(scan_info->size);
1716 }
1717 memset(&data, 0, sizeof(data));
1718 wireless_send_event(port_to_netdev(wl_port(wl)), SIOCGIWSCAN, &data,
1719 NULL);
1720out:
1721 complete(&wl->scan_done);
1722 up(&wl->scan_lock);
1723 pr_debug("%s:end\n", __func__);
1724}
1725
1726/*
 1727 * Select an appropriate bss from the current scan list, taking
 1728 * the current settings from userspace into account.
 1729 * The caller must hold wl->scan_lock,
 1730 * and wl->scan_stat must be GELIC_WL_SCAN_STAT_GOT_LIST.
1731 */
1732static void update_best(struct gelic_wl_scan_info **best,
1733 struct gelic_wl_scan_info *candid,
1734 int *best_weight,
1735 int *weight)
1736{
1737 if (*best_weight < ++(*weight)) {
1738 *best_weight = *weight;
1739 *best = candid;
1740 }
1741}
1742
1743static
1744struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl)
1745{
1746 struct gelic_wl_scan_info *scan_info;
1747 struct gelic_wl_scan_info *best_bss;
1748 int weight, best_weight;
1749 u16 security;
1750 DECLARE_MAC_BUF(mac);
1751
1752 pr_debug("%s: <-\n", __func__);
1753
1754 best_bss = NULL;
1755 best_weight = 0;
1756
1757 list_for_each_entry(scan_info, &wl->network_list, list) {
1758 pr_debug("%s: station %p\n", __func__, scan_info);
1759
1760 if (!scan_info->valid) {
1761 pr_debug("%s: station invalid\n", __func__);
1762 continue;
1763 }
1764
1765 /* If bss specified, check it only */
1766 if (test_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat)) {
1767 if (!compare_ether_addr(&scan_info->hwinfo->bssid[2],
1768 wl->bssid)) {
1769 best_bss = scan_info;
1770 pr_debug("%s: bssid matched\n", __func__);
1771 break;
1772 } else {
 1773				pr_debug("%s: bssid unmatched\n", __func__);
1774 continue;
1775 }
1776 }
1777
1778 weight = 0;
1779
1780 /* security */
1781 security = be16_to_cpu(scan_info->hwinfo->security) &
1782 GELIC_EURUS_SCAN_SEC_MASK;
1783 if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA2) {
1784 if (security == GELIC_EURUS_SCAN_SEC_WPA2)
1785 update_best(&best_bss, scan_info,
1786 &best_weight, &weight);
1787 else
1788 continue;
1789 } else if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA) {
1790 if (security == GELIC_EURUS_SCAN_SEC_WPA)
1791 update_best(&best_bss, scan_info,
1792 &best_weight, &weight);
1793 else
1794 continue;
1795 } else if (wl->wpa_level == GELIC_WL_WPA_LEVEL_NONE &&
1796 wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
1797 if (security == GELIC_EURUS_SCAN_SEC_WEP)
1798 update_best(&best_bss, scan_info,
1799 &best_weight, &weight);
1800 else
1801 continue;
1802 }
1803
1804 /* If ESSID is set, check it */
1805 if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat)) {
1806 if ((scan_info->essid_len == wl->essid_len) &&
1807 !strncmp(wl->essid,
1808 scan_info->hwinfo->essid,
1809 scan_info->essid_len))
1810 update_best(&best_bss, scan_info,
1811 &best_weight, &weight);
1812 else
1813 continue;
1814 }
1815 }
1816
1817#ifdef DEBUG
1818 pr_debug("%s: -> bss=%p\n", __func__, best_bss);
1819 if (best_bss) {
1820 pr_debug("%s:addr=%s\n", __func__,
1821 print_mac(mac, &best_bss->hwinfo->bssid[2]));
1822 }
1823#endif
1824 return best_bss;
1825}
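The weighting in the loop above is cumulative: each criterion that matches bumps weight via update_best(), so the candidate satisfying the most of the configured security/ESSID constraints wins, and an exact BSSID match short-circuits the search entirely. A small worked example (hypothetical stations, not from the source):

        /* wl: WPA2 + ESSID "foo" configured                            */
        /* station A: WPA2, ESSID "bar" -> weight 1 (security only)     */
        /* station B: WPA2, ESSID "foo" -> weight 2 (security + ESSID)  */
        /* => gelic_wl_find_best_bss() returns B                        */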
1826
1827/*
1828 * Setup WEP configuration to the chip
1829 * The caller must hold wl->scan_lock,
1830 * and wl->scan_stat must be GELIC_WL_SCAN_STAT_GOT_LIST
1831 */
1832static int gelic_wl_do_wep_setup(struct gelic_wl_info *wl)
1833{
1834 unsigned int i;
1835 struct gelic_eurus_wep_cfg *wep;
1836 struct gelic_eurus_cmd *cmd;
1837 int wep104 = 0;
1838 int have_key = 0;
1839 int ret = 0;
1840
1841 pr_debug("%s: <-\n", __func__);
1842 /* we can assume no one else uses the buffer */
1843 wep = wl->buf;
1844 memset(wep, 0, sizeof(*wep));
1845
1846 if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
1847 pr_debug("%s: WEP mode\n", __func__);
1848 for (i = 0; i < GELIC_WEP_KEYS; i++) {
1849 if (!test_bit(i, &wl->key_enabled))
1850 continue;
1851
1852 pr_debug("%s: key#%d enabled\n", __func__, i);
1853 have_key = 1;
1854 if (wl->key_len[i] == 13)
1855 wep104 = 1;
1856 else if (wl->key_len[i] != 5) {
1857 pr_info("%s: wrong wep key[%d]=%d\n",
1858 __func__, i, wl->key_len[i]);
1859 ret = -EINVAL;
1860 goto out;
1861 }
1862 memcpy(wep->key[i], wl->key[i], wl->key_len[i]);
1863 }
1864
1865 if (!have_key) {
1866 pr_info("%s: all wep keys disabled\n", __func__);
1867 ret = -EINVAL;
1868 goto out;
1869 }
1870
1871 if (wep104) {
1872 pr_debug("%s: 104bit key\n", __func__);
1873 wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_104BIT);
1874 } else {
1875 pr_debug("%s: 40bit key\n", __func__);
1876 wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_40BIT);
1877 }
1878 } else {
1879 pr_debug("%s: NO encryption\n", __func__);
1880 wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_NONE);
1881 }
1882
1883 /* issue wep setup */
1884 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_WEP_CFG,
1885 wep, sizeof(*wep));
1886 if (!cmd)
1887 ret = -ENOMEM;
1888 else if (cmd->status || cmd->cmd_status)
1889 ret = -ENXIO;
1890
1891 kfree(cmd);
1892out:
1893 pr_debug("%s: ->\n", __func__);
1894 return ret;
1895}
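A note on the key-length check above: a 5 byte key is 40-bit WEP and a 13 byte key is 104-bit WEP (commonly marketed as 64-bit and 128-bit); any other length is rejected with -EINVAL. A minimal sketch of that mapping, using only names introduced by this patch (the helper itself is hypothetical and not part of the driver):

        /* hypothetical helper: map a WEP key length to a Eurus security level */
        static int gelic_wep_sec_from_len(unsigned int key_len,
                                          enum gelic_eurus_wep_security *sec)
        {
                if (key_len == 5)
                        *sec = GELIC_EURUS_WEP_SEC_40BIT;   /* 40-bit ("64-bit") WEP */
                else if (key_len == 13)
                        *sec = GELIC_EURUS_WEP_SEC_104BIT;  /* 104-bit ("128-bit") WEP */
                else
                        return -EINVAL;                     /* unsupported key length */
                return 0;
        }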
1896
1897#ifdef DEBUG
1898static const char *wpasecstr(enum gelic_eurus_wpa_security sec)
1899{
1900 switch (sec) {
1901 case GELIC_EURUS_WPA_SEC_NONE:
1902 return "NONE";
1903 break;
1904 case GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP:
1905 return "WPA_TKIP_TKIP";
1906 break;
1907 case GELIC_EURUS_WPA_SEC_WPA_TKIP_AES:
1908 return "WPA_TKIP_AES";
1909 break;
1910 case GELIC_EURUS_WPA_SEC_WPA_AES_AES:
1911 return "WPA_AES_AES";
1912 break;
1913 case GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP:
1914 return "WPA2_TKIP_TKIP";
1915 break;
1916 case GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES:
1917 return "WPA2_TKIP_AES";
1918 break;
1919 case GELIC_EURUS_WPA_SEC_WPA2_AES_AES:
1920 return "WPA2_AES_AES";
1921 break;
1922 }
1923 return "";
1924};
1925#endif
1926
1927static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
1928{
1929 struct gelic_eurus_wpa_cfg *wpa;
1930 struct gelic_eurus_cmd *cmd;
1931 u16 security;
1932 int ret = 0;
1933
1934 pr_debug("%s: <-\n", __func__);
1935 /* we can assume no one else uses the buffer */
1936 wpa = wl->buf;
1937 memset(wpa, 0, sizeof(*wpa));
1938
1939 if (!test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat))
1940 pr_info("%s: PSK not configured yet\n", __func__);
1941
1942 /* copy key */
1943 memcpy(wpa->psk, wl->psk, wl->psk_len);
1944
1945 /* set security level */
1946 if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA2) {
1947 if (wl->group_cipher_method == GELIC_WL_CIPHER_AES) {
1948 security = GELIC_EURUS_WPA_SEC_WPA2_AES_AES;
1949 } else {
1950 if (wl->pairwise_cipher_method == GELIC_WL_CIPHER_AES &&
1951 precise_ie())
1952 security = GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES;
1953 else
1954 security = GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP;
1955 }
1956 } else {
1957 if (wl->group_cipher_method == GELIC_WL_CIPHER_AES) {
1958 security = GELIC_EURUS_WPA_SEC_WPA_AES_AES;
1959 } else {
1960 if (wl->pairwise_cipher_method == GELIC_WL_CIPHER_AES &&
1961 precise_ie())
1962 security = GELIC_EURUS_WPA_SEC_WPA_TKIP_AES;
1963 else
1964 security = GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP;
1965 }
1966 }
1967 wpa->security = cpu_to_be16(security);
1968
1969 /* PSK type */
1970 wpa->psk_type = cpu_to_be16(wl->psk_type);
1971#ifdef DEBUG
1972 pr_debug("%s: sec=%s psktype=%s\n", __func__,
1973 wpasecstr(wpa->security),
1974 (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
1975 "BIN" : "passphrase");
1976#if 0
1977 /*
1978 * don't enable here if you plan to submit
1979 * the debug log because this dumps your precious
1980 * passphrase/key.
1981 */
1982 pr_debug("%s: psk=%s\n",
1983 (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
1984 (char *)"N/A" : (char *)wpa->psk);
1985#endif
1986#endif
1987 /* issue wpa setup */
1988 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_WPA_CFG,
1989 wpa, sizeof(*wpa));
1990 if (!cmd)
1991 ret = -ENOMEM;
1992 else if (cmd->status || cmd->cmd_status)
1993 ret = -ENXIO;
1994 kfree(cmd);
1995 pr_debug("%s: --> %d\n", __func__, ret);
1996 return ret;
1997}
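For quick reference, the security selection above reduces to the following mapping (a summary of the code, not additional behaviour; precise_ie() is the helper used above):

        wpa_level        group cipher   pairwise cipher         wpa->security
        WPA2             AES            (any)                   GELIC_EURUS_WPA_SEC_WPA2_AES_AES
        WPA2             not AES        AES and precise_ie()    GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES
        WPA2             not AES        otherwise               GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP
        WPA (not WPA2)   AES            (any)                   GELIC_EURUS_WPA_SEC_WPA_AES_AES
        WPA (not WPA2)   not AES        AES and precise_ie()    GELIC_EURUS_WPA_SEC_WPA_TKIP_AES
        WPA (not WPA2)   not AES        otherwise               GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP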
1998
1999/*
2000 * Start association. caller must hold assoc_stat_lock
2001 */
2002static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
2003 struct gelic_wl_scan_info *bss)
2004{
2005 struct gelic_eurus_cmd *cmd;
2006 struct gelic_eurus_common_cfg *common;
2007 int ret = 0;
2008 unsigned long rc;
2009
2010 pr_debug("%s: <-\n", __func__);
2011
2012 /* do common config */
2013 common = wl->buf;
2014 memset(common, 0, sizeof(*common));
2015 common->bss_type = cpu_to_be16(GELIC_EURUS_BSS_INFRA);
2016 common->op_mode = cpu_to_be16(GELIC_EURUS_OPMODE_11BG);
2017
2018 common->scan_index = cpu_to_be16(bss->eurus_index);
2019 switch (wl->auth_method) {
2020 case GELIC_EURUS_AUTH_OPEN:
2021 common->auth_method = cpu_to_be16(GELIC_EURUS_AUTH_OPEN);
2022 break;
2023 case GELIC_EURUS_AUTH_SHARED:
2024 common->auth_method = cpu_to_be16(GELIC_EURUS_AUTH_SHARED);
2025 break;
2026 }
2027
2028#ifdef DEBUG
2029 scan_list_dump(wl);
2030#endif
2031 pr_debug("%s: common cfg index=%d bsstype=%d auth=%d\n", __func__,
2032 be16_to_cpu(common->scan_index),
2033 be16_to_cpu(common->bss_type),
2034 be16_to_cpu(common->auth_method));
2035
2036 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_COMMON_CFG,
2037 common, sizeof(*common));
2038 if (!cmd || cmd->status || cmd->cmd_status) {
2039 ret = -ENOMEM;
2040 kfree(cmd);
2041 goto out;
2042 }
2043 kfree(cmd);
2044
2045 /* WEP/WPA */
2046 switch (wl->wpa_level) {
2047 case GELIC_WL_WPA_LEVEL_NONE:
2048 /* If WEP or no security, setup WEP config */
2049 ret = gelic_wl_do_wep_setup(wl);
2050 break;
2051 case GELIC_WL_WPA_LEVEL_WPA:
2052 case GELIC_WL_WPA_LEVEL_WPA2:
2053 ret = gelic_wl_do_wpa_setup(wl);
2054 break;
2055 };
2056
2057 if (ret) {
2058 pr_debug("%s: WEP/WPA setup failed %d\n", __func__,
2059 ret);
2060 }
2061
2062 /* start association */
2063 init_completion(&wl->assoc_done);
2064 wl->assoc_stat = GELIC_WL_ASSOC_STAT_ASSOCIATING;
2065 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_ASSOC,
2066 NULL, 0);
2067 if (!cmd || cmd->status || cmd->cmd_status) {
2068 pr_debug("%s: assoc request failed\n", __func__);
2069 wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;
2070 kfree(cmd);
2071 ret = -ENOMEM;
2072 gelic_wl_send_iwap_event(wl, NULL);
2073 goto out;
2074 }
2075 kfree(cmd);
2076
2077 /* wait for connected event */
2078 rc = wait_for_completion_timeout(&wl->assoc_done, HZ * 4);/*FIXME*/
2079
2080 if (!rc) {
2081 /* timed out. Maybe the key or crypt mode is wrong */
2082 pr_info("%s: connect timeout\n", __func__);
2083 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC,
2084 NULL, 0);
2085 kfree(cmd);
2086 wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;
2087 gelic_wl_send_iwap_event(wl, NULL);
2088 ret = -ENXIO;
2089 } else {
2090 wl->assoc_stat = GELIC_WL_ASSOC_STAT_ASSOCIATED;
2091 /* copy bssid */
2092 memcpy(wl->active_bssid, &bss->hwinfo->bssid[2], ETH_ALEN);
2093
2094 /* send connect event */
2095 gelic_wl_send_iwap_event(wl, wl->active_bssid);
2096 pr_info("%s: connected\n", __func__);
2097 }
2098out:
2099 pr_debug("%s: ->\n", __func__);
2100 return ret;
2101}
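For readability, the association sequence implemented above is summarised here (no new behaviour, just the order of the steps in this function):

        /* 1. GELIC_EURUS_CMD_SET_COMMON_CFG  (scan index, bss type, auth)   */
        /* 2. gelic_wl_do_wep_setup() or gelic_wl_do_wpa_setup()             */
        /* 3. GELIC_EURUS_CMD_ASSOC                                          */
        /* 4. wait_for_completion_timeout(&wl->assoc_done, HZ * 4)           */
        /*    - on timeout: GELIC_EURUS_CMD_DISASSOC, report -ENXIO          */
        /*    - on event:   mark associated, send the SIOCGIWAP event        */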
2102
2103/*
2104 * connected event
2105 */
2106static void gelic_wl_connected_event(struct gelic_wl_info *wl,
2107 u64 event)
2108{
2109 u64 desired_event = 0;
2110
2111 switch (wl->wpa_level) {
2112 case GELIC_WL_WPA_LEVEL_NONE:
2113 desired_event = GELIC_LV1_WL_EVENT_CONNECTED;
2114 break;
2115 case GELIC_WL_WPA_LEVEL_WPA:
2116 case GELIC_WL_WPA_LEVEL_WPA2:
2117 desired_event = GELIC_LV1_WL_EVENT_WPA_CONNECTED;
2118 break;
2119 }
2120
2121 if (desired_event == event) {
2122 pr_debug("%s: completed\n", __func__);
2123 complete(&wl->assoc_done);
2124 netif_carrier_on(port_to_netdev(wl_port(wl)));
2125 } else
2126 pr_debug("%s: event %#lx under wpa\n",
2127 __func__, event);
2128}
2129
2130/*
2131 * disconnect event
2132 */
2133static void gelic_wl_disconnect_event(struct gelic_wl_info *wl,
2134 u64 event)
2135{
2136 struct gelic_eurus_cmd *cmd;
2137 int lock;
2138
2139 /*
2140 * If we get here in the middle of association,
2141 * associate_bss() should be waiting for completion of
2142 * wl->assoc_done.
2143 * As it waits with a timeout, just leave assoc_done
2144 * uncompleted; it will then terminate on the timeout
2145 */
2146 if (down_trylock(&wl->assoc_stat_lock)) {
2147 pr_debug("%s: already locked\n", __func__);
2148 lock = 0;
2149 } else {
2150 pr_debug("%s: obtain lock\n", __func__);
2151 lock = 1;
2152 }
2153
2154 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, NULL, 0);
2155 kfree(cmd);
2156
2157 /* send disconnected event to the supplicant */
2158 if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED)
2159 gelic_wl_send_iwap_event(wl, NULL);
2160
2161 wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;
2162 netif_carrier_off(port_to_netdev(wl_port(wl)));
2163
2164 if (lock)
2165 up(&wl->assoc_stat_lock);
2166}
2167/*
2168 * event worker
2169 */
2170#ifdef DEBUG
2171static const char *eventstr(enum gelic_lv1_wl_event event)
2172{
2173 static char buf[32];
2174 char *ret;
2175 if (event & GELIC_LV1_WL_EVENT_DEVICE_READY)
2176 ret = "EURUS_READY";
2177 else if (event & GELIC_LV1_WL_EVENT_SCAN_COMPLETED)
2178 ret = "SCAN_COMPLETED";
2179 else if (event & GELIC_LV1_WL_EVENT_DEAUTH)
2180 ret = "DEAUTH";
2181 else if (event & GELIC_LV1_WL_EVENT_BEACON_LOST)
2182 ret = "BEACON_LOST";
2183 else if (event & GELIC_LV1_WL_EVENT_CONNECTED)
2184 ret = "CONNECTED";
2185 else if (event & GELIC_LV1_WL_EVENT_WPA_CONNECTED)
2186 ret = "WPA_CONNECTED";
2187 else if (event & GELIC_LV1_WL_EVENT_WPA_ERROR)
2188 ret = "WPA_ERROR";
2189 else {
2190 sprintf(buf, "Unknown(%#x)", event);
2191 ret = buf;
2192 }
2193 return ret;
2194}
2195#else
2196static const char *eventstr(enum gelic_lv1_wl_event event)
2197{
2198 return NULL;
2199}
2200#endif
2201static void gelic_wl_event_worker(struct work_struct *work)
2202{
2203 struct gelic_wl_info *wl;
2204 struct gelic_port *port;
2205 u64 event, tmp;
2206 int status;
2207
2208 pr_debug("%s:start\n", __func__);
2209 wl = container_of(work, struct gelic_wl_info, event_work.work);
2210 port = wl_port(wl);
2211 while (1) {
2212 status = lv1_net_control(bus_id(port->card), dev_id(port->card),
2213 GELIC_LV1_GET_WLAN_EVENT, 0, 0, 0,
2214 &event, &tmp);
2215 if (status) {
2216 if (status != LV1_NO_ENTRY)
2217 pr_debug("%s:wlan event failed %d\n",
2218 __func__, status);
2219 /* got all events */
2220 pr_debug("%s:end\n", __func__);
2221 return;
2222 }
2223 pr_debug("%s: event=%s\n", __func__, eventstr(event));
2224 switch (event) {
2225 case GELIC_LV1_WL_EVENT_SCAN_COMPLETED:
2226 gelic_wl_scan_complete_event(wl);
2227 break;
2228 case GELIC_LV1_WL_EVENT_BEACON_LOST:
2229 case GELIC_LV1_WL_EVENT_DEAUTH:
2230 gelic_wl_disconnect_event(wl, event);
2231 break;
2232 case GELIC_LV1_WL_EVENT_CONNECTED:
2233 case GELIC_LV1_WL_EVENT_WPA_CONNECTED:
2234 gelic_wl_connected_event(wl, event);
2235 break;
2236 default:
2237 break;
2238 }
2239 } /* while */
2240}
2241/*
2242 * association worker
2243 */
2244static void gelic_wl_assoc_worker(struct work_struct *work)
2245{
2246 struct gelic_wl_info *wl;
2247
2248 struct gelic_wl_scan_info *best_bss;
2249 int ret;
2250
2251 wl = container_of(work, struct gelic_wl_info, assoc_work.work);
2252
2253 down(&wl->assoc_stat_lock);
2254
2255 if (wl->assoc_stat != GELIC_WL_ASSOC_STAT_DISCONN)
2256 goto out;
2257
2258 ret = gelic_wl_start_scan(wl, 0);
2259 if (ret == -ERESTARTSYS) {
2260 pr_debug("%s: scan start failed, retrying association\n", __func__);
2261 schedule_delayed_work(&wl->assoc_work, HZ/10); /*FIXME*/
2262 goto out;
2263 } else if (ret) {
2264 pr_info("%s: scan prerequisite failed\n", __func__);
2265 goto out;
2266 }
2267
2268 /*
2269 * Wait for bss scan completion
2270 * If we already have a scan list, gelic_wl_start_scan()
2271 * returns OK and signals the completion. Thus,
2272 * it's OK to wait unconditionally here
2273 */
2274 wait_for_completion(&wl->scan_done);
2275
2276 pr_debug("%s: scan done\n", __func__);
2277 down(&wl->scan_lock);
2278 if (wl->scan_stat != GELIC_WL_SCAN_STAT_GOT_LIST) {
2279 gelic_wl_send_iwap_event(wl, NULL);
2280 pr_info("%s: no scan list. association failed\n", __func__);
2281 goto scan_lock_out;
2282 }
2283
2284 /* find best matching bss */
2285 best_bss = gelic_wl_find_best_bss(wl);
2286 if (!best_bss) {
2287 gelic_wl_send_iwap_event(wl, NULL);
2288 pr_info("%s: no bss matched. association failed\n", __func__);
2289 goto scan_lock_out;
2290 }
2291
2292 /* ok, do association */
2293 ret = gelic_wl_associate_bss(wl, best_bss);
2294 if (ret)
2295 pr_info("%s: association failed %d\n", __func__, ret);
2296scan_lock_out:
2297 up(&wl->scan_lock);
2298out:
2299 up(&wl->assoc_stat_lock);
2300}
2301/*
2302 * Interrupt handler
2303 * Called from the ethernet interrupt handler
2304 * Processes wireless specific virtual interrupts only
2305 */
2306void gelic_wl_interrupt(struct net_device *netdev, u64 status)
2307{
2308 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
2309
2310 if (status & GELIC_CARD_WLAN_COMMAND_COMPLETED) {
2311 pr_debug("%s:cmd complete\n", __func__);
2312 complete(&wl->cmd_done_intr);
2313 }
2314
2315 if (status & GELIC_CARD_WLAN_EVENT_RECEIVED) {
2316 pr_debug("%s:event received\n", __func__);
2317 queue_delayed_work(wl->event_queue, &wl->event_work, 0);
2318 }
2319}
2320
2321/*
2322 * driver helpers
2323 */
2324#define IW_IOCTL(n) [(n) - SIOCSIWCOMMIT]
2325static const iw_handler gelic_wl_wext_handler[] =
2326{
2327 IW_IOCTL(SIOCGIWNAME) = gelic_wl_get_name,
2328 IW_IOCTL(SIOCGIWRANGE) = gelic_wl_get_range,
2329 IW_IOCTL(SIOCSIWSCAN) = gelic_wl_set_scan,
2330 IW_IOCTL(SIOCGIWSCAN) = gelic_wl_get_scan,
2331 IW_IOCTL(SIOCSIWAUTH) = gelic_wl_set_auth,
2332 IW_IOCTL(SIOCGIWAUTH) = gelic_wl_get_auth,
2333 IW_IOCTL(SIOCSIWESSID) = gelic_wl_set_essid,
2334 IW_IOCTL(SIOCGIWESSID) = gelic_wl_get_essid,
2335 IW_IOCTL(SIOCSIWENCODE) = gelic_wl_set_encode,
2336 IW_IOCTL(SIOCGIWENCODE) = gelic_wl_get_encode,
2337 IW_IOCTL(SIOCSIWAP) = gelic_wl_set_ap,
2338 IW_IOCTL(SIOCGIWAP) = gelic_wl_get_ap,
2339 IW_IOCTL(SIOCSIWENCODEEXT) = gelic_wl_set_encodeext,
2340 IW_IOCTL(SIOCGIWENCODEEXT) = gelic_wl_get_encodeext,
2341 IW_IOCTL(SIOCSIWMODE) = gelic_wl_set_mode,
2342 IW_IOCTL(SIOCGIWMODE) = gelic_wl_get_mode,
2343 IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick,
2344};
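The IW_IOCTL() macro above is a C99 designated-initializer shorthand: each handler is placed at array index (ioctl number - SIOCSIWCOMMIT), which should match how the wireless extensions core indexes iw_handler_def.standard when dispatching. Illustrative expansion only:

        /* IW_IOCTL(SIOCGIWNAME) = gelic_wl_get_name expands to roughly: */
        [SIOCGIWNAME - SIOCSIWCOMMIT] = gelic_wl_get_name,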
2345
2346static struct iw_priv_args gelic_wl_private_args[] =
2347{
2348 {
2349 .cmd = GELIC_WL_PRIV_SET_PSK,
2350 .set_args = IW_PRIV_TYPE_CHAR |
2351 (GELIC_WL_EURUS_PSK_MAX_LEN + 2),
2352 .name = "set_psk"
2353 },
2354 {
2355 .cmd = GELIC_WL_PRIV_GET_PSK,
2356 .get_args = IW_PRIV_TYPE_CHAR |
2357 (GELIC_WL_EURUS_PSK_MAX_LEN + 2),
2358 .name = "get_psk"
2359 }
2360};
2361
2362static const iw_handler gelic_wl_private_handler[] =
2363{
2364 gelic_wl_priv_set_psk,
2365 gelic_wl_priv_get_psk,
2366};
2367
2368static const struct iw_handler_def gelic_wl_wext_handler_def = {
2369 .num_standard = ARRAY_SIZE(gelic_wl_wext_handler),
2370 .standard = gelic_wl_wext_handler,
2371 .get_wireless_stats = gelic_wl_get_wireless_stats,
2372 .num_private = ARRAY_SIZE(gelic_wl_private_handler),
2373 .num_private_args = ARRAY_SIZE(gelic_wl_private_args),
2374 .private = gelic_wl_private_handler,
2375 .private_args = gelic_wl_private_args,
2376};
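Private ioctls are looked up by (cmd - SIOCIWFIRSTPRIV), so the order of gelic_wl_private_handler[] is expected to mirror the GELIC_WL_PRIV_* numbering declared in the new header. The assumed mapping:

        /* GELIC_WL_PRIV_SET_PSK == SIOCIWFIRSTPRIV + 0 -> gelic_wl_private_handler[0] (gelic_wl_priv_set_psk) */
        /* GELIC_WL_PRIV_GET_PSK == SIOCIWFIRSTPRIV + 1 -> gelic_wl_private_handler[1] (gelic_wl_priv_get_psk) */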
2377
2378static struct net_device *gelic_wl_alloc(struct gelic_card *card)
2379{
2380 struct net_device *netdev;
2381 struct gelic_port *port;
2382 struct gelic_wl_info *wl;
2383 unsigned int i;
2384
2385 pr_debug("%s:start\n", __func__);
2386 netdev = alloc_etherdev(sizeof(struct gelic_port) +
2387 sizeof(struct gelic_wl_info));
2388 pr_debug("%s: netdev=%p card=%p\n", __func__, netdev, card);
2389 if (!netdev)
2390 return NULL;
2391
2392 port = netdev_priv(netdev);
2393 port->netdev = netdev;
2394 port->card = card;
2395 port->type = GELIC_PORT_WIRELESS;
2396
2397 wl = port_wl(port);
2398 pr_debug("%s: wl=%p port=%p\n", __func__, wl, port);
2399
2400 /* allocate scan list */
2401 wl->networks = kzalloc(sizeof(struct gelic_wl_scan_info) *
2402 GELIC_WL_BSS_MAX_ENT, GFP_KERNEL);
2403
2404 if (!wl->networks)
2405 goto fail_bss;
2406
2407 wl->eurus_cmd_queue = create_singlethread_workqueue("gelic_cmd");
2408 if (!wl->eurus_cmd_queue)
2409 goto fail_cmd_workqueue;
2410
2411 wl->event_queue = create_singlethread_workqueue("gelic_event");
2412 if (!wl->event_queue)
2413 goto fail_event_workqueue;
2414
2415 INIT_LIST_HEAD(&wl->network_free_list);
2416 INIT_LIST_HEAD(&wl->network_list);
2417 for (i = 0; i < GELIC_WL_BSS_MAX_ENT; i++)
2418 list_add_tail(&wl->networks[i].list,
2419 &wl->network_free_list);
2420 init_completion(&wl->cmd_done_intr);
2421
2422 INIT_DELAYED_WORK(&wl->event_work, gelic_wl_event_worker);
2423 INIT_DELAYED_WORK(&wl->assoc_work, gelic_wl_assoc_worker);
2424 init_MUTEX(&wl->scan_lock);
2425 init_MUTEX(&wl->assoc_stat_lock);
2426
2427 init_completion(&wl->scan_done);
2428 /* for the case that no scan request is issued and stop() is called */
2429 complete(&wl->scan_done);
2430
2431 spin_lock_init(&wl->lock);
2432
2433 wl->scan_age = 5*HZ; /* FIXME */
2434
2435 /* buffer for receiving scanned list etc */
2436 BUILD_BUG_ON(PAGE_SIZE <
2437 sizeof(struct gelic_eurus_scan_info) *
2438 GELIC_EURUS_MAX_SCAN);
2439 wl->buf = (void *)get_zeroed_page(GFP_KERNEL);
2440 if (!wl->buf) {
2441 pr_info("%s:buffer allocation failed\n", __func__);
2442 goto fail_getpage;
2443 }
2444 pr_debug("%s:end\n", __func__);
2445 return netdev;
2446
2447fail_getpage:
2448 destroy_workqueue(wl->event_queue);
2449fail_event_workqueue:
2450 destroy_workqueue(wl->eurus_cmd_queue);
2451fail_cmd_workqueue:
2452 kfree(wl->networks);
2453fail_bss:
2454 free_netdev(netdev);
2455 pr_debug("%s:end error\n", __func__);
2456 return NULL;
2457
2458}
2459
2460static void gelic_wl_free(struct gelic_wl_info *wl)
2461{
2462 struct gelic_wl_scan_info *scan_info;
2463 unsigned int i;
2464
2465 pr_debug("%s: <-\n", __func__);
2466
2467 pr_debug("%s: destroy queues\n", __func__);
2468 destroy_workqueue(wl->eurus_cmd_queue);
2469 destroy_workqueue(wl->event_queue);
2470
2471 scan_info = wl->networks;
2472 for (i = 0; i < GELIC_WL_BSS_MAX_ENT; i++, scan_info++)
2473 kfree(scan_info->hwinfo);
2474 kfree(wl->networks);
2475
2476 free_netdev(port_to_netdev(wl_port(wl)));
2477
2478 pr_debug("%s: ->\n", __func__);
2479}
2480
2481static int gelic_wl_try_associate(struct net_device *netdev)
2482{
2483 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
2484 int ret = -1;
2485 unsigned int i;
2486
2487 pr_debug("%s: <-\n", __func__);
2488
2489 /* check constraints before starting association */
2490 /* for an AP with no access restrictions */
2491 if (wl->group_cipher_method == GELIC_WL_CIPHER_NONE) {
2492 if (test_bit(GELIC_WL_STAT_CONFIGURED,
2493 &wl->stat))
2494 goto do_associate;
2495 else {
2496 pr_debug("%s: no wep, not configured\n", __func__);
2497 return ret;
2498 }
2499 }
2500
2501 /* for WEP, one of four keys should be set */
2502 if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) {
2503 /* one of keys set */
2504 for (i = 0; i < GELIC_WEP_KEYS; i++) {
2505 if (test_bit(i, &wl->key_enabled))
2506 goto do_associate;
2507 }
2508 pr_debug("%s: WEP, but no key specified\n", __func__);
2509 return ret;
2510 }
2511
2512 /* for WPA[2], psk should be set */
2513 if ((wl->group_cipher_method == GELIC_WL_CIPHER_TKIP) ||
2514 (wl->group_cipher_method == GELIC_WL_CIPHER_AES)) {
2515 if (test_bit(GELIC_WL_STAT_WPA_PSK_SET,
2516 &wl->stat))
2517 goto do_associate;
2518 else {
2519 pr_debug("%s: AES/TKIP, but PSK not configured\n",
2520 __func__);
2521 return ret;
2522 }
2523 }
2524
2525do_associate:
2526 ret = schedule_delayed_work(&wl->assoc_work, 0);
2527 pr_debug("%s: start association work %d\n", __func__, ret);
2528 return ret;
2529}
2530
2531/*
2532 * netdev handlers
2533 */
2534static int gelic_wl_open(struct net_device *netdev)
2535{
2536 struct gelic_card *card = netdev_card(netdev);
2537
2538 pr_debug("%s:->%p\n", __func__, netdev);
2539
2540 gelic_card_up(card);
2541
2542 /* try to associate */
2543 gelic_wl_try_associate(netdev);
2544
2545 netif_start_queue(netdev);
2546
2547 pr_debug("%s:<-\n", __func__);
2548 return 0;
2549}
2550
2551/*
2552 * reset state machine
2553 */
2554static int gelic_wl_reset_state(struct gelic_wl_info *wl)
2555{
2556 struct gelic_wl_scan_info *target;
2557 struct gelic_wl_scan_info *tmp;
2558
2559 /* empty scan list */
2560 list_for_each_entry_safe(target, tmp, &wl->network_list, list) {
2561 list_move_tail(&target->list, &wl->network_free_list);
2562 }
2563 wl->scan_stat = GELIC_WL_SCAN_STAT_INIT;
2564
2565 /* clear configuration */
2566 wl->auth_method = GELIC_EURUS_AUTH_OPEN;
2567 wl->group_cipher_method = GELIC_WL_CIPHER_NONE;
2568 wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE;
2569 wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE;
2570
2571 wl->key_enabled = 0;
2572 wl->current_key = 0;
2573
2574 wl->psk_type = GELIC_EURUS_WPA_PSK_PASSPHRASE;
2575 wl->psk_len = 0;
2576
2577 wl->essid_len = 0;
2578 memset(wl->essid, 0, sizeof(wl->essid));
2579 memset(wl->bssid, 0, sizeof(wl->bssid));
2580 memset(wl->active_bssid, 0, sizeof(wl->active_bssid));
2581
2582 wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN;
2583
2584 memset(&wl->iwstat, 0, sizeof(wl->iwstat));
2585 /* all status bit clear */
2586 wl->stat = 0;
2587 return 0;
2588}
2589
2590/*
2591 * Tell eurus to terminate association
2592 */
2593static void gelic_wl_disconnect(struct net_device *netdev)
2594{
2595 struct gelic_port *port = netdev_priv(netdev);
2596 struct gelic_wl_info *wl = port_wl(port);
2597 struct gelic_eurus_cmd *cmd;
2598
2599 /*
2600 * If a scan process is running on the chip,
2601 * further requests will be rejected
2602 */
2603 if (wl->scan_stat == GELIC_WL_SCAN_STAT_SCANNING)
2604 wait_for_completion_timeout(&wl->scan_done, HZ);
2605
2606 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, NULL, 0);
2607 kfree(cmd);
2608 gelic_wl_send_iwap_event(wl, NULL);
2609};
2610
2611static int gelic_wl_stop(struct net_device *netdev)
2612{
2613 struct gelic_port *port = netdev_priv(netdev);
2614 struct gelic_wl_info *wl = port_wl(port);
2615 struct gelic_card *card = netdev_card(netdev);
2616
2617 pr_debug("%s:<-\n", __func__);
2618
2619 /*
2620 * Cancel pending association work.
2621 * The event work can still run after the netdev is down
2622 */
2623 cancel_delayed_work(&wl->assoc_work);
2624
2625 if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED)
2626 gelic_wl_disconnect(netdev);
2627
2628 /* reset our state machine */
2629 gelic_wl_reset_state(wl);
2630
2631 netif_stop_queue(netdev);
2632
2633 gelic_card_down(card);
2634
2635 pr_debug("%s:->\n", __func__);
2636 return 0;
2637}
2638
2639/* -- */
2640
2641static struct ethtool_ops gelic_wl_ethtool_ops = {
2642 .get_drvinfo = gelic_net_get_drvinfo,
2643 .get_link = gelic_wl_get_link,
2644 .get_tx_csum = ethtool_op_get_tx_csum,
2645 .set_tx_csum = ethtool_op_set_tx_csum,
2646 .get_rx_csum = gelic_net_get_rx_csum,
2647 .set_rx_csum = gelic_net_set_rx_csum,
2648};
2649
2650static void gelic_wl_setup_netdev_ops(struct net_device *netdev)
2651{
2652 struct gelic_wl_info *wl;
2653 wl = port_wl(netdev_priv(netdev));
2654 BUG_ON(!wl);
2655 netdev->open = &gelic_wl_open;
2656 netdev->stop = &gelic_wl_stop;
2657 netdev->hard_start_xmit = &gelic_net_xmit;
2658 netdev->set_multicast_list = &gelic_net_set_multi;
2659 netdev->change_mtu = &gelic_net_change_mtu;
2660 netdev->wireless_data = &wl->wireless_data;
2661 netdev->wireless_handlers = &gelic_wl_wext_handler_def;
2662 /* tx watchdog */
2663 netdev->tx_timeout = &gelic_net_tx_timeout;
2664 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
2665
2666 netdev->ethtool_ops = &gelic_wl_ethtool_ops;
2667#ifdef CONFIG_NET_POLL_CONTROLLER
2668 netdev->poll_controller = gelic_net_poll_controller;
2669#endif
2670}
2671
2672/*
2673 * driver probe/remove
2674 */
2675int gelic_wl_driver_probe(struct gelic_card *card)
2676{
2677 int ret;
2678 struct net_device *netdev;
2679
2680 pr_debug("%s:start\n", __func__);
2681
2682 if (ps3_compare_firmware_version(1, 6, 0) < 0)
2683 return 0;
2684 if (!card->vlan[GELIC_PORT_WIRELESS].tx)
2685 return 0;
2686
2687 /* alloc netdevice for wireless */
2688 netdev = gelic_wl_alloc(card);
2689 if (!netdev)
2690 return -ENOMEM;
2691
2692 /* setup net_device structure */
2693 gelic_wl_setup_netdev_ops(netdev);
2694
2695 /* setup some of net_device and register it */
2696 ret = gelic_net_setup_netdev(netdev, card);
2697 if (ret)
2698 goto fail_setup;
2699 card->netdev[GELIC_PORT_WIRELESS] = netdev;
2700
2701 /* enable the wireless interrupts */
2702 card->irq_mask |= GELIC_CARD_WLAN_EVENT_RECEIVED |
2703 GELIC_CARD_WLAN_COMMAND_COMPLETED;
2704 /* to allow wireless commands while both interfaces are down */
2705 gelic_card_set_irq_mask(card, GELIC_CARD_WLAN_EVENT_RECEIVED |
2706 GELIC_CARD_WLAN_COMMAND_COMPLETED);
2707 pr_debug("%s:end\n", __func__);
2708 return 0;
2709
2710fail_setup:
2711 gelic_wl_free(port_wl(netdev_port(netdev)));
2712
2713 return ret;
2714}
2715
2716int gelic_wl_driver_remove(struct gelic_card *card)
2717{
2718 struct gelic_wl_info *wl;
2719 struct net_device *netdev;
2720
2721 pr_debug("%s:start\n", __func__);
2722
2723 if (ps3_compare_firmware_version(1, 6, 0) < 0)
2724 return 0;
2725 if (!card->vlan[GELIC_PORT_WIRELESS].tx)
2726 return 0;
2727
2728 netdev = card->netdev[GELIC_PORT_WIRELESS];
2729 wl = port_wl(netdev_priv(netdev));
2730
2731 /* if the interface was not up, but associated */
2732 if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED)
2733 gelic_wl_disconnect(netdev);
2734
2735 complete(&wl->cmd_done_intr);
2736
2737 /* cancel all queued work */
2738 cancel_delayed_work(&wl->assoc_work);
2739 cancel_delayed_work(&wl->event_work);
2740 flush_workqueue(wl->eurus_cmd_queue);
2741 flush_workqueue(wl->event_queue);
2742
2743 unregister_netdev(netdev);
2744
2745 /* disable wireless interrupt */
2746 pr_debug("%s: disable intr\n", __func__);
2747 card->irq_mask &= ~(GELIC_CARD_WLAN_EVENT_RECEIVED |
2748 GELIC_CARD_WLAN_COMMAND_COMPLETED);
2749 /* free bss list, netdev*/
2750 gelic_wl_free(wl);
2751 pr_debug("%s:end\n", __func__);
2752 return 0;
2753}
diff --git a/drivers/net/ps3_gelic_wireless.h b/drivers/net/ps3_gelic_wireless.h
new file mode 100644
index 000000000000..103697166720
--- /dev/null
+++ b/drivers/net/ps3_gelic_wireless.h
@@ -0,0 +1,329 @@
1/*
2 * PS3 gelic network driver.
3 *
4 * Copyright (C) 2007 Sony Computer Entertainment Inc.
5 * Copyright 2007 Sony Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation version 2.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#ifndef _GELIC_WIRELESS_H
21#define _GELIC_WIRELESS_H
22
23#include <linux/wireless.h>
24#include <net/iw_handler.h>
25
26
27/* return value from GELIC_LV1_GET_WLAN_EVENT netcontrol */
28enum gelic_lv1_wl_event {
29 GELIC_LV1_WL_EVENT_DEVICE_READY = 0x01, /* Eurus ready */
30 GELIC_LV1_WL_EVENT_SCAN_COMPLETED = 0x02, /* Scan has completed */
31 GELIC_LV1_WL_EVENT_DEAUTH = 0x04, /* Deauthed by the AP */
32 GELIC_LV1_WL_EVENT_BEACON_LOST = 0x08, /* Beacon lost detected */
33 GELIC_LV1_WL_EVENT_CONNECTED = 0x10, /* Connected to AP */
34 GELIC_LV1_WL_EVENT_WPA_CONNECTED = 0x20, /* WPA connection */
35 GELIC_LV1_WL_EVENT_WPA_ERROR = 0x40, /* MIC error */
36};
37
38/* arguments for GELIC_LV1_POST_WLAN_COMMAND netcontrol */
39enum gelic_eurus_command {
40 GELIC_EURUS_CMD_ASSOC = 1, /* association start */
41 GELIC_EURUS_CMD_DISASSOC = 2, /* disassociate */
42 GELIC_EURUS_CMD_START_SCAN = 3, /* scan start */
43 GELIC_EURUS_CMD_GET_SCAN = 4, /* get scan result */
44 GELIC_EURUS_CMD_SET_COMMON_CFG = 5, /* set common config */
45 GELIC_EURUS_CMD_GET_COMMON_CFG = 6, /* get common config */
46 GELIC_EURUS_CMD_SET_WEP_CFG = 7, /* set WEP config */
47 GELIC_EURUS_CMD_GET_WEP_CFG = 8, /* get WEP config */
48 GELIC_EURUS_CMD_SET_WPA_CFG = 9, /* set WPA config */
49 GELIC_EURUS_CMD_GET_WPA_CFG = 10, /* get WPA config */
50 GELIC_EURUS_CMD_GET_RSSI_CFG = 11, /* get RSSI info. */
51 GELIC_EURUS_CMD_MAX_INDEX
52};
53
54/* for GELIC_EURUS_CMD_COMMON_CFG */
55enum gelic_eurus_bss_type {
56 GELIC_EURUS_BSS_INFRA = 0,
57 GELIC_EURUS_BSS_ADHOC = 1, /* not supported */
58};
59
60enum gelic_eurus_auth_method {
61 GELIC_EURUS_AUTH_OPEN = 0, /* FIXME: WLAN_AUTH_OPEN */
62 GELIC_EURUS_AUTH_SHARED = 1, /* not supported */
63};
64
65enum gelic_eurus_opmode {
66 GELIC_EURUS_OPMODE_11BG = 0, /* 802.11b/g */
67 GELIC_EURUS_OPMODE_11B = 1, /* 802.11b only */
68 GELIC_EURUS_OPMODE_11G = 2, /* 802.11g only */
69};
70
71struct gelic_eurus_common_cfg {
72 /* all fields are big endian */
73 u16 scan_index;
74 u16 bss_type; /* infra or adhoc */
75 u16 auth_method; /* shared key or open */
76 u16 op_mode; /* B/G */
77} __attribute__((packed));
78
79
80/* for GELIC_EURUS_CMD_WEP_CFG */
81enum gelic_eurus_wep_security {
82 GELIC_EURUS_WEP_SEC_NONE = 0,
83 GELIC_EURUS_WEP_SEC_40BIT = 1,
84 GELIC_EURUS_WEP_SEC_104BIT = 2,
85};
86
87struct gelic_eurus_wep_cfg {
88 /* all fields are big endian */
89 u16 security;
90 u8 key[4][16];
91} __attribute__((packed));
92
93/* for GELIC_EURUS_CMD_WPA_CFG */
94enum gelic_eurus_wpa_security {
95 GELIC_EURUS_WPA_SEC_NONE = 0x0000,
96 /* group=TKIP, pairwise=TKIP */
97 GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP = 0x0001,
98 /* group=AES, pairwise=AES */
99 GELIC_EURUS_WPA_SEC_WPA_AES_AES = 0x0002,
100 /* group=TKIP, pairwise=TKIP */
101 GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP = 0x0004,
102 /* group=AES, pairwise=AES */
103 GELIC_EURUS_WPA_SEC_WPA2_AES_AES = 0x0008,
104 /* group=TKIP, pairwise=AES */
105 GELIC_EURUS_WPA_SEC_WPA_TKIP_AES = 0x0010,
106 /* group=TKIP, pairwise=AES */
107 GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES = 0x0020,
108};
109
110enum gelic_eurus_wpa_psk_type {
111 GELIC_EURUS_WPA_PSK_PASSPHRASE = 0, /* passphrase string */
112 GELIC_EURUS_WPA_PSK_BIN = 1, /* 32 bytes binary key */
113};
114
115#define GELIC_WL_EURUS_PSK_MAX_LEN 64
116#define WPA_PSK_LEN 32 /* WPA spec says 256bit */
117
118struct gelic_eurus_wpa_cfg {
119 /* all fields are big endian */
120 u16 security;
121 u16 psk_type; /* psk key encoding type */
122 u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */
123} __attribute__((packed));
124
125/* for GELIC_EURUS_CMD_{START,GET}_SCAN */
126enum gelic_eurus_scan_capability {
127 GELIC_EURUS_SCAN_CAP_ADHOC = 0x0000,
128 GELIC_EURUS_SCAN_CAP_INFRA = 0x0001,
129 GELIC_EURUS_SCAN_CAP_MASK = 0x0001,
130};
131
132enum gelic_eurus_scan_sec_type {
133 GELIC_EURUS_SCAN_SEC_NONE = 0x0000,
134 GELIC_EURUS_SCAN_SEC_WEP = 0x0100,
135 GELIC_EURUS_SCAN_SEC_WPA = 0x0200,
136 GELIC_EURUS_SCAN_SEC_WPA2 = 0x0400,
137 GELIC_EURUS_SCAN_SEC_MASK = 0x0f00,
138};
139
140enum gelic_eurus_scan_sec_wep_type {
141 GELIC_EURUS_SCAN_SEC_WEP_UNKNOWN = 0x0000,
142 GELIC_EURUS_SCAN_SEC_WEP_40 = 0x0001,
143 GELIC_EURUS_SCAN_SEC_WEP_104 = 0x0002,
144 GELIC_EURUS_SCAN_SEC_WEP_MASK = 0x0003,
145};
146
147enum gelic_eurus_scan_sec_wpa_type {
148 GELIC_EURUS_SCAN_SEC_WPA_UNKNOWN = 0x0000,
149 GELIC_EURUS_SCAN_SEC_WPA_TKIP = 0x0001,
150 GELIC_EURUS_SCAN_SEC_WPA_AES = 0x0002,
151 GELIC_EURUS_SCAN_SEC_WPA_MASK = 0x0003,
152};
153
154/*
155 * hw BSS information structure returned from GELIC_EURUS_CMD_GET_SCAN
156 */
157struct gelic_eurus_scan_info {
158 /* all fields are big endian */
159 __be16 size;
160 __be16 rssi; /* percentage */
161 __be16 channel; /* channel number */
162 __be16 beacon_period; /* FIXME: in msec unit */
163 __be16 capability;
164 __be16 security;
165 u8 bssid[8]; /* last ETH_ALEN bytes are valid; bssid[0],[1] are unused */
166 u8 essid[32]; /* IW_ESSID_MAX_SIZE */
167 u8 rate[16]; /* first MAX_RATES_LENGTH(12) are valid */
168 u8 ext_rate[16]; /* first MAX_RATES_EX_LENGTH(16) are valid */
169 __be32 reserved1;
170 __be32 reserved2;
171 __be32 reserved3;
172 __be32 reserved4;
173 u8 elements[0]; /* ie */
174} __attribute__ ((packed));
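GELIC_EURUS_CMD_GET_SCAN returns these records packed back to back; each record's big-endian size field covers the fixed header plus the trailing IEs in elements[], and only the last ETH_ALEN bytes of bssid[] hold the BSSID. A minimal sketch of walking such a buffer, mirroring the loop in gelic_wl_scan_complete_event() in ps3_gelic_wireless.c (the function and its buf/buf_size arguments are hypothetical):

        static void walk_scan_results(void *buf, unsigned int buf_size)
        {
                struct gelic_eurus_scan_info *si = buf;
                unsigned int used = 0;

                while (used + sizeof(*si) <= buf_size && be16_to_cpu(si->size)) {
                        /* only the last ETH_ALEN bytes of si->bssid[] are the BSSID */
                        unsigned int essid_len = strnlen(si->essid, sizeof(si->essid));

                        pr_debug("scan entry: size=%u essid_len=%u\n",
                                 be16_to_cpu(si->size), essid_len);
                        used += be16_to_cpu(si->size);
                        si = (void *)si + be16_to_cpu(si->size);
                }
        }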
175
176/* the hypervisor returns up to 16 BSSes */
177#define GELIC_EURUS_MAX_SCAN (16)
178struct gelic_wl_scan_info {
179 struct list_head list;
180 struct gelic_eurus_scan_info *hwinfo;
181
182 int valid; /* set to 1 if this entry was in the latest scan list
183 * from Eurus */
184 unsigned int eurus_index; /* index in the Eurus list */
185 unsigned long last_scanned; /* acquired time */
186
187 unsigned int rate_len;
188 unsigned int rate_ext_len;
189 unsigned int essid_len;
190};
191
192/* for GELIC_EURUS_CMD_GET_RSSI */
193struct gelic_eurus_rssi_info {
194 /* big endian */
195 __be16 rssi;
196} __attribute__ ((packed));
197
198
199/* for 'stat' member of gelic_wl_info */
200enum gelic_wl_info_status_bit {
201 GELIC_WL_STAT_CONFIGURED,
202 GELIC_WL_STAT_CH_INFO, /* ch info acquired */
203 GELIC_WL_STAT_ESSID_SET, /* ESSID specified by userspace */
204 GELIC_WL_STAT_BSSID_SET, /* BSSID specified by userspace */
205 GELIC_WL_STAT_WPA_PSK_SET, /* PMK specified by userspace */
206 GELIC_WL_STAT_WPA_LEVEL_SET, /* WEP or WPA[2] selected */
207};
208
209/* for 'scan_stat' member of gelic_wl_info */
210enum gelic_wl_scan_state {
211 /* just initialized or get last scan result failed */
212 GELIC_WL_SCAN_STAT_INIT,
213 /* scan request issued, accepted or chip is scanning */
214 GELIC_WL_SCAN_STAT_SCANNING,
215 /* scan results retrieved */
216 GELIC_WL_SCAN_STAT_GOT_LIST,
217};
218
219/* for 'cipher_method' */
220enum gelic_wl_cipher_method {
221 GELIC_WL_CIPHER_NONE,
222 GELIC_WL_CIPHER_WEP,
223 GELIC_WL_CIPHER_TKIP,
224 GELIC_WL_CIPHER_AES,
225};
226
227/* for 'wpa_level' */
228enum gelic_wl_wpa_level {
229 GELIC_WL_WPA_LEVEL_NONE,
230 GELIC_WL_WPA_LEVEL_WPA,
231 GELIC_WL_WPA_LEVEL_WPA2,
232};
233
234/* for 'assoc_stat' */
235enum gelic_wl_assoc_state {
236 GELIC_WL_ASSOC_STAT_DISCONN,
237 GELIC_WL_ASSOC_STAT_ASSOCIATING,
238 GELIC_WL_ASSOC_STAT_ASSOCIATED,
239};
240/* part of private data alloc_etherdev() allocated */
241#define GELIC_WEP_KEYS 4
242struct gelic_wl_info {
243 /* bss list */
244 struct semaphore scan_lock;
245 struct list_head network_list;
246 struct list_head network_free_list;
247 struct gelic_wl_scan_info *networks;
248
249 unsigned long scan_age; /* last scanned time */
250 enum gelic_wl_scan_state scan_stat;
251 struct completion scan_done;
252
253 /* eurus command queue */
254 struct workqueue_struct *eurus_cmd_queue;
255 struct completion cmd_done_intr;
256
257 /* eurus event handling */
258 struct workqueue_struct *event_queue;
259 struct delayed_work event_work;
260
261 /* wl status bits */
262 unsigned long stat;
263 enum gelic_eurus_auth_method auth_method; /* open/shared */
264 enum gelic_wl_cipher_method group_cipher_method;
265 enum gelic_wl_cipher_method pairwise_cipher_method;
266 enum gelic_wl_wpa_level wpa_level; /* wpa/wpa2 */
267
268 /* association handling */
269 struct semaphore assoc_stat_lock;
270 struct delayed_work assoc_work;
271 enum gelic_wl_assoc_state assoc_stat;
272 struct completion assoc_done;
273
274 spinlock_t lock;
275 u16 ch_info; /* available channels. bit0 = ch1 */
276 /* WEP keys */
277 u8 key[GELIC_WEP_KEYS][IW_ENCODING_TOKEN_MAX];
278 unsigned long key_enabled;
279 unsigned int key_len[GELIC_WEP_KEYS];
280 unsigned int current_key;
281 /* WPA PSK */
282 u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN];
283 enum gelic_eurus_wpa_psk_type psk_type;
284 unsigned int psk_len;
285
286 u8 essid[IW_ESSID_MAX_SIZE];
287 u8 bssid[ETH_ALEN]; /* userland requested */
288 u8 active_bssid[ETH_ALEN]; /* associated bssid */
289 unsigned int essid_len;
290
291 /* buffer for hypervisor IO */
292 void *buf;
293
294 struct iw_public_data wireless_data;
295 struct iw_statistics iwstat;
296};
297
298#define GELIC_WL_BSS_MAX_ENT 32
299#define GELIC_WL_ASSOC_RETRY 50
300static inline struct gelic_port *wl_port(struct gelic_wl_info *wl)
301{
302 return container_of((void *)wl, struct gelic_port, priv);
303}
304static inline struct gelic_wl_info *port_wl(struct gelic_port *port)
305{
306 return port_priv(port);
307}
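wl_port() and port_wl() rely on the gelic_wl_info being carved out of the priv area at the tail of struct gelic_port, which is itself the net_device private data (see gelic_wl_alloc() in ps3_gelic_wireless.c). A small round-trip illustration (the helper name is hypothetical):

        static void wl_port_roundtrip(struct net_device *netdev)
        {
                struct gelic_port *port = netdev_priv(netdev); /* port lives in the netdev priv */
                struct gelic_wl_info *wl = port_wl(port);      /* port -> wl */

                BUG_ON(wl_port(wl) != port);                   /* wl -> port gets us back */
        }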
308
309struct gelic_eurus_cmd {
310 struct work_struct work;
311 struct gelic_wl_info *wl;
312 unsigned int cmd; /* command code */
313 u64 tag;
314 u64 size;
315 void *buffer;
316 unsigned int buf_size;
317 struct completion done;
318 int status;
319 u64 cmd_status;
320};
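This is the object handed back by gelic_eurus_sync_cmd() in ps3_gelic_wireless.c. The calling convention used throughout that file is: a NULL return means the command could not be issued, both status and cmd_status are checked for errors, and the command is always kfree()d afterwards. A minimal sketch of that pattern (the wrapper function is hypothetical):

        static int gelic_wl_issue_disassoc(struct gelic_wl_info *wl)
        {
                struct gelic_eurus_cmd *cmd;
                int ret = 0;

                cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, NULL, 0);
                if (!cmd)
                        ret = -ENOMEM;                  /* could not issue the command */
                else if (cmd->status || cmd->cmd_status)
                        ret = -ENXIO;                   /* command reported an error */
                kfree(cmd);                             /* kfree(NULL) is a no-op */
                return ret;
        }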
321
322/* private ioctls to pass PSK */
323#define GELIC_WL_PRIV_SET_PSK (SIOCIWFIRSTPRIV + 0)
324#define GELIC_WL_PRIV_GET_PSK (SIOCIWFIRSTPRIV + 1)
325
326extern int gelic_wl_driver_probe(struct gelic_card *card);
327extern int gelic_wl_driver_remove(struct gelic_card *card);
328extern void gelic_wl_interrupt(struct net_device *netdev, u64 status);
329#endif /* _GELIC_WIRELESS_H */
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 2334f4ebf907..19184e486ae9 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -61,7 +61,6 @@
61 61
62/* Time in jiffies before concluding the transmitter is hung. */ 62/* Time in jiffies before concluding the transmitter is hung. */
63#define TX_TIMEOUT (6000 * HZ / 1000) 63#define TX_TIMEOUT (6000 * HZ / 1000)
64#define TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
65 64
66/* RDC MAC I/O Size */ 65/* RDC MAC I/O Size */
67#define R6040_IO_SIZE 256 66#define R6040_IO_SIZE 256
@@ -174,8 +173,6 @@ struct r6040_private {
174 struct net_device *dev; 173 struct net_device *dev;
175 struct mii_if_info mii_if; 174 struct mii_if_info mii_if;
176 struct napi_struct napi; 175 struct napi_struct napi;
177 struct net_device_stats stats;
178 u16 napi_rx_running;
179 void __iomem *base; 176 void __iomem *base;
180}; 177};
181 178
@@ -235,17 +232,53 @@ static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
235 phy_write(ioaddr, lp->phy_addr, reg, val); 232 phy_write(ioaddr, lp->phy_addr, reg, val);
236} 233}
237 234
238static void r6040_tx_timeout(struct net_device *dev) 235static void r6040_free_txbufs(struct net_device *dev)
239{ 236{
240 struct r6040_private *priv = netdev_priv(dev); 237 struct r6040_private *lp = netdev_priv(dev);
238 int i;
241 239
242 disable_irq(dev->irq); 240 for (i = 0; i < TX_DCNT; i++) {
243 napi_disable(&priv->napi); 241 if (lp->tx_insert_ptr->skb_ptr) {
244 spin_lock(&priv->lock); 242 pci_unmap_single(lp->pdev, lp->tx_insert_ptr->buf,
245 dev->stats.tx_errors++; 243 MAX_BUF_SIZE, PCI_DMA_TODEVICE);
246 spin_unlock(&priv->lock); 244 dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
245 lp->tx_insert_ptr->skb_ptr = NULL;
246 }
247 lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
248 }
249}
247 250
248 netif_stop_queue(dev); 251static void r6040_free_rxbufs(struct net_device *dev)
252{
253 struct r6040_private *lp = netdev_priv(dev);
254 int i;
255
256 for (i = 0; i < RX_DCNT; i++) {
257 if (lp->rx_insert_ptr->skb_ptr) {
258 pci_unmap_single(lp->pdev, lp->rx_insert_ptr->buf,
259 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
260 dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
261 lp->rx_insert_ptr->skb_ptr = NULL;
262 }
263 lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
264 }
265}
266
267static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
268 dma_addr_t desc_dma, int size)
269{
270 struct r6040_descriptor *desc = desc_ring;
271 dma_addr_t mapping = desc_dma;
272
273 while (size-- > 0) {
274 mapping += sizeof(*desc);
275 desc->ndesc = cpu_to_le32(mapping);
276 desc->vndescp = desc + 1;
277 desc++;
278 }
279 desc--;
280 desc->ndesc = cpu_to_le32(desc_dma);
281 desc->vndescp = desc_ring;
249} 282}
250 283
251/* Allocate skb buffer for rx descriptor */ 284/* Allocate skb buffer for rx descriptor */
@@ -256,7 +289,7 @@ static void rx_buf_alloc(struct r6040_private *lp, struct net_device *dev)
256 289
257 descptr = lp->rx_insert_ptr; 290 descptr = lp->rx_insert_ptr;
258 while (lp->rx_free_desc < RX_DCNT) { 291 while (lp->rx_free_desc < RX_DCNT) {
259 descptr->skb_ptr = dev_alloc_skb(MAX_BUF_SIZE); 292 descptr->skb_ptr = netdev_alloc_skb(dev, MAX_BUF_SIZE);
260 293
261 if (!descptr->skb_ptr) 294 if (!descptr->skb_ptr)
262 break; 295 break;
@@ -272,6 +305,63 @@ static void rx_buf_alloc(struct r6040_private *lp, struct net_device *dev)
272 lp->rx_insert_ptr = descptr; 305 lp->rx_insert_ptr = descptr;
273} 306}
274 307
308static void r6040_alloc_txbufs(struct net_device *dev)
309{
310 struct r6040_private *lp = netdev_priv(dev);
311 void __iomem *ioaddr = lp->base;
312
313 lp->tx_free_desc = TX_DCNT;
314
315 lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
316 r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
317
318 iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
319 iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
320}
321
322static void r6040_alloc_rxbufs(struct net_device *dev)
323{
324 struct r6040_private *lp = netdev_priv(dev);
325 void __iomem *ioaddr = lp->base;
326
327 lp->rx_free_desc = 0;
328
329 lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
330 r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
331
332 rx_buf_alloc(lp, dev);
333
334 iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
335 iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);
336}
337
338static void r6040_tx_timeout(struct net_device *dev)
339{
340 struct r6040_private *priv = netdev_priv(dev);
341 void __iomem *ioaddr = priv->base;
342
343 printk(KERN_WARNING "%s: transmit timed out, status %4.4x, PHY status "
344 "%4.4x\n",
345 dev->name, ioread16(ioaddr + MIER),
346 mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
347
348 disable_irq(dev->irq);
349 napi_disable(&priv->napi);
350 spin_lock(&priv->lock);
351 /* Clear all descriptors */
352 r6040_free_txbufs(dev);
353 r6040_free_rxbufs(dev);
354 r6040_alloc_txbufs(dev);
355 r6040_alloc_rxbufs(dev);
356
357 /* Reset MAC */
358 iowrite16(MAC_RST, ioaddr + MCR1);
359 spin_unlock(&priv->lock);
360 enable_irq(dev->irq);
361
362 dev->stats.tx_errors++;
363 netif_wake_queue(dev);
364}
275 365
276static struct net_device_stats *r6040_get_stats(struct net_device *dev) 366static struct net_device_stats *r6040_get_stats(struct net_device *dev)
277{ 367{
@@ -280,11 +370,11 @@ static struct net_device_stats *r6040_get_stats(struct net_device *dev)
280 unsigned long flags; 370 unsigned long flags;
281 371
282 spin_lock_irqsave(&priv->lock, flags); 372 spin_lock_irqsave(&priv->lock, flags);
283 priv->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1); 373 dev->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1);
284 priv->stats.multicast += ioread8(ioaddr + ME_CNT0); 374 dev->stats.multicast += ioread8(ioaddr + ME_CNT0);
285 spin_unlock_irqrestore(&priv->lock, flags); 375 spin_unlock_irqrestore(&priv->lock, flags);
286 376
287 return &priv->stats; 377 return &dev->stats;
288} 378}
289 379
290/* Stop RDC MAC and Free the allocated resource */ 380/* Stop RDC MAC and Free the allocated resource */
@@ -293,7 +383,6 @@ static void r6040_down(struct net_device *dev)
293 struct r6040_private *lp = netdev_priv(dev); 383 struct r6040_private *lp = netdev_priv(dev);
294 void __iomem *ioaddr = lp->base; 384 void __iomem *ioaddr = lp->base;
295 struct pci_dev *pdev = lp->pdev; 385 struct pci_dev *pdev = lp->pdev;
296 int i;
297 int limit = 2048; 386 int limit = 2048;
298 u16 *adrp; 387 u16 *adrp;
299 u16 cmd; 388 u16 cmd;
@@ -313,27 +402,12 @@ static void r6040_down(struct net_device *dev)
313 iowrite16(adrp[1], ioaddr + MID_0M); 402 iowrite16(adrp[1], ioaddr + MID_0M);
314 iowrite16(adrp[2], ioaddr + MID_0H); 403 iowrite16(adrp[2], ioaddr + MID_0H);
315 free_irq(dev->irq, dev); 404 free_irq(dev->irq, dev);
405
316 /* Free RX buffer */ 406 /* Free RX buffer */
317 for (i = 0; i < RX_DCNT; i++) { 407 r6040_free_rxbufs(dev);
318 if (lp->rx_insert_ptr->skb_ptr) {
319 pci_unmap_single(lp->pdev, lp->rx_insert_ptr->buf,
320 MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
321 dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
322 lp->rx_insert_ptr->skb_ptr = NULL;
323 }
324 lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
325 }
326 408
327 /* Free TX buffer */ 409 /* Free TX buffer */
328 for (i = 0; i < TX_DCNT; i++) { 410 r6040_free_txbufs(dev);
329 if (lp->tx_insert_ptr->skb_ptr) {
330 pci_unmap_single(lp->pdev, lp->tx_insert_ptr->buf,
331 MAX_BUF_SIZE, PCI_DMA_TODEVICE);
332 dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
333 lp->rx_insert_ptr->skb_ptr = NULL;
334 }
335 lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
336 }
337 411
338 /* Free Descriptor memory */ 412 /* Free Descriptor memory */
339 pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma); 413 pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
@@ -432,19 +506,24 @@ static int r6040_rx(struct net_device *dev, int limit)
432 506
433 /* Check for errors */ 507 /* Check for errors */
434 err = ioread16(ioaddr + MLSR); 508 err = ioread16(ioaddr + MLSR);
435 if (err & 0x0400) priv->stats.rx_errors++; 509 if (err & 0x0400)
510 dev->stats.rx_errors++;
436 /* RX FIFO over-run */ 511 /* RX FIFO over-run */
437 if (err & 0x8000) priv->stats.rx_fifo_errors++; 512 if (err & 0x8000)
513 dev->stats.rx_fifo_errors++;
438 /* RX descriptor unavailable */ 514 /* RX descriptor unavailable */
439 if (err & 0x0080) priv->stats.rx_frame_errors++; 515 if (err & 0x0080)
516 dev->stats.rx_frame_errors++;
440 /* Received packet with length over buffer lenght */ 517 /* Received packet with length over buffer lenght */
441 if (err & 0x0020) priv->stats.rx_over_errors++; 518 if (err & 0x0020)
519 dev->stats.rx_over_errors++;
442 /* Received packet with too long or short */ 520 /* Received packet with too long or short */
443 if (err & (0x0010|0x0008)) priv->stats.rx_length_errors++; 521 if (err & (0x0010 | 0x0008))
522 dev->stats.rx_length_errors++;
444 /* Received packet with CRC errors */ 523 /* Received packet with CRC errors */
445 if (err & 0x0004) { 524 if (err & 0x0004) {
446 spin_lock(&priv->lock); 525 spin_lock(&priv->lock);
447 priv->stats.rx_crc_errors++; 526 dev->stats.rx_crc_errors++;
448 spin_unlock(&priv->lock); 527 spin_unlock(&priv->lock);
449 } 528 }
450 529
@@ -469,8 +548,8 @@ static int r6040_rx(struct net_device *dev, int limit)
469 /* Send to upper layer */ 548 /* Send to upper layer */
470 netif_receive_skb(skb_ptr); 549 netif_receive_skb(skb_ptr);
471 dev->last_rx = jiffies; 550 dev->last_rx = jiffies;
472 priv->dev->stats.rx_packets++; 551 dev->stats.rx_packets++;
473 priv->dev->stats.rx_bytes += descptr->len; 552 dev->stats.rx_bytes += descptr->len;
474 /* To next descriptor */ 553 /* To next descriptor */
475 descptr = descptr->vndescp; 554 descptr = descptr->vndescp;
476 priv->rx_free_desc--; 555 priv->rx_free_desc--;
@@ -498,11 +577,13 @@ static void r6040_tx(struct net_device *dev)
498 /* Check for errors */ 577 /* Check for errors */
499 err = ioread16(ioaddr + MLSR); 578 err = ioread16(ioaddr + MLSR);
500 579
501 if (err & 0x0200) priv->stats.rx_fifo_errors++; 580 if (err & 0x0200)
502 if (err & (0x2000 | 0x4000)) priv->stats.tx_carrier_errors++; 581 dev->stats.rx_fifo_errors++;
582 if (err & (0x2000 | 0x4000))
583 dev->stats.tx_carrier_errors++;
503 584
504 if (descptr->status & 0x8000) 585 if (descptr->status & 0x8000)
505 break; /* Not complte */ 586 break; /* Not complete */
506 skb_ptr = descptr->skb_ptr; 587 skb_ptr = descptr->skb_ptr;
507 pci_unmap_single(priv->pdev, descptr->buf, 588 pci_unmap_single(priv->pdev, descptr->buf,
508 skb_ptr->len, PCI_DMA_TODEVICE); 589 skb_ptr->len, PCI_DMA_TODEVICE);
@@ -545,7 +626,6 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
545 struct r6040_private *lp = netdev_priv(dev); 626 struct r6040_private *lp = netdev_priv(dev);
546 void __iomem *ioaddr = lp->base; 627 void __iomem *ioaddr = lp->base;
547 u16 status; 628 u16 status;
548 int handled = 1;
549 629
550 /* Mask off RDC MAC interrupt */ 630 /* Mask off RDC MAC interrupt */
551 iowrite16(MSK_INT, ioaddr + MIER); 631 iowrite16(MSK_INT, ioaddr + MIER);
@@ -565,7 +645,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
565 if (status & 0x10) 645 if (status & 0x10)
566 r6040_tx(dev); 646 r6040_tx(dev);
567 647
568 return IRQ_RETVAL(handled); 648 return IRQ_HANDLED;
569} 649}
570 650
571#ifdef CONFIG_NET_POLL_CONTROLLER 651#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -577,53 +657,15 @@ static void r6040_poll_controller(struct net_device *dev)
577} 657}
578#endif 658#endif
579 659
580static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
581 dma_addr_t desc_dma, int size)
582{
583 struct r6040_descriptor *desc = desc_ring;
584 dma_addr_t mapping = desc_dma;
585
586 while (size-- > 0) {
587 mapping += sizeof(sizeof(*desc));
588 desc->ndesc = cpu_to_le32(mapping);
589 desc->vndescp = desc + 1;
590 desc++;
591 }
592 desc--;
593 desc->ndesc = cpu_to_le32(desc_dma);
594 desc->vndescp = desc_ring;
595}
596
597/* Init RDC MAC */ 660/* Init RDC MAC */
598static void r6040_up(struct net_device *dev) 661static void r6040_up(struct net_device *dev)
599{ 662{
600 struct r6040_private *lp = netdev_priv(dev); 663 struct r6040_private *lp = netdev_priv(dev);
601 void __iomem *ioaddr = lp->base; 664 void __iomem *ioaddr = lp->base;
602 665
603 /* Initialize */ 666 /* Initialise and alloc RX/TX buffers */
604 lp->tx_free_desc = TX_DCNT; 667 r6040_alloc_txbufs(dev);
605 lp->rx_free_desc = 0; 668 r6040_alloc_rxbufs(dev);
606 /* Init descriptor */
607 lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
608 lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
609 /* Init TX descriptor */
610 r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
611
612 /* Init RX descriptor */
613 r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
614
615 /* Allocate buffer for RX descriptor */
616 rx_buf_alloc(lp, dev);
617
618 /*
619 * TX and RX descriptor start registers.
620 * Lower 16-bits to MxD_SA0. Higher 16-bits to MxD_SA1.
621 */
622 iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
623 iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
624
625 iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
626 iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);
627 669
628 /* Buffer Size Register */ 670 /* Buffer Size Register */
629 iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR); 671 iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);
@@ -689,8 +731,7 @@ static void r6040_timer(unsigned long data)
689 } 731 }
690 732
691 /* Timer active again */ 733 /* Timer active again */
692 lp->timer.expires = TIMER_WUT; 734 mod_timer(&lp->timer, jiffies + round_jiffies(HZ));
693 add_timer(&lp->timer);
694} 735}
695 736
696/* Read/set MAC address routines */ 737/* Read/set MAC address routines */
@@ -746,14 +787,10 @@ static int r6040_open(struct net_device *dev)
746 napi_enable(&lp->napi); 787 napi_enable(&lp->napi);
747 netif_start_queue(dev); 788 netif_start_queue(dev);
748 789
749 if (lp->switch_sig != ICPLUS_PHY_ID) { 790 /* set and active a timer process */
750 /* set and active a timer process */ 791 setup_timer(&lp->timer, r6040_timer, (unsigned long) dev);
751 init_timer(&lp->timer); 792 if (lp->switch_sig != ICPLUS_PHY_ID)
752 lp->timer.expires = TIMER_WUT; 793 mod_timer(&lp->timer, jiffies + HZ);
753 lp->timer.data = (unsigned long)dev;
754 lp->timer.function = &r6040_timer;
755 add_timer(&lp->timer);
756 }
757 return 0; 794 return 0;
758} 795}
759 796
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 2e9e88be7b33..202fdf356621 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1630,7 +1630,8 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
1630 SIS_PCI_COMMIT(); 1630 SIS_PCI_COMMIT();
1631} 1631}
1632 1632
1633static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev) 1633static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1634 struct net_device *dev)
1634{ 1635{
1635 u8 from; 1636 u8 from;
1636 1637
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index db606b603884..26ffb67f1da2 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -8781,7 +8781,7 @@ static int tg3_phys_id(struct net_device *dev, u32 data)
8781 return -EAGAIN; 8781 return -EAGAIN;
8782 8782
8783 if (data == 0) 8783 if (data == 0)
8784 data = 2; 8784 data = UINT_MAX / 2;
8785 8785
8786 for (i = 0; i < (data * 2); i++) { 8786 for (i = 0; i < (data * 2); i++) {
8787 if ((i % 2) == 0) 8787 if ((i % 2) == 0)
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 35d15e850075..6f33f84d37b0 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -36,6 +36,7 @@
36#include <linux/net.h> 36#include <linux/net.h>
37#include <linux/netdevice.h> 37#include <linux/netdevice.h>
38#include <linux/etherdevice.h> 38#include <linux/etherdevice.h>
39#include <linux/ethtool.h>
39#include <linux/skbuff.h> 40#include <linux/skbuff.h>
40#include <linux/slab.h> 41#include <linux/slab.h>
41#include <linux/spinlock.h> 42#include <linux/spinlock.h>
@@ -297,18 +298,11 @@ static void tsi108_check_phy(struct net_device *dev)
297 u32 speed; 298 u32 speed;
298 unsigned long flags; 299 unsigned long flags;
299 300
300 /* Do a dummy read, as for some reason the first read
301 * after a link becomes up returns link down, even if
302 * it's been a while since the link came up.
303 */
304
305 spin_lock_irqsave(&phy_lock, flags); 301 spin_lock_irqsave(&phy_lock, flags);
306 302
307 if (!data->phy_ok) 303 if (!data->phy_ok)
308 goto out; 304 goto out;
309 305
310 tsi108_read_mii(data, MII_BMSR);
311
312 duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media); 306 duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media);
313 data->init_media = 0; 307 data->init_media = 0;
314 308
@@ -345,22 +339,21 @@ static void tsi108_check_phy(struct net_device *dev)
345 339
346 TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg); 340 TSI_WRITE(TSI108_MAC_CFG2, mac_cfg2_reg);
347 TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg); 341 TSI_WRITE(TSI108_EC_PORTCTRL, portctrl_reg);
342 }
348 343
349 if (data->link_up == 0) { 344 if (data->link_up == 0) {
350 /* The manual says it can take 3-4 usecs for the speed change 345 /* The manual says it can take 3-4 usecs for the speed change
351 * to take effect. 346 * to take effect.
352 */ 347 */
353 udelay(5); 348 udelay(5);
354 349
355 spin_lock(&data->txlock); 350 spin_lock(&data->txlock);
356 if (is_valid_ether_addr(dev->dev_addr) && data->txfree) 351 if (is_valid_ether_addr(dev->dev_addr) && data->txfree)
357 netif_wake_queue(dev); 352 netif_wake_queue(dev);
358 353
359 data->link_up = 1; 354 data->link_up = 1;
360 spin_unlock(&data->txlock); 355 spin_unlock(&data->txlock);
361 }
362 } 356 }
363
364 } else { 357 } else {
365 if (data->link_up == 1) { 358 if (data->link_up == 1) {
366 netif_stop_queue(dev); 359 netif_stop_queue(dev);
@@ -1274,12 +1267,11 @@ static void tsi108_init_phy(struct net_device *dev)
1274 * PHY_STAT register before the link up status bit is set. 1267 * PHY_STAT register before the link up status bit is set.
1275 */ 1268 */
1276 1269
1277 data->link_up = 1; 1270 data->link_up = 0;
1278 1271
1279 while (!((phyval = tsi108_read_mii(data, MII_BMSR)) & 1272 while (!((phyval = tsi108_read_mii(data, MII_BMSR)) &
1280 BMSR_LSTATUS)) { 1273 BMSR_LSTATUS)) {
1281 if (i++ > (MII_READ_DELAY / 10)) { 1274 if (i++ > (MII_READ_DELAY / 10)) {
1282 data->link_up = 0;
1283 break; 1275 break;
1284 } 1276 }
1285 spin_unlock_irqrestore(&phy_lock, flags); 1277 spin_unlock_irqrestore(&phy_lock, flags);
@@ -1287,6 +1279,7 @@ static void tsi108_init_phy(struct net_device *dev)
1287 spin_lock_irqsave(&phy_lock, flags); 1279 spin_lock_irqsave(&phy_lock, flags);
1288 } 1280 }
1289 1281
1282 data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
1290 printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval); 1283 printk(KERN_DEBUG "PHY_STAT reg contains %08x\n", phyval);
1291 data->phy_ok = 1; 1284 data->phy_ok = 1;
1292 data->init_media = 1; 1285 data->init_media = 1;
@@ -1527,12 +1520,46 @@ static void tsi108_init_mac(struct net_device *dev)
1527 TSI_WRITE(TSI108_EC_INTMASK, ~0); 1520 TSI_WRITE(TSI108_EC_INTMASK, ~0);
1528} 1521}
1529 1522
1523static int tsi108_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1524{
1525 struct tsi108_prv_data *data = netdev_priv(dev);
1526 unsigned long flags;
1527 int rc;
1528
1529 spin_lock_irqsave(&data->txlock, flags);
1530 rc = mii_ethtool_gset(&data->mii_if, cmd);
1531 spin_unlock_irqrestore(&data->txlock, flags);
1532
1533 return rc;
1534}
1535
1536static int tsi108_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1537{
1538 struct tsi108_prv_data *data = netdev_priv(dev);
1539 unsigned long flags;
1540 int rc;
1541
1542 spin_lock_irqsave(&data->txlock, flags);
1543 rc = mii_ethtool_sset(&data->mii_if, cmd);
1544 spin_unlock_irqrestore(&data->txlock, flags);
1545
1546 return rc;
1547}
1548
1530static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1549static int tsi108_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1531{ 1550{
1532 struct tsi108_prv_data *data = netdev_priv(dev); 1551 struct tsi108_prv_data *data = netdev_priv(dev);
1552 if (!netif_running(dev))
1553 return -EINVAL;
1533 return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL); 1554 return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL);
1534} 1555}
1535 1556
1557static const struct ethtool_ops tsi108_ethtool_ops = {
1558 .get_link = ethtool_op_get_link,
1559 .get_settings = tsi108_get_settings,
1560 .set_settings = tsi108_set_settings,
1561};
1562
1536static int 1563static int
1537tsi108_init_one(struct platform_device *pdev) 1564tsi108_init_one(struct platform_device *pdev)
1538{ 1565{
@@ -1584,7 +1611,6 @@ tsi108_init_one(struct platform_device *pdev)
1584 data->mii_if.phy_id = einfo->phy; 1611 data->mii_if.phy_id = einfo->phy;
1585 data->mii_if.phy_id_mask = 0x1f; 1612 data->mii_if.phy_id_mask = 0x1f;
1586 data->mii_if.reg_num_mask = 0x1f; 1613 data->mii_if.reg_num_mask = 0x1f;
1587 data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
1588 1614
1589 data->phy = einfo->phy; 1615 data->phy = einfo->phy;
1590 data->phy_type = einfo->phy_type; 1616 data->phy_type = einfo->phy_type;
@@ -1598,6 +1624,7 @@ tsi108_init_one(struct platform_device *pdev)
1598 dev->get_stats = tsi108_get_stats; 1624 dev->get_stats = tsi108_get_stats;
1599 netif_napi_add(dev, &data->napi, tsi108_poll, 64); 1625 netif_napi_add(dev, &data->napi, tsi108_poll, 64);
1600 dev->do_ioctl = tsi108_do_ioctl; 1626 dev->do_ioctl = tsi108_do_ioctl;
1627 dev->ethtool_ops = &tsi108_ethtool_ops;
1601 1628
1602 /* Apparently, the Linux networking code won't use scatter-gather 1629 /* Apparently, the Linux networking code won't use scatter-gather
1603 * if the hardware doesn't do checksums. However, it's faster 1630 * if the hardware doesn't do checksums. However, it's faster
@@ -1629,6 +1656,7 @@ tsi108_init_one(struct platform_device *pdev)
1629 goto register_fail; 1656 goto register_fail;
1630 } 1657 }
1631 1658
1659 platform_set_drvdata(pdev, dev);
1632 printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: %s\n", 1660 printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: %s\n",
1633 dev->name, print_mac(mac, dev->dev_addr)); 1661 dev->name, print_mac(mac, dev->dev_addr));
1634#ifdef DEBUG 1662#ifdef DEBUG
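
The tsi108 hunk above adds basic ethtool support by delegating to the generic MII helpers under the driver's tx lock, and rejects MII ioctls while the interface is down. A minimal, hedged sketch of the same wiring, with hypothetical foo_* names standing in for the driver's own:

    #include <linux/ethtool.h>
    #include <linux/mii.h>
    #include <linux/netdevice.h>
    #include <linux/spinlock.h>

    struct foo_priv {                       /* illustrative private data */
            struct mii_if_info mii_if;
            spinlock_t lock;
    };

    static int foo_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
    {
            struct foo_priv *p = netdev_priv(dev);
            unsigned long flags;
            int rc;

            /* serialize PHY register access with the rest of the driver */
            spin_lock_irqsave(&p->lock, flags);
            rc = mii_ethtool_gset(&p->mii_if, cmd);
            spin_unlock_irqrestore(&p->lock, flags);
            return rc;
    }

    static const struct ethtool_ops foo_ethtool_ops = {
            .get_link     = ethtool_op_get_link,
            .get_settings = foo_get_settings,
    };

    /* in probe:  dev->ethtool_ops = &foo_ethtool_ops; */
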
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 3f67a29593bc..e2ad98bee6e7 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -244,18 +244,6 @@ static int veth_open(struct net_device *dev)
244 return 0; 244 return 0;
245} 245}
246 246
247static int veth_close(struct net_device *dev)
248{
249 struct veth_priv *priv;
250
251 if (netif_carrier_ok(dev)) {
252 priv = netdev_priv(dev);
253 netif_carrier_off(dev);
254 netif_carrier_off(priv->peer);
255 }
256 return 0;
257}
258
259static int veth_dev_init(struct net_device *dev) 247static int veth_dev_init(struct net_device *dev)
260{ 248{
261 struct veth_net_stats *stats; 249 struct veth_net_stats *stats;
@@ -286,13 +274,50 @@ static void veth_setup(struct net_device *dev)
286 dev->hard_start_xmit = veth_xmit; 274 dev->hard_start_xmit = veth_xmit;
287 dev->get_stats = veth_get_stats; 275 dev->get_stats = veth_get_stats;
288 dev->open = veth_open; 276 dev->open = veth_open;
289 dev->stop = veth_close;
290 dev->ethtool_ops = &veth_ethtool_ops; 277 dev->ethtool_ops = &veth_ethtool_ops;
291 dev->features |= NETIF_F_LLTX; 278 dev->features |= NETIF_F_LLTX;
292 dev->init = veth_dev_init; 279 dev->init = veth_dev_init;
293 dev->destructor = veth_dev_free; 280 dev->destructor = veth_dev_free;
294} 281}
295 282
283static void veth_change_state(struct net_device *dev)
284{
285 struct net_device *peer;
286 struct veth_priv *priv;
287
288 priv = netdev_priv(dev);
289 peer = priv->peer;
290
291 if (netif_carrier_ok(peer)) {
292 if (!netif_carrier_ok(dev))
293 netif_carrier_on(dev);
294 } else {
295 if (netif_carrier_ok(dev))
296 netif_carrier_off(dev);
297 }
298}
299
300static int veth_device_event(struct notifier_block *unused,
301 unsigned long event, void *ptr)
302{
303 struct net_device *dev = ptr;
304
305 if (dev->open != veth_open)
306 goto out;
307
308 switch (event) {
309 case NETDEV_CHANGE:
310 veth_change_state(dev);
311 break;
312 }
313out:
314 return NOTIFY_DONE;
315}
316
317static struct notifier_block veth_notifier_block __read_mostly = {
318 .notifier_call = veth_device_event,
319};
320
296/* 321/*
297 * netlink interface 322 * netlink interface
298 */ 323 */
@@ -454,12 +479,14 @@ static struct rtnl_link_ops veth_link_ops = {
454 479
455static __init int veth_init(void) 480static __init int veth_init(void)
456{ 481{
482 register_netdevice_notifier(&veth_notifier_block);
457 return rtnl_link_register(&veth_link_ops); 483 return rtnl_link_register(&veth_link_ops);
458} 484}
459 485
460static __exit void veth_exit(void) 486static __exit void veth_exit(void)
461{ 487{
462 rtnl_link_unregister(&veth_link_ops); 488 rtnl_link_unregister(&veth_link_ops);
489 unregister_netdevice_notifier(&veth_notifier_block);
463} 490}
464 491
465module_init(veth_init); 492module_init(veth_init);
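
Instead of forcing the carrier down from a stop handler, the veth change above reacts to NETDEV_CHANGE events and copies the peer's carrier state. A hedged sketch of that notifier pattern follows (foo_* names are placeholders; on kernels of this era the notifier's void pointer is the struct net_device itself):

    #include <linux/netdevice.h>
    #include <linux/notifier.h>

    static int foo_device_event(struct notifier_block *unused,
                                unsigned long event, void *ptr)
    {
            struct net_device *dev = ptr;

            if (event == NETDEV_CHANGE) {
                    /* e.g. mirror netif_carrier_ok(dev) onto a peer device */
            }
            return NOTIFY_DONE;
    }

    static struct notifier_block foo_notifier = {
            .notifier_call = foo_device_event,
    };

    /* module init:  register_netdevice_notifier(&foo_notifier);
     * module exit:  unregister_netdevice_notifier(&foo_notifier); */
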
diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath5k/ath5k.h
index c79066b38d3b..69dea3392612 100644
--- a/drivers/net/wireless/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath5k/ath5k.h
@@ -1035,7 +1035,7 @@ struct ath5k_hw {
1035 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int, 1035 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
1036 unsigned int, unsigned int, unsigned int, unsigned int, 1036 unsigned int, unsigned int, unsigned int, unsigned int,
1037 unsigned int, unsigned int, unsigned int); 1037 unsigned int, unsigned int, unsigned int);
1038 bool (*ah_setup_xtx_desc)(struct ath5k_hw *, struct ath5k_desc *, 1038 int (*ah_setup_xtx_desc)(struct ath5k_hw *, struct ath5k_desc *,
1039 unsigned int, unsigned int, unsigned int, unsigned int, 1039 unsigned int, unsigned int, unsigned int, unsigned int,
1040 unsigned int, unsigned int); 1040 unsigned int, unsigned int);
1041 int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *); 1041 int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *);
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index ddc87149fe31..bef967ce34a6 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -668,7 +668,10 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
668 * return false w/o doing anything. MAC's that do 668 * return false w/o doing anything. MAC's that do
669 * support it will return true w/o doing anything. 669 * support it will return true w/o doing anything.
670 */ 670 */
671 if (ah->ah_setup_xtx_desc(ah, NULL, 0, 0, 0, 0, 0, 0)) 671 ret = ah->ah_setup_xtx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
672 if (ret < 0)
673 goto err;
674 if (ret > 0)
672 __set_bit(ATH_STAT_MRRETRY, sc->status); 675 __set_bit(ATH_STAT_MRRETRY, sc->status);
673 676
674 /* 677 /*
@@ -1256,7 +1259,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1256 if (ctl->flags & IEEE80211_TXCTL_NO_ACK) 1259 if (ctl->flags & IEEE80211_TXCTL_NO_ACK)
1257 flags |= AR5K_TXDESC_NOACK; 1260 flags |= AR5K_TXDESC_NOACK;
1258 1261
1259 pktlen = skb->len + FCS_LEN; 1262 pktlen = skb->len;
1260 1263
1261 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) { 1264 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) {
1262 keyidx = ctl->key_idx; 1265 keyidx = ctl->key_idx;
@@ -1715,6 +1718,7 @@ ath5k_tasklet_rx(unsigned long data)
1715 break; 1718 break;
1716 else if (unlikely(ret)) { 1719 else if (unlikely(ret)) {
1717 ATH5K_ERR(sc, "error in processing rx descriptor\n"); 1720 ATH5K_ERR(sc, "error in processing rx descriptor\n");
1721 spin_unlock(&sc->rxbuflock);
1718 return; 1722 return;
1719 } 1723 }
1720 1724
@@ -1952,7 +1956,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1952 } 1956 }
1953 1957
1954 ds->ds_data = bf->skbaddr; 1958 ds->ds_data = bf->skbaddr;
1955 ret = ah->ah_setup_tx_desc(ah, ds, skb->len + FCS_LEN, 1959 ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
1956 ieee80211_get_hdrlen_from_skb(skb), 1960 ieee80211_get_hdrlen_from_skb(skb),
1957 AR5K_PKT_TYPE_BEACON, (ctl->power_level * 2), ctl->tx_rate, 1, 1961 AR5K_PKT_TYPE_BEACON, (ctl->power_level * 2), ctl->tx_rate, 1,
1958 AR5K_TXKEYIX_INVALID, antenna, flags, 0, 0); 1962 AR5K_TXKEYIX_INVALID, antenna, flags, 0, 0);
@@ -2126,8 +2130,9 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
2126 "updated timers based on beacon TSF\n"); 2130 "updated timers based on beacon TSF\n");
2127 2131
2128 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, 2132 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
2129 "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n", 2133 "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
2130 bc_tsf, hw_tsf, bc_tu, hw_tu, nexttbtt); 2134 (unsigned long long) bc_tsf,
2135 (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
2131 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "intval %u %s %s\n", 2136 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
2132 intval & AR5K_BEACON_PERIOD, 2137 intval & AR5K_BEACON_PERIOD,
2133 intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "", 2138 intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
@@ -2385,10 +2390,11 @@ ath5k_intr(int irq, void *dev_id)
2385 u64 tsf = ath5k_hw_get_tsf64(ah); 2390 u64 tsf = ath5k_hw_get_tsf64(ah);
2386 sc->nexttbtt += sc->bintval; 2391 sc->nexttbtt += sc->bintval;
2387 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, 2392 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
2388 "SWBA nexttbtt: %x hw_tu: %x " 2393 "SWBA nexttbtt: %x hw_tu: %x "
2389 "TSF: %llx\n", 2394 "TSF: %llx\n",
2390 sc->nexttbtt, 2395 sc->nexttbtt,
2391 TSF_TO_TU(tsf), tsf); 2396 TSF_TO_TU(tsf),
2397 (unsigned long long) tsf);
2392 } else { 2398 } else {
2393 ath5k_beacon_send(sc); 2399 ath5k_beacon_send(sc);
2394 } 2400 }
diff --git a/drivers/net/wireless/ath5k/hw.c b/drivers/net/wireless/ath5k/hw.c
index 3a4bf4035a23..c2de2d958e8e 100644
--- a/drivers/net/wireless/ath5k/hw.c
+++ b/drivers/net/wireless/ath5k/hw.c
@@ -45,7 +45,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
45 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int, 45 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
46 unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, 46 unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
47 unsigned int, unsigned int); 47 unsigned int, unsigned int);
48static bool ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *, struct ath5k_desc *, 48static int ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
49 unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, 49 unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
50 unsigned int); 50 unsigned int);
51static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *, struct ath5k_desc *); 51static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *, struct ath5k_desc *);
@@ -3506,7 +3506,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3506{ 3506{
3507 u32 frame_type; 3507 u32 frame_type;
3508 struct ath5k_hw_2w_tx_desc *tx_desc; 3508 struct ath5k_hw_2w_tx_desc *tx_desc;
3509 unsigned int buff_len; 3509 unsigned int frame_len;
3510 3510
3511 tx_desc = (struct ath5k_hw_2w_tx_desc *)&desc->ds_ctl0; 3511 tx_desc = (struct ath5k_hw_2w_tx_desc *)&desc->ds_ctl0;
3512 3512
@@ -3537,22 +3537,25 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3537 /* Setup control descriptor */ 3537 /* Setup control descriptor */
3538 3538
3539 /* Verify and set frame length */ 3539 /* Verify and set frame length */
3540 if (pkt_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN) 3540
3541 /* remove padding we might have added before */
3542 frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;
3543
3544 if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
3541 return -EINVAL; 3545 return -EINVAL;
3542 3546
3543 tx_desc->tx_control_0 = pkt_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN; 3547 tx_desc->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN;
3544 3548
3545 /* Verify and set buffer length */ 3549 /* Verify and set buffer length */
3546 buff_len = pkt_len - FCS_LEN;
3547 3550
3548 /* NB: beacon's BufLen must be a multiple of 4 bytes */ 3551 /* NB: beacon's BufLen must be a multiple of 4 bytes */
3549 if(type == AR5K_PKT_TYPE_BEACON) 3552 if(type == AR5K_PKT_TYPE_BEACON)
3550 buff_len = roundup(buff_len, 4); 3553 pkt_len = roundup(pkt_len, 4);
3551 3554
3552 if (buff_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN) 3555 if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN)
3553 return -EINVAL; 3556 return -EINVAL;
3554 3557
3555 tx_desc->tx_control_1 = buff_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN; 3558 tx_desc->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
3556 3559
3557 /* 3560 /*
3558 * Verify and set header length 3561 * Verify and set header length
@@ -3634,7 +3637,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
3634{ 3637{
3635 struct ath5k_hw_4w_tx_desc *tx_desc; 3638 struct ath5k_hw_4w_tx_desc *tx_desc;
3636 struct ath5k_hw_tx_status *tx_status; 3639 struct ath5k_hw_tx_status *tx_status;
3637 unsigned int buff_len; 3640 unsigned int frame_len;
3638 3641
3639 ATH5K_TRACE(ah->ah_sc); 3642 ATH5K_TRACE(ah->ah_sc);
3640 tx_desc = (struct ath5k_hw_4w_tx_desc *)&desc->ds_ctl0; 3643 tx_desc = (struct ath5k_hw_4w_tx_desc *)&desc->ds_ctl0;
@@ -3669,22 +3672,25 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
3669 /* Setup control descriptor */ 3672 /* Setup control descriptor */
3670 3673
3671 /* Verify and set frame length */ 3674 /* Verify and set frame length */
3672 if (pkt_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN) 3675
3676 /* remove padding we might have added before */
3677 frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;
3678
3679 if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
3673 return -EINVAL; 3680 return -EINVAL;
3674 3681
3675 tx_desc->tx_control_0 = pkt_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN; 3682 tx_desc->tx_control_0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;
3676 3683
3677 /* Verify and set buffer length */ 3684 /* Verify and set buffer length */
3678 buff_len = pkt_len - FCS_LEN;
3679 3685
3680 /* NB: beacon's BufLen must be a multiple of 4 bytes */ 3686 /* NB: beacon's BufLen must be a multiple of 4 bytes */
3681 if(type == AR5K_PKT_TYPE_BEACON) 3687 if(type == AR5K_PKT_TYPE_BEACON)
3682 buff_len = roundup(buff_len, 4); 3688 pkt_len = roundup(pkt_len, 4);
3683 3689
3684 if (buff_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN) 3690 if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
3685 return -EINVAL; 3691 return -EINVAL;
3686 3692
3687 tx_desc->tx_control_1 = buff_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN; 3693 tx_desc->tx_control_1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;
3688 3694
3689 tx_desc->tx_control_0 |= 3695 tx_desc->tx_control_0 |=
3690 AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) | 3696 AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
@@ -3737,7 +3743,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
3737/* 3743/*
3738 * Initialize a 4-word multirate tx descriptor on 5212 3744 * Initialize a 4-word multirate tx descriptor on 5212
3739 */ 3745 */
3740static bool 3746static int
3741ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, 3747ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3742 unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, u_int tx_tries2, 3748 unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, u_int tx_tries2,
3743 unsigned int tx_rate3, u_int tx_tries3) 3749 unsigned int tx_rate3, u_int tx_tries3)
@@ -3777,10 +3783,10 @@ ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3777 3783
3778#undef _XTX_TRIES 3784#undef _XTX_TRIES
3779 3785
3780 return true; 3786 return 1;
3781 } 3787 }
3782 3788
3783 return false; 3789 return 0;
3784} 3790}
3785 3791
3786/* 3792/*
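
The descriptor-setup hunks above stop expecting an FCS-padded length from the caller and instead derive the on-air length in the hardware layer as frame_len = pkt_len - (hdr_len & 3) + FCS_LEN. As a purely illustrative worked example: with a 26-byte 802.11 header the driver pads the payload start by 2 bytes (26 & 3), so for a 102-byte padded buffer the frame length written to tx_control_0 becomes 102 - 2 + 4 = 104 bytes (the 4 being the FCS), while tx_control_1 keeps the 102-byte buffer length, rounded up to a multiple of 4 only for beacon frames.
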
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 08a011f0834a..f13346ba9dd2 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -14,6 +14,12 @@
14#include "lo.h" 14#include "lo.h"
15#include "phy.h" 15#include "phy.h"
16 16
17
18/* The unique identifier of the firmware that's officially supported by
19 * this driver version. */
20#define B43_SUPPORTED_FIRMWARE_ID "FW13"
21
22
17#ifdef CONFIG_B43_DEBUG 23#ifdef CONFIG_B43_DEBUG
18# define B43_DEBUG 1 24# define B43_DEBUG 1
19#else 25#else
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index ef65c41af00f..51dfce16178a 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -58,6 +58,8 @@ MODULE_AUTHOR("Stefano Brivio");
58MODULE_AUTHOR("Michael Buesch"); 58MODULE_AUTHOR("Michael Buesch");
59MODULE_LICENSE("GPL"); 59MODULE_LICENSE("GPL");
60 60
61MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID);
62
61 63
62static int modparam_bad_frames_preempt; 64static int modparam_bad_frames_preempt;
63module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444); 65module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444);
@@ -1859,11 +1861,11 @@ static int b43_upload_microcode(struct b43_wldev *dev)
1859 err = -EOPNOTSUPP; 1861 err = -EOPNOTSUPP;
1860 goto error; 1862 goto error;
1861 } 1863 }
1862 b43dbg(dev->wl, "Loading firmware version %u.%u " 1864 b43info(dev->wl, "Loading firmware version %u.%u "
1863 "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", 1865 "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n",
1864 fwrev, fwpatch, 1866 fwrev, fwpatch,
1865 (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF, 1867 (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF,
1866 (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F); 1868 (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F);
1867 1869
1868 dev->fw.rev = fwrev; 1870 dev->fw.rev = fwrev;
1869 dev->fw.patch = fwpatch; 1871 dev->fw.patch = fwpatch;
@@ -4200,6 +4202,33 @@ static struct ssb_driver b43_ssb_driver = {
4200 .resume = b43_resume, 4202 .resume = b43_resume,
4201}; 4203};
4202 4204
4205static void b43_print_driverinfo(void)
4206{
4207 const char *feat_pci = "", *feat_pcmcia = "", *feat_nphy = "",
4208 *feat_leds = "", *feat_rfkill = "";
4209
4210#ifdef CONFIG_B43_PCI_AUTOSELECT
4211 feat_pci = "P";
4212#endif
4213#ifdef CONFIG_B43_PCMCIA
4214 feat_pcmcia = "M";
4215#endif
4216#ifdef CONFIG_B43_NPHY
4217 feat_nphy = "N";
4218#endif
4219#ifdef CONFIG_B43_LEDS
4220 feat_leds = "L";
4221#endif
4222#ifdef CONFIG_B43_RFKILL
4223 feat_rfkill = "R";
4224#endif
4225 printk(KERN_INFO "Broadcom 43xx driver loaded "
4226 "[ Features: %s%s%s%s%s, Firmware-ID: "
4227 B43_SUPPORTED_FIRMWARE_ID " ]\n",
4228 feat_pci, feat_pcmcia, feat_nphy,
4229 feat_leds, feat_rfkill);
4230}
4231
4203static int __init b43_init(void) 4232static int __init b43_init(void)
4204{ 4233{
4205 int err; 4234 int err;
@@ -4211,6 +4240,7 @@ static int __init b43_init(void)
4211 err = ssb_driver_register(&b43_ssb_driver); 4240 err = ssb_driver_register(&b43_ssb_driver);
4212 if (err) 4241 if (err)
4213 goto err_pcmcia_exit; 4242 goto err_pcmcia_exit;
4243 b43_print_driverinfo();
4214 4244
4215 return err; 4245 return err;
4216 4246
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index c80edd2b9044..93d45b71799a 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -23,6 +23,10 @@
23#include "phy.h" 23#include "phy.h"
24 24
25 25
26/* The unique identifier of the firmware that's officially supported by this
27 * driver version. */
28#define B43legacy_SUPPORTED_FIRMWARE_ID "FW10"
29
26#define B43legacy_IRQWAIT_MAX_RETRIES 20 30#define B43legacy_IRQWAIT_MAX_RETRIES 20
27 31
28#define B43legacy_RX_MAX_SSI 60 /* best guess at max ssi */ 32#define B43legacy_RX_MAX_SSI 60 /* best guess at max ssi */
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 6e08405e8026..e87b427d5e43 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -354,7 +354,8 @@ return 0;
354} 354}
355 355
356 356
357u16 b43legacy_dmacontroller_base(int dma64bit, int controller_idx) 357static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
358 int controller_idx)
358{ 359{
359 static const u16 map64[] = { 360 static const u16 map64[] = {
360 B43legacy_MMIO_DMA64_BASE0, 361 B43legacy_MMIO_DMA64_BASE0,
@@ -373,7 +374,7 @@ u16 b43legacy_dmacontroller_base(int dma64bit, int controller_idx)
373 B43legacy_MMIO_DMA32_BASE5, 374 B43legacy_MMIO_DMA32_BASE5,
374 }; 375 };
375 376
376 if (dma64bit) { 377 if (type == B43legacy_DMA_64BIT) {
377 B43legacy_WARN_ON(!(controller_idx >= 0 && 378 B43legacy_WARN_ON(!(controller_idx >= 0 &&
378 controller_idx < ARRAY_SIZE(map64))); 379 controller_idx < ARRAY_SIZE(map64)));
379 return map64[controller_idx]; 380 return map64[controller_idx];
@@ -480,8 +481,9 @@ static void free_ringmemory(struct b43legacy_dmaring *ring)
480} 481}
481 482
482/* Reset the RX DMA channel */ 483/* Reset the RX DMA channel */
483int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev, 484static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
484 u16 mmio_base, int dma64) 485 u16 mmio_base,
486 enum b43legacy_dmatype type)
485{ 487{
486 int i; 488 int i;
487 u32 value; 489 u32 value;
@@ -489,13 +491,14 @@ int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
489 491
490 might_sleep(); 492 might_sleep();
491 493
492 offset = dma64 ? B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL; 494 offset = (type == B43legacy_DMA_64BIT) ?
495 B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL;
493 b43legacy_write32(dev, mmio_base + offset, 0); 496 b43legacy_write32(dev, mmio_base + offset, 0);
494 for (i = 0; i < 10; i++) { 497 for (i = 0; i < 10; i++) {
495 offset = dma64 ? B43legacy_DMA64_RXSTATUS : 498 offset = (type == B43legacy_DMA_64BIT) ?
496 B43legacy_DMA32_RXSTATUS; 499 B43legacy_DMA64_RXSTATUS : B43legacy_DMA32_RXSTATUS;
497 value = b43legacy_read32(dev, mmio_base + offset); 500 value = b43legacy_read32(dev, mmio_base + offset);
498 if (dma64) { 501 if (type == B43legacy_DMA_64BIT) {
499 value &= B43legacy_DMA64_RXSTAT; 502 value &= B43legacy_DMA64_RXSTAT;
500 if (value == B43legacy_DMA64_RXSTAT_DISABLED) { 503 if (value == B43legacy_DMA64_RXSTAT_DISABLED) {
501 i = -1; 504 i = -1;
@@ -519,8 +522,9 @@ int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
519} 522}
520 523
521/* Reset the RX DMA channel */ 524/* Reset the RX DMA channel */
522int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev, 525static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
523 u16 mmio_base, int dma64) 526 u16 mmio_base,
527 enum b43legacy_dmatype type)
524{ 528{
525 int i; 529 int i;
526 u32 value; 530 u32 value;
@@ -529,10 +533,10 @@ int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
529 might_sleep(); 533 might_sleep();
530 534
531 for (i = 0; i < 10; i++) { 535 for (i = 0; i < 10; i++) {
532 offset = dma64 ? B43legacy_DMA64_TXSTATUS : 536 offset = (type == B43legacy_DMA_64BIT) ?
533 B43legacy_DMA32_TXSTATUS; 537 B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
534 value = b43legacy_read32(dev, mmio_base + offset); 538 value = b43legacy_read32(dev, mmio_base + offset);
535 if (dma64) { 539 if (type == B43legacy_DMA_64BIT) {
536 value &= B43legacy_DMA64_TXSTAT; 540 value &= B43legacy_DMA64_TXSTAT;
537 if (value == B43legacy_DMA64_TXSTAT_DISABLED || 541 if (value == B43legacy_DMA64_TXSTAT_DISABLED ||
538 value == B43legacy_DMA64_TXSTAT_IDLEWAIT || 542 value == B43legacy_DMA64_TXSTAT_IDLEWAIT ||
@@ -547,13 +551,14 @@ int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
547 } 551 }
548 msleep(1); 552 msleep(1);
549 } 553 }
550 offset = dma64 ? B43legacy_DMA64_TXCTL : B43legacy_DMA32_TXCTL; 554 offset = (type == B43legacy_DMA_64BIT) ? B43legacy_DMA64_TXCTL :
555 B43legacy_DMA32_TXCTL;
551 b43legacy_write32(dev, mmio_base + offset, 0); 556 b43legacy_write32(dev, mmio_base + offset, 0);
552 for (i = 0; i < 10; i++) { 557 for (i = 0; i < 10; i++) {
553 offset = dma64 ? B43legacy_DMA64_TXSTATUS : 558 offset = (type == B43legacy_DMA_64BIT) ?
554 B43legacy_DMA32_TXSTATUS; 559 B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
555 value = b43legacy_read32(dev, mmio_base + offset); 560 value = b43legacy_read32(dev, mmio_base + offset);
556 if (dma64) { 561 if (type == B43legacy_DMA_64BIT) {
557 value &= B43legacy_DMA64_TXSTAT; 562 value &= B43legacy_DMA64_TXSTAT;
558 if (value == B43legacy_DMA64_TXSTAT_DISABLED) { 563 if (value == B43legacy_DMA64_TXSTAT_DISABLED) {
559 i = -1; 564 i = -1;
@@ -578,6 +583,32 @@ int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
578 return 0; 583 return 0;
579} 584}
580 585
586/* Check if a DMA mapping address is invalid. */
587static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
588 dma_addr_t addr,
589 size_t buffersize)
590{
591 if (unlikely(dma_mapping_error(addr)))
592 return 1;
593
594 switch (ring->type) {
595 case B43legacy_DMA_30BIT:
596 if ((u64)addr + buffersize > (1ULL << 30))
597 return 1;
598 break;
599 case B43legacy_DMA_32BIT:
600 if ((u64)addr + buffersize > (1ULL << 32))
601 return 1;
602 break;
603 case B43legacy_DMA_64BIT:
604 /* Currently we can't have addresses beyond 64 bits in the kernel. */
605 break;
606 }
607
608 /* The address is OK. */
609 return 0;
610}
611
581static int setup_rx_descbuffer(struct b43legacy_dmaring *ring, 612static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
582 struct b43legacy_dmadesc_generic *desc, 613 struct b43legacy_dmadesc_generic *desc,
583 struct b43legacy_dmadesc_meta *meta, 614 struct b43legacy_dmadesc_meta *meta,
@@ -595,7 +626,7 @@ static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
595 return -ENOMEM; 626 return -ENOMEM;
596 dmaaddr = map_descbuffer(ring, skb->data, 627 dmaaddr = map_descbuffer(ring, skb->data,
597 ring->rx_buffersize, 0); 628 ring->rx_buffersize, 0);
598 if (dma_mapping_error(dmaaddr)) { 629 if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
599 /* ugh. try to realloc in zone_dma */ 630 /* ugh. try to realloc in zone_dma */
600 gfp_flags |= GFP_DMA; 631 gfp_flags |= GFP_DMA;
601 632
@@ -608,7 +639,7 @@ static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
608 ring->rx_buffersize, 0); 639 ring->rx_buffersize, 0);
609 } 640 }
610 641
611 if (dma_mapping_error(dmaaddr)) { 642 if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
612 dev_kfree_skb_any(skb); 643 dev_kfree_skb_any(skb);
613 return -EIO; 644 return -EIO;
614 } 645 }
@@ -674,7 +705,7 @@ static int dmacontroller_setup(struct b43legacy_dmaring *ring)
674 u32 trans = ssb_dma_translation(ring->dev->dev); 705 u32 trans = ssb_dma_translation(ring->dev->dev);
675 706
676 if (ring->tx) { 707 if (ring->tx) {
677 if (ring->dma64) { 708 if (ring->type == B43legacy_DMA_64BIT) {
678 u64 ringbase = (u64)(ring->dmabase); 709 u64 ringbase = (u64)(ring->dmabase);
679 710
680 addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) 711 addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@@ -709,7 +740,7 @@ static int dmacontroller_setup(struct b43legacy_dmaring *ring)
709 err = alloc_initial_descbuffers(ring); 740 err = alloc_initial_descbuffers(ring);
710 if (err) 741 if (err)
711 goto out; 742 goto out;
712 if (ring->dma64) { 743 if (ring->type == B43legacy_DMA_64BIT) {
713 u64 ringbase = (u64)(ring->dmabase); 744 u64 ringbase = (u64)(ring->dmabase);
714 745
715 addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) 746 addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@@ -760,16 +791,16 @@ static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
760{ 791{
761 if (ring->tx) { 792 if (ring->tx) {
762 b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base, 793 b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
763 ring->dma64); 794 ring->type);
764 if (ring->dma64) { 795 if (ring->type == B43legacy_DMA_64BIT) {
765 b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0); 796 b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0);
766 b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0); 797 b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0);
767 } else 798 } else
768 b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0); 799 b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
769 } else { 800 } else {
770 b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base, 801 b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
771 ring->dma64); 802 ring->type);
772 if (ring->dma64) { 803 if (ring->type == B43legacy_DMA_64BIT) {
773 b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0); 804 b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0);
774 b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0); 805 b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0);
775 } else 806 } else
@@ -824,11 +855,10 @@ static u64 supported_dma_mask(struct b43legacy_wldev *dev)
824 855
825/* Main initialization function. */ 856/* Main initialization function. */
826static 857static
827struct b43legacy_dmaring *b43legacy_setup_dmaring( 858struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
828 struct b43legacy_wldev *dev, 859 int controller_index,
829 int controller_index, 860 int for_tx,
830 int for_tx, 861 enum b43legacy_dmatype type)
831 int dma64)
832{ 862{
833 struct b43legacy_dmaring *ring; 863 struct b43legacy_dmaring *ring;
834 int err; 864 int err;
@@ -838,6 +868,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(
838 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 868 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
839 if (!ring) 869 if (!ring)
840 goto out; 870 goto out;
871 ring->type = type;
841 872
842 nr_slots = B43legacy_RXRING_SLOTS; 873 nr_slots = B43legacy_RXRING_SLOTS;
843 if (for_tx) 874 if (for_tx)
@@ -855,12 +886,12 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(
855 goto err_kfree_meta; 886 goto err_kfree_meta;
856 887
857 /* test for ability to dma to txhdr_cache */ 888 /* test for ability to dma to txhdr_cache */
858 dma_test = dma_map_single(dev->dev->dev, 889 dma_test = dma_map_single(dev->dev->dev, ring->txhdr_cache,
859 ring->txhdr_cache, 890 sizeof(struct b43legacy_txhdr_fw3),
860 sizeof(struct b43legacy_txhdr_fw3), 891 DMA_TO_DEVICE);
861 DMA_TO_DEVICE);
862 892
863 if (dma_mapping_error(dma_test)) { 893 if (b43legacy_dma_mapping_error(ring, dma_test,
894 sizeof(struct b43legacy_txhdr_fw3))) {
864 /* ugh realloc */ 895 /* ugh realloc */
865 kfree(ring->txhdr_cache); 896 kfree(ring->txhdr_cache);
866 ring->txhdr_cache = kcalloc(nr_slots, 897 ring->txhdr_cache = kcalloc(nr_slots,
@@ -874,7 +905,8 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(
874 sizeof(struct b43legacy_txhdr_fw3), 905 sizeof(struct b43legacy_txhdr_fw3),
875 DMA_TO_DEVICE); 906 DMA_TO_DEVICE);
876 907
877 if (dma_mapping_error(dma_test)) 908 if (b43legacy_dma_mapping_error(ring, dma_test,
909 sizeof(struct b43legacy_txhdr_fw3)))
878 goto err_kfree_txhdr_cache; 910 goto err_kfree_txhdr_cache;
879 } 911 }
880 912
@@ -885,11 +917,9 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(
885 917
886 ring->dev = dev; 918 ring->dev = dev;
887 ring->nr_slots = nr_slots; 919 ring->nr_slots = nr_slots;
888 ring->mmio_base = b43legacy_dmacontroller_base(dma64, 920 ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
889 controller_index);
890 ring->index = controller_index; 921 ring->index = controller_index;
891 ring->dma64 = !!dma64; 922 if (type == B43legacy_DMA_64BIT)
892 if (dma64)
893 ring->ops = &dma64_ops; 923 ring->ops = &dma64_ops;
894 else 924 else
895 ring->ops = &dma32_ops; 925 ring->ops = &dma32_ops;
@@ -939,10 +969,10 @@ static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
939 if (!ring) 969 if (!ring)
940 return; 970 return;
941 971
942 b43legacydbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots:" 972 b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
943 " %d/%d\n", (ring->dma64) ? "64" : "32", ring->mmio_base, 973 " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
944 (ring->tx) ? "TX" : "RX", 974 (ring->tx) ? "TX" : "RX", ring->max_used_slots,
945 ring->max_used_slots, ring->nr_slots); 975 ring->nr_slots);
946 /* Device IRQs are disabled prior entering this function, 976 /* Device IRQs are disabled prior entering this function,
947 * so no need to take care of concurrency with rx handler stuff. 977 * so no need to take care of concurrency with rx handler stuff.
948 */ 978 */
@@ -988,11 +1018,22 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
988 struct b43legacy_dmaring *ring; 1018 struct b43legacy_dmaring *ring;
989 int err; 1019 int err;
990 u64 dmamask; 1020 u64 dmamask;
991 int dma64 = 0; 1021 enum b43legacy_dmatype type;
992 1022
993 dmamask = supported_dma_mask(dev); 1023 dmamask = supported_dma_mask(dev);
994 if (dmamask == DMA_64BIT_MASK) 1024 switch (dmamask) {
995 dma64 = 1; 1025 default:
1026 B43legacy_WARN_ON(1);
1027 case DMA_30BIT_MASK:
1028 type = B43legacy_DMA_30BIT;
1029 break;
1030 case DMA_32BIT_MASK:
1031 type = B43legacy_DMA_32BIT;
1032 break;
1033 case DMA_64BIT_MASK:
1034 type = B43legacy_DMA_64BIT;
1035 break;
1036 }
996 1037
997 err = ssb_dma_set_mask(dev->dev, dmamask); 1038 err = ssb_dma_set_mask(dev->dev, dmamask);
998 if (err) { 1039 if (err) {
@@ -1010,52 +1051,50 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
1010 1051
1011 err = -ENOMEM; 1052 err = -ENOMEM;
1012 /* setup TX DMA channels. */ 1053 /* setup TX DMA channels. */
1013 ring = b43legacy_setup_dmaring(dev, 0, 1, dma64); 1054 ring = b43legacy_setup_dmaring(dev, 0, 1, type);
1014 if (!ring) 1055 if (!ring)
1015 goto out; 1056 goto out;
1016 dma->tx_ring0 = ring; 1057 dma->tx_ring0 = ring;
1017 1058
1018 ring = b43legacy_setup_dmaring(dev, 1, 1, dma64); 1059 ring = b43legacy_setup_dmaring(dev, 1, 1, type);
1019 if (!ring) 1060 if (!ring)
1020 goto err_destroy_tx0; 1061 goto err_destroy_tx0;
1021 dma->tx_ring1 = ring; 1062 dma->tx_ring1 = ring;
1022 1063
1023 ring = b43legacy_setup_dmaring(dev, 2, 1, dma64); 1064 ring = b43legacy_setup_dmaring(dev, 2, 1, type);
1024 if (!ring) 1065 if (!ring)
1025 goto err_destroy_tx1; 1066 goto err_destroy_tx1;
1026 dma->tx_ring2 = ring; 1067 dma->tx_ring2 = ring;
1027 1068
1028 ring = b43legacy_setup_dmaring(dev, 3, 1, dma64); 1069 ring = b43legacy_setup_dmaring(dev, 3, 1, type);
1029 if (!ring) 1070 if (!ring)
1030 goto err_destroy_tx2; 1071 goto err_destroy_tx2;
1031 dma->tx_ring3 = ring; 1072 dma->tx_ring3 = ring;
1032 1073
1033 ring = b43legacy_setup_dmaring(dev, 4, 1, dma64); 1074 ring = b43legacy_setup_dmaring(dev, 4, 1, type);
1034 if (!ring) 1075 if (!ring)
1035 goto err_destroy_tx3; 1076 goto err_destroy_tx3;
1036 dma->tx_ring4 = ring; 1077 dma->tx_ring4 = ring;
1037 1078
1038 ring = b43legacy_setup_dmaring(dev, 5, 1, dma64); 1079 ring = b43legacy_setup_dmaring(dev, 5, 1, type);
1039 if (!ring) 1080 if (!ring)
1040 goto err_destroy_tx4; 1081 goto err_destroy_tx4;
1041 dma->tx_ring5 = ring; 1082 dma->tx_ring5 = ring;
1042 1083
1043 /* setup RX DMA channels. */ 1084 /* setup RX DMA channels. */
1044 ring = b43legacy_setup_dmaring(dev, 0, 0, dma64); 1085 ring = b43legacy_setup_dmaring(dev, 0, 0, type);
1045 if (!ring) 1086 if (!ring)
1046 goto err_destroy_tx5; 1087 goto err_destroy_tx5;
1047 dma->rx_ring0 = ring; 1088 dma->rx_ring0 = ring;
1048 1089
1049 if (dev->dev->id.revision < 5) { 1090 if (dev->dev->id.revision < 5) {
1050 ring = b43legacy_setup_dmaring(dev, 3, 0, dma64); 1091 ring = b43legacy_setup_dmaring(dev, 3, 0, type);
1051 if (!ring) 1092 if (!ring)
1052 goto err_destroy_rx0; 1093 goto err_destroy_rx0;
1053 dma->rx_ring3 = ring; 1094 dma->rx_ring3 = ring;
1054 } 1095 }
1055 1096
1056 b43legacydbg(dev->wl, "%d-bit DMA initialized\n", 1097 b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
1057 (dmamask == DMA_64BIT_MASK) ? 64 :
1058 (dmamask == DMA_32BIT_MASK) ? 32 : 30);
1059 err = 0; 1098 err = 0;
1060out: 1099out:
1061 return err; 1100 return err;
@@ -1194,9 +1233,13 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1194 } 1233 }
1195 1234
1196 meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header, 1235 meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
1197 sizeof(struct b43legacy_txhdr_fw3), 1); 1236 sizeof(struct b43legacy_txhdr_fw3), 1);
1198 if (dma_mapping_error(meta_hdr->dmaaddr)) 1237 if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
1238 sizeof(struct b43legacy_txhdr_fw3))) {
1239 ring->current_slot = old_top_slot;
1240 ring->used_slots = old_used_slots;
1199 return -EIO; 1241 return -EIO;
1242 }
1200 ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr, 1243 ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
1201 sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0); 1244 sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);
1202 1245
@@ -1211,7 +1254,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1211 1254
1212 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); 1255 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1213 /* create a bounce buffer in zone_dma on mapping failure. */ 1256 /* create a bounce buffer in zone_dma on mapping failure. */
1214 if (dma_mapping_error(meta->dmaaddr)) { 1257 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
1215 bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); 1258 bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
1216 if (!bounce_skb) { 1259 if (!bounce_skb) {
1217 ring->current_slot = old_top_slot; 1260 ring->current_slot = old_top_slot;
@@ -1225,7 +1268,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1225 skb = bounce_skb; 1268 skb = bounce_skb;
1226 meta->skb = skb; 1269 meta->skb = skb;
1227 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); 1270 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1228 if (dma_mapping_error(meta->dmaaddr)) { 1271 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
1229 ring->current_slot = old_top_slot; 1272 ring->current_slot = old_top_slot;
1230 ring->used_slots = old_used_slots; 1273 ring->used_slots = old_used_slots;
1231 err = -EIO; 1274 err = -EIO;
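
The b43legacy changes above replace the bare dma_mapping_error() test with a helper that also rejects mappings outside the DMA engine's addressing window (30-, 32- or 64-bit), so the caller can retry with a GFP_DMA bounce buffer. A reduced, hypothetical form of that range check:

    #include <linux/types.h>

    /* illustrative only: 'bits' stands in for the enum-typed ring->type */
    static bool foo_dma_out_of_range(u64 addr, size_t len, unsigned int bits)
    {
            if (bits >= 64)
                    return false;           /* nothing narrower to check against */
            return addr + len > (1ULL << bits);
    }
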
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index 26f6ab08de75..2dd488c5be2d 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -218,6 +218,12 @@ struct b43legacy_dma_ops {
218 void (*set_current_rxslot)(struct b43legacy_dmaring *ring, int slot); 218 void (*set_current_rxslot)(struct b43legacy_dmaring *ring, int slot);
219}; 219};
220 220
221enum b43legacy_dmatype {
222 B43legacy_DMA_30BIT = 30,
223 B43legacy_DMA_32BIT = 32,
224 B43legacy_DMA_64BIT = 64,
225};
226
221struct b43legacy_dmaring { 227struct b43legacy_dmaring {
222 /* Lowlevel DMA ops. */ 228 /* Lowlevel DMA ops. */
223 const struct b43legacy_dma_ops *ops; 229 const struct b43legacy_dma_ops *ops;
@@ -250,8 +256,8 @@ struct b43legacy_dmaring {
250 int index; 256 int index;
251 /* Boolean. Is this a TX ring? */ 257 /* Boolean. Is this a TX ring? */
252 bool tx; 258 bool tx;
253 /* Boolean. 64bit DMA if true, 32bit DMA otherwise. */ 259 /* The type of DMA engine used. */
254 bool dma64; 260 enum b43legacy_dmatype type;
255 /* Boolean. Is this ring stopped at ieee80211 level? */ 261 /* Boolean. Is this ring stopped at ieee80211 level? */
256 bool stopped; 262 bool stopped;
257 /* Lock, only used for TX. */ 263 /* Lock, only used for TX. */
@@ -284,15 +290,6 @@ void b43legacy_dma_write(struct b43legacy_dmaring *ring,
284int b43legacy_dma_init(struct b43legacy_wldev *dev); 290int b43legacy_dma_init(struct b43legacy_wldev *dev);
285void b43legacy_dma_free(struct b43legacy_wldev *dev); 291void b43legacy_dma_free(struct b43legacy_wldev *dev);
286 292
287int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
288 u16 dmacontroller_mmio_base,
289 int dma64);
290int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
291 u16 dmacontroller_mmio_base,
292 int dma64);
293
294u16 b43legacy_dmacontroller_base(int dma64bit, int dmacontroller_idx);
295
296void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev); 293void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev);
297void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev); 294void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev);
298 295
@@ -320,20 +317,6 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
320{ 317{
321} 318}
322static inline 319static inline
323int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
324 u16 dmacontroller_mmio_base,
325 int dma64)
326{
327 return 0;
328}
329static inline
330int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
331 u16 dmacontroller_mmio_base,
332 int dma64)
333{
334 return 0;
335}
336static inline
337void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev, 320void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
338 struct ieee80211_tx_queue_stats *stats) 321 struct ieee80211_tx_queue_stats *stats)
339{ 322{
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 53f7f2e97615..c39de422e220 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3,7 +3,7 @@
3 * Broadcom B43legacy wireless driver 3 * Broadcom B43legacy wireless driver
4 * 4 *
5 * Copyright (c) 2005 Martin Langer <martin-langer@gmx.de> 5 * Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>
6 * Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it> 6 * Copyright (c) 2005-2008 Stefano Brivio <stefano.brivio@polimi.it>
7 * Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de> 7 * Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>
8 * Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org> 8 * Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
9 * Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch> 9 * Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
@@ -60,6 +60,8 @@ MODULE_AUTHOR("Stefano Brivio");
60MODULE_AUTHOR("Michael Buesch"); 60MODULE_AUTHOR("Michael Buesch");
61MODULE_LICENSE("GPL"); 61MODULE_LICENSE("GPL");
62 62
63MODULE_FIRMWARE(B43legacy_SUPPORTED_FIRMWARE_ID);
64
63#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO) 65#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO)
64static int modparam_pio; 66static int modparam_pio;
65module_param_named(pio, modparam_pio, int, 0444); 67module_param_named(pio, modparam_pio, int, 0444);
@@ -1640,10 +1642,11 @@ static int b43legacy_upload_microcode(struct b43legacy_wldev *dev)
1640 err = -EOPNOTSUPP; 1642 err = -EOPNOTSUPP;
1641 goto error; 1643 goto error;
1642 } 1644 }
1643 b43legacydbg(dev->wl, "Loading firmware version 0x%X, patch level %u " 1645 b43legacyinfo(dev->wl, "Loading firmware version 0x%X, patch level %u "
1644 "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", fwrev, fwpatch, 1646 "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", fwrev, fwpatch,
1645 (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF, 1647 (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF,
1646 (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F); 1648 (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F,
1649 fwtime & 0x1F);
1647 1650
1648 dev->fw.rev = fwrev; 1651 dev->fw.rev = fwrev;
1649 dev->fw.patch = fwpatch; 1652 dev->fw.patch = fwpatch;
@@ -3806,6 +3809,32 @@ static struct ssb_driver b43legacy_ssb_driver = {
3806 .resume = b43legacy_resume, 3809 .resume = b43legacy_resume,
3807}; 3810};
3808 3811
3812static void b43legacy_print_driverinfo(void)
3813{
3814 const char *feat_pci = "", *feat_leds = "", *feat_rfkill = "",
3815 *feat_pio = "", *feat_dma = "";
3816
3817#ifdef CONFIG_B43LEGACY_PCI_AUTOSELECT
3818 feat_pci = "P";
3819#endif
3820#ifdef CONFIG_B43LEGACY_LEDS
3821 feat_leds = "L";
3822#endif
3823#ifdef CONFIG_B43LEGACY_RFKILL
3824 feat_rfkill = "R";
3825#endif
3826#ifdef CONFIG_B43LEGACY_PIO
3827 feat_pio = "I";
3828#endif
3829#ifdef CONFIG_B43LEGACY_DMA
3830 feat_dma = "D";
3831#endif
3832 printk(KERN_INFO "Broadcom 43xx driver loaded "
3833 "[ Features: %s%s%s%s%s, Firmware-ID: "
3834 B43legacy_SUPPORTED_FIRMWARE_ID " ]\n",
3835 feat_pci, feat_leds, feat_rfkill, feat_pio, feat_dma);
3836}
3837
3809static int __init b43legacy_init(void) 3838static int __init b43legacy_init(void)
3810{ 3839{
3811 int err; 3840 int err;
@@ -3816,6 +3845,8 @@ static int __init b43legacy_init(void)
3816 if (err) 3845 if (err)
3817 goto err_dfs_exit; 3846 goto err_dfs_exit;
3818 3847
3848 b43legacy_print_driverinfo();
3849
3819 return err; 3850 return err;
3820 3851
3821err_dfs_exit: 3852err_dfs_exit:
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 3e6ad7b92c83..a56d9fc6354f 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -3365,7 +3365,6 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv,
3365 /* Set us so that we have processed and used all buffers, but have 3365 /* Set us so that we have processed and used all buffers, but have
3366 * not restocked the Rx queue with fresh buffers */ 3366 * not restocked the Rx queue with fresh buffers */
3367 rxq->read = rxq->write = 0; 3367 rxq->read = rxq->write = 0;
3368 rxq->processed = RX_QUEUE_SIZE - 1;
3369 rxq->free_count = 0; 3368 rxq->free_count = 0;
3370 spin_unlock_irqrestore(&rxq->lock, flags); 3369 spin_unlock_irqrestore(&rxq->lock, flags);
3371} 3370}
@@ -3607,7 +3606,22 @@ static int ipw_load(struct ipw_priv *priv)
3607 * Driver allocates buffers of this size for Rx 3606 * Driver allocates buffers of this size for Rx
3608 */ 3607 */
3609 3608
3610static inline int ipw_queue_space(const struct clx2_queue *q) 3609/**
3610 * ipw_rx_queue_space - Return number of free slots available in queue.
3611 */
3612static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3613{
3614 int s = q->read - q->write;
3615 if (s <= 0)
3616 s += RX_QUEUE_SIZE;
3617	/* keep some headroom so a full queue is not confused with an empty one */
3618 s -= 2;
3619 if (s < 0)
3620 s = 0;
3621 return s;
3622}
3623
3624static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3611{ 3625{
3612 int s = q->last_used - q->first_empty; 3626 int s = q->last_used - q->first_empty;
3613 if (s <= 0) 3627 if (s <= 0)
@@ -4947,7 +4961,7 @@ static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4947 priv->tx_packets++; 4961 priv->tx_packets++;
4948 } 4962 }
4949 done: 4963 done:
4950 if ((ipw_queue_space(q) > q->low_mark) && 4964 if ((ipw_tx_queue_space(q) > q->low_mark) &&
4951 (qindex >= 0) && 4965 (qindex >= 0) &&
4952 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev)) 4966 (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4953 netif_wake_queue(priv->net_dev); 4967 netif_wake_queue(priv->net_dev);
@@ -4965,7 +4979,7 @@ static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4965 struct clx2_queue *q = &txq->q; 4979 struct clx2_queue *q = &txq->q;
4966 struct tfd_frame *tfd; 4980 struct tfd_frame *tfd;
4967 4981
4968 if (ipw_queue_space(q) < (sync ? 1 : 2)) { 4982 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
4969 IPW_ERROR("No space for Tx\n"); 4983 IPW_ERROR("No space for Tx\n");
4970 return -EBUSY; 4984 return -EBUSY;
4971 } 4985 }
@@ -5070,7 +5084,7 @@ static void ipw_rx_queue_restock(struct ipw_priv *priv)
5070 5084
5071 spin_lock_irqsave(&rxq->lock, flags); 5085 spin_lock_irqsave(&rxq->lock, flags);
5072 write = rxq->write; 5086 write = rxq->write;
5073 while ((rxq->write != rxq->processed) && (rxq->free_count)) { 5087 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5074 element = rxq->rx_free.next; 5088 element = rxq->rx_free.next;
5075 rxb = list_entry(element, struct ipw_rx_mem_buffer, list); 5089 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5076 list_del(element); 5090 list_del(element);
@@ -5187,7 +5201,6 @@ static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5187 /* Set us so that we have processed and used all buffers, but have 5201 /* Set us so that we have processed and used all buffers, but have
5188 * not restocked the Rx queue with fresh buffers */ 5202 * not restocked the Rx queue with fresh buffers */
5189 rxq->read = rxq->write = 0; 5203 rxq->read = rxq->write = 0;
5190 rxq->processed = RX_QUEUE_SIZE - 1;
5191 rxq->free_count = 0; 5204 rxq->free_count = 0;
5192 5205
5193 return rxq; 5206 return rxq;
@@ -8223,13 +8236,17 @@ static void ipw_rx(struct ipw_priv *priv)
8223 struct ieee80211_hdr_4addr *header; 8236 struct ieee80211_hdr_4addr *header;
8224 u32 r, w, i; 8237 u32 r, w, i;
8225 u8 network_packet; 8238 u8 network_packet;
8239 u8 fill_rx = 0;
8226 DECLARE_MAC_BUF(mac); 8240 DECLARE_MAC_BUF(mac);
8227 DECLARE_MAC_BUF(mac2); 8241 DECLARE_MAC_BUF(mac2);
8228 DECLARE_MAC_BUF(mac3); 8242 DECLARE_MAC_BUF(mac3);
8229 8243
8230 r = ipw_read32(priv, IPW_RX_READ_INDEX); 8244 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8231 w = ipw_read32(priv, IPW_RX_WRITE_INDEX); 8245 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8232 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE; 8246 i = priv->rxq->read;
8247
8248 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8249 fill_rx = 1;
8233 8250
8234 while (i != r) { 8251 while (i != r) {
8235 rxb = priv->rxq->queue[i]; 8252 rxb = priv->rxq->queue[i];
@@ -8404,11 +8421,17 @@ static void ipw_rx(struct ipw_priv *priv)
8404 list_add_tail(&rxb->list, &priv->rxq->rx_used); 8421 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8405 8422
8406 i = (i + 1) % RX_QUEUE_SIZE; 8423 i = (i + 1) % RX_QUEUE_SIZE;
8424
8425	/* If there are a lot of unused frames, restock the Rx queue
8426 * so the ucode won't assert */
8427 if (fill_rx) {
8428 priv->rxq->read = i;
8429 ipw_rx_queue_replenish(priv);
8430 }
8407 } 8431 }
8408 8432
8409 /* Backtrack one entry */ 8433 /* Backtrack one entry */
8410 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1; 8434 priv->rxq->read = i;
8411
8412 ipw_rx_queue_restock(priv); 8435 ipw_rx_queue_restock(priv);
8413} 8436}
8414 8437
@@ -10336,7 +10359,7 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10336 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd); 10359 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10337 ipw_write32(priv, q->reg_w, q->first_empty); 10360 ipw_write32(priv, q->reg_w, q->first_empty);
10338 10361
10339 if (ipw_queue_space(q) < q->high_mark) 10362 if (ipw_tx_queue_space(q) < q->high_mark)
10340 netif_stop_queue(priv->net_dev); 10363 netif_stop_queue(priv->net_dev);
10341 10364
10342 return NETDEV_TX_OK; 10365 return NETDEV_TX_OK;
@@ -10357,7 +10380,7 @@ static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10357 struct clx2_tx_queue *txq = &priv->txq[0]; 10380 struct clx2_tx_queue *txq = &priv->txq[0];
10358#endif /* CONFIG_IPW2200_QOS */ 10381#endif /* CONFIG_IPW2200_QOS */
10359 10382
10360 if (ipw_queue_space(&txq->q) < txq->q.high_mark) 10383 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10361 return 1; 10384 return 1;
10362 10385
10363 return 0; 10386 return 0;
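
The ipw2200 rework above drops the rxq->processed bookkeeping and sizes the ring purely from the read/write pointers, reserving two slots so a completely full ring never looks identical to an empty one. The arithmetic, as a standalone hedged sketch where size plays the role of RX_QUEUE_SIZE:

    static int ring_space(int read, int write, int size)
    {
            int s = read - write;

            if (s <= 0)
                    s += size;      /* the writer has wrapped past the reader */
            s -= 2;                 /* keep headroom so full != empty */
            return s < 0 ? 0 : s;
    }
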
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 5ee1ad69898b..40b71bc2c4a4 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -687,6 +687,12 @@ static int iwl3945_enqueue_hcmd(struct iwl3945_priv *priv, struct iwl3945_host_c
687 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && 687 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
688 !(cmd->meta.flags & CMD_SIZE_HUGE)); 688 !(cmd->meta.flags & CMD_SIZE_HUGE));
689 689
690
691 if (iwl3945_is_rfkill(priv)) {
692 IWL_DEBUG_INFO("Not sending command - RF KILL");
693 return -EIO;
694 }
695
690 if (iwl3945_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) { 696 if (iwl3945_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
691 IWL_ERROR("No space for Tx\n"); 697 IWL_ERROR("No space for Tx\n");
692 return -ENOSPC; 698 return -ENOSPC;
@@ -1580,7 +1586,7 @@ static inline int iwl3945_eeprom_acquire_semaphore(struct iwl3945_priv *priv)
1580 */ 1586 */
1581int iwl3945_eeprom_init(struct iwl3945_priv *priv) 1587int iwl3945_eeprom_init(struct iwl3945_priv *priv)
1582{ 1588{
1583 __le16 *e = (__le16 *)&priv->eeprom; 1589 u16 *e = (u16 *)&priv->eeprom;
1584 u32 gp = iwl3945_read32(priv, CSR_EEPROM_GP); 1590 u32 gp = iwl3945_read32(priv, CSR_EEPROM_GP);
1585 u32 r; 1591 u32 r;
1586 int sz = sizeof(priv->eeprom); 1592 int sz = sizeof(priv->eeprom);
@@ -1623,7 +1629,7 @@ int iwl3945_eeprom_init(struct iwl3945_priv *priv)
1623 IWL_ERROR("Time out reading EEPROM[%d]", addr); 1629 IWL_ERROR("Time out reading EEPROM[%d]", addr);
1624 return -ETIMEDOUT; 1630 return -ETIMEDOUT;
1625 } 1631 }
1626 e[addr / 2] = cpu_to_le16(r >> 16); 1632 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
1627 } 1633 }
1628 1634
1629 return 0; 1635 return 0;
@@ -2806,7 +2812,8 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2806#endif 2812#endif
2807 2813
2808 /* drop all data frame if we are not associated */ 2814 /* drop all data frame if we are not associated */
2809 if ((!iwl3945_is_associated(priv) || !priv->assoc_id) && 2815 if ((!iwl3945_is_associated(priv) ||
2816 ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id)) &&
2810 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { 2817 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
2811 IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n"); 2818 IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n");
2812 goto drop_unlock; 2819 goto drop_unlock;
@@ -4281,7 +4288,7 @@ static void iwl3945_rx_handle(struct iwl3945_priv *priv)
4281 int reclaim; 4288 int reclaim;
4282 unsigned long flags; 4289 unsigned long flags;
4283 u8 fill_rx = 0; 4290 u8 fill_rx = 0;
4284 u32 count = 0; 4291 u32 count = 8;
4285 4292
4286 /* uCode's read index (stored in shared DRAM) indicates the last Rx 4293 /* uCode's read index (stored in shared DRAM) indicates the last Rx
4287 * buffer that the driver may process (last buffer filled by ucode). */ 4294 * buffer that the driver may process (last buffer filled by ucode). */
@@ -6256,6 +6263,8 @@ static void __iwl3945_down(struct iwl3945_priv *priv)
6256 STATUS_RF_KILL_HW | 6263 STATUS_RF_KILL_HW |
6257 test_bit(STATUS_RF_KILL_SW, &priv->status) << 6264 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6258 STATUS_RF_KILL_SW | 6265 STATUS_RF_KILL_SW |
6266 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
6267 STATUS_GEO_CONFIGURED |
6259 test_bit(STATUS_IN_SUSPEND, &priv->status) << 6268 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6260 STATUS_IN_SUSPEND; 6269 STATUS_IN_SUSPEND;
6261 goto exit; 6270 goto exit;
@@ -6267,6 +6276,8 @@ static void __iwl3945_down(struct iwl3945_priv *priv)
6267 STATUS_RF_KILL_HW | 6276 STATUS_RF_KILL_HW |
6268 test_bit(STATUS_RF_KILL_SW, &priv->status) << 6277 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6269 STATUS_RF_KILL_SW | 6278 STATUS_RF_KILL_SW |
6279 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
6280 STATUS_GEO_CONFIGURED |
6270 test_bit(STATUS_IN_SUSPEND, &priv->status) << 6281 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6271 STATUS_IN_SUSPEND | 6282 STATUS_IN_SUSPEND |
6272 test_bit(STATUS_FW_ERROR, &priv->status) << 6283 test_bit(STATUS_FW_ERROR, &priv->status) <<
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c
index f423241b9567..a23d4798653b 100644
--- a/drivers/net/wireless/iwlwifi/iwl4965-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c
@@ -692,6 +692,11 @@ static int iwl4965_enqueue_hcmd(struct iwl4965_priv *priv, struct iwl4965_host_c
692 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && 692 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
693 !(cmd->meta.flags & CMD_SIZE_HUGE)); 693 !(cmd->meta.flags & CMD_SIZE_HUGE));
694 694
695 if (iwl4965_is_rfkill(priv)) {
696 IWL_DEBUG_INFO("Not sending command - RF KILL");
697 return -EIO;
698 }
699
695 if (iwl4965_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) { 700 if (iwl4965_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
696 IWL_ERROR("No space for Tx\n"); 701 IWL_ERROR("No space for Tx\n");
697 return -ENOSPC; 702 return -ENOSPC;
@@ -1654,7 +1659,7 @@ static inline void iwl4965_eeprom_release_semaphore(struct iwl4965_priv *priv)
1654 */ 1659 */
1655int iwl4965_eeprom_init(struct iwl4965_priv *priv) 1660int iwl4965_eeprom_init(struct iwl4965_priv *priv)
1656{ 1661{
1657 __le16 *e = (__le16 *)&priv->eeprom; 1662 u16 *e = (u16 *)&priv->eeprom;
1658 u32 gp = iwl4965_read32(priv, CSR_EEPROM_GP); 1663 u32 gp = iwl4965_read32(priv, CSR_EEPROM_GP);
1659 u32 r; 1664 u32 r;
1660 int sz = sizeof(priv->eeprom); 1665 int sz = sizeof(priv->eeprom);
@@ -1698,7 +1703,7 @@ int iwl4965_eeprom_init(struct iwl4965_priv *priv)
1698 rc = -ETIMEDOUT; 1703 rc = -ETIMEDOUT;
1699 goto done; 1704 goto done;
1700 } 1705 }
1701 e[addr / 2] = cpu_to_le16(r >> 16); 1706 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
1702 } 1707 }
1703 rc = 0; 1708 rc = 0;
1704 1709
@@ -2935,7 +2940,7 @@ static int iwl4965_tx_skb(struct iwl4965_priv *priv,
2935 /* drop all data frame if we are not associated */ 2940 /* drop all data frame if we are not associated */
2936 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && 2941 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
2937 (!iwl4965_is_associated(priv) || 2942 (!iwl4965_is_associated(priv) ||
2938 !priv->assoc_id || 2943 ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
2939 !priv->assoc_station_added)) { 2944 !priv->assoc_station_added)) {
2940 IWL_DEBUG_DROP("Dropping - !iwl4965_is_associated\n"); 2945 IWL_DEBUG_DROP("Dropping - !iwl4965_is_associated\n");
2941 goto drop_unlock; 2946 goto drop_unlock;
@@ -4664,7 +4669,7 @@ static void iwl4965_rx_handle(struct iwl4965_priv *priv)
4664 int reclaim; 4669 int reclaim;
4665 unsigned long flags; 4670 unsigned long flags;
4666 u8 fill_rx = 0; 4671 u8 fill_rx = 0;
4667 u32 count = 0; 4672 u32 count = 8;
4668 4673
4669 /* uCode's read index (stored in shared DRAM) indicates the last Rx 4674 /* uCode's read index (stored in shared DRAM) indicates the last Rx
4670 * buffer that the driver may process (last buffer filled by ucode). */ 4675 * buffer that the driver may process (last buffer filled by ucode). */
@@ -6680,6 +6685,8 @@ static void __iwl4965_down(struct iwl4965_priv *priv)
6680 STATUS_RF_KILL_HW | 6685 STATUS_RF_KILL_HW |
6681 test_bit(STATUS_RF_KILL_SW, &priv->status) << 6686 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6682 STATUS_RF_KILL_SW | 6687 STATUS_RF_KILL_SW |
6688 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
6689 STATUS_GEO_CONFIGURED |
6683 test_bit(STATUS_IN_SUSPEND, &priv->status) << 6690 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6684 STATUS_IN_SUSPEND; 6691 STATUS_IN_SUSPEND;
6685 goto exit; 6692 goto exit;
@@ -6691,6 +6698,8 @@ static void __iwl4965_down(struct iwl4965_priv *priv)
6691 STATUS_RF_KILL_HW | 6698 STATUS_RF_KILL_HW |
6692 test_bit(STATUS_RF_KILL_SW, &priv->status) << 6699 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
6693 STATUS_RF_KILL_SW | 6700 STATUS_RF_KILL_SW |
6701 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
6702 STATUS_GEO_CONFIGURED |
6694 test_bit(STATUS_IN_SUSPEND, &priv->status) << 6703 test_bit(STATUS_IN_SUSPEND, &priv->status) <<
6695 STATUS_IN_SUSPEND | 6704 STATUS_IN_SUSPEND |
6696 test_bit(STATUS_FW_ERROR, &priv->status) << 6705 test_bit(STATUS_FW_ERROR, &priv->status) <<
diff --git a/drivers/net/wireless/p54usb.c b/drivers/net/wireless/p54usb.c
index 60d286eb0b8b..e7d4aee8799e 100644
--- a/drivers/net/wireless/p54usb.c
+++ b/drivers/net/wireless/p54usb.c
@@ -35,6 +35,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
35 {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ 35 {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
36 {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */ 36 {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */
37 {USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */ 37 {USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */
38 {USB_DEVICE(0x083a, 0x5501)}, /* Phillips CPWUA054 */
38 {USB_DEVICE(0x0846, 0x4200)}, /* Netgear WG121 */ 39 {USB_DEVICE(0x0846, 0x4200)}, /* Netgear WG121 */
39 {USB_DEVICE(0x0846, 0x4210)}, /* Netgear WG121 the second ? */ 40 {USB_DEVICE(0x0846, 0x4210)}, /* Netgear WG121 the second ? */
40 {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */ 41 {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */
@@ -62,6 +63,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
62 {USB_DEVICE(0x0cde, 0x0008)}, /* Sagem XG703A */ 63 {USB_DEVICE(0x0cde, 0x0008)}, /* Sagem XG703A */
63 {USB_DEVICE(0x0d8e, 0x3762)}, /* DLink DWL-G120 Cohiba */ 64 {USB_DEVICE(0x0d8e, 0x3762)}, /* DLink DWL-G120 Cohiba */
64 {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */ 65 {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */
66 {USB_DEVICE(0x13b1, 0x000a)}, /* Linksys WUSB54G ver 2 */
65 {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */ 67 {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */
66 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ 68 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
67 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ 69 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index d3ecf89abd93..8ce2ddf8024f 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2300,7 +2300,7 @@ static void rndis_update_wireless_stats(struct work_struct *work)
2300 struct usbnet *usbdev = priv->usbdev; 2300 struct usbnet *usbdev = priv->usbdev;
2301 struct iw_statistics iwstats; 2301 struct iw_statistics iwstats;
2302 __le32 rssi, tmp; 2302 __le32 rssi, tmp;
2303 int len, ret, bitrate, j; 2303 int len, ret, j;
2304 unsigned long flags; 2304 unsigned long flags;
2305 int update_jiffies = STATS_UPDATE_JIFFIES; 2305 int update_jiffies = STATS_UPDATE_JIFFIES;
2306 void *buf; 2306 void *buf;
@@ -2352,14 +2352,10 @@ static void rndis_update_wireless_stats(struct work_struct *work)
2352 if (ret == 0) 2352 if (ret == 0)
2353 iwstats.discard.misc += le32_to_cpu(tmp); 2353 iwstats.discard.misc += le32_to_cpu(tmp);
2354 2354
2355 /* Workaround transfer stalls on poor quality links. */ 2355 /* Workaround transfer stalls on poor quality links.
2356 len = sizeof(tmp); 2356 * TODO: find right way to fix these stalls (as stalls do not happen
2357 ret = rndis_query_oid(usbdev, OID_GEN_LINK_SPEED, &tmp, &len); 2357 * with ndiswrapper/windows driver). */
2358 if (ret == 0) { 2358 if (iwstats.qual.qual <= 25) {
2359 bitrate = le32_to_cpu(tmp) * 100;
2360 if (bitrate > 11000000)
2361 goto end;
2362
2363 /* Decrease stats worker interval to catch stalls. 2359 /* Decrease stats worker interval to catch stalls.
2364 * faster. Faster than 400-500ms causes packet loss, 2360 * faster. Faster than 400-500ms causes packet loss,
2365 * Slower doesn't catch stalls fast enough. 2361 * Slower doesn't catch stalls fast enough.
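[Editor's note] The rndis_wlan hunk replaces the OID_GEN_LINK_SPEED query with a simple link-quality threshold: only links at quality 25 or below get the shorter stats-polling interval used to catch transfer stalls. A sketch of that interval selection; the period constants and helper name below are illustrative, not the driver's actual values.

#include <stdio.h>

#define STATS_UPDATE_PERIOD 500   /* normal polling period, illustrative */
#define STALL_CHECK_PERIOD  400   /* faster period used on poor links    */

/* Pick the next polling delay from the current link quality (0..100). */
static int next_stats_interval(int qual)
{
        /* Poor link: poll faster to catch transfer stalls sooner.
         * Much below ~400-500 ms causes packet loss, slower misses
         * stalls, so only the threshold decides which period is used. */
        if (qual <= 25)
                return STALL_CHECK_PERIOD;
        return STATS_UPDATE_PERIOD;
}

int main(void)
{
        printf("qual 80 -> %d\n", next_stats_interval(80)); /* 500 */
        printf("qual 20 -> %d\n", next_stats_interval(20)); /* 400 */
        return 0;
}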
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 86ded4066f5b..4ca9730e5e92 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1839,11 +1839,11 @@ static struct usb_device_id rt2500usb_device_table[] = {
1839 /* Hercules */ 1839 /* Hercules */
1840 { USB_DEVICE(0x06f8, 0xe000), USB_DEVICE_DATA(&rt2500usb_ops) }, 1840 { USB_DEVICE(0x06f8, 0xe000), USB_DEVICE_DATA(&rt2500usb_ops) },
1841 /* Melco */ 1841 /* Melco */
1842 { USB_DEVICE(0x0411, 0x005e), USB_DEVICE_DATA(&rt2500usb_ops) },
1842 { USB_DEVICE(0x0411, 0x0066), USB_DEVICE_DATA(&rt2500usb_ops) }, 1843 { USB_DEVICE(0x0411, 0x0066), USB_DEVICE_DATA(&rt2500usb_ops) },
1843 { USB_DEVICE(0x0411, 0x0067), USB_DEVICE_DATA(&rt2500usb_ops) }, 1844 { USB_DEVICE(0x0411, 0x0067), USB_DEVICE_DATA(&rt2500usb_ops) },
1844 { USB_DEVICE(0x0411, 0x008b), USB_DEVICE_DATA(&rt2500usb_ops) }, 1845 { USB_DEVICE(0x0411, 0x008b), USB_DEVICE_DATA(&rt2500usb_ops) },
1845 { USB_DEVICE(0x0411, 0x0097), USB_DEVICE_DATA(&rt2500usb_ops) }, 1846 { USB_DEVICE(0x0411, 0x0097), USB_DEVICE_DATA(&rt2500usb_ops) },
1846
1847 /* MSI */ 1847 /* MSI */
1848 { USB_DEVICE(0x0db0, 0x6861), USB_DEVICE_DATA(&rt2500usb_ops) }, 1848 { USB_DEVICE(0x0db0, 0x6861), USB_DEVICE_DATA(&rt2500usb_ops) },
1849 { USB_DEVICE(0x0db0, 0x6865), USB_DEVICE_DATA(&rt2500usb_ops) }, 1849 { USB_DEVICE(0x0db0, 0x6865), USB_DEVICE_DATA(&rt2500usb_ops) },
diff --git a/drivers/net/wireless/rtl8180_dev.c b/drivers/net/wireless/rtl8180_dev.c
index 27ebd689aa21..5e9a8ace0d81 100644
--- a/drivers/net/wireless/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl8180_dev.c
@@ -135,13 +135,15 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
135 while (skb_queue_len(&ring->queue)) { 135 while (skb_queue_len(&ring->queue)) {
136 struct rtl8180_tx_desc *entry = &ring->desc[ring->idx]; 136 struct rtl8180_tx_desc *entry = &ring->desc[ring->idx];
137 struct sk_buff *skb; 137 struct sk_buff *skb;
138 struct ieee80211_tx_status status = { {0} }; 138 struct ieee80211_tx_status status;
139 struct ieee80211_tx_control *control; 139 struct ieee80211_tx_control *control;
140 u32 flags = le32_to_cpu(entry->flags); 140 u32 flags = le32_to_cpu(entry->flags);
141 141
142 if (flags & RTL8180_TX_DESC_FLAG_OWN) 142 if (flags & RTL8180_TX_DESC_FLAG_OWN)
143 return; 143 return;
144 144
145 memset(&status, 0, sizeof(status));
146
145 ring->idx = (ring->idx + 1) % ring->entries; 147 ring->idx = (ring->idx + 1) % ring->entries;
146 skb = __skb_dequeue(&ring->queue); 148 skb = __skb_dequeue(&ring->queue);
147 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf), 149 pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
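[Editor's note] This rtl8180 hunk, like the rtl8187 and zd1211rw ones further down, replaces the `= { {0} }` initializer on the ieee80211_tx_status variable with an explicit memset before use. A small comparison of the two forms; the warning motivation is a plausible reading (some gcc versions complain about the nested-brace form), not necessarily the commit's stated reason.

#include <stdio.h>
#include <string.h>

struct control { int flags; int retry_limit; };
struct tx_status {
        struct control control;
        int flags;
        int ack_signal;
};

int main(void)
{
        /* Old style: brace-initialize the first member, the rest becomes
         * zero by the language rules; some compilers warn about "{0}". */
        struct tx_status a = { {0} };

        /* New style used by the patch: declare, then clear explicitly. */
        struct tx_status b;
        memset(&b, 0, sizeof(b));

        printf("%d %d\n", a.flags, b.flags);   /* both 0 */
        return 0;
}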
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index 0d71716d750d..f44505994a0e 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -113,10 +113,12 @@ void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
113 113
114static void rtl8187_tx_cb(struct urb *urb) 114static void rtl8187_tx_cb(struct urb *urb)
115{ 115{
116 struct ieee80211_tx_status status = { {0} }; 116 struct ieee80211_tx_status status;
117 struct sk_buff *skb = (struct sk_buff *)urb->context; 117 struct sk_buff *skb = (struct sk_buff *)urb->context;
118 struct rtl8187_tx_info *info = (struct rtl8187_tx_info *)skb->cb; 118 struct rtl8187_tx_info *info = (struct rtl8187_tx_info *)skb->cb;
119 119
120 memset(&status, 0, sizeof(status));
121
120 usb_free_urb(info->urb); 122 usb_free_urb(info->urb);
121 if (info->control) 123 if (info->control)
122 memcpy(&status.control, info->control, sizeof(status.control)); 124 memcpy(&status.control, info->control, sizeof(status.control));
diff --git a/drivers/net/wireless/wavelan.h b/drivers/net/wireless/wavelan.h
index 27172cde5a39..9ab360558ffd 100644
--- a/drivers/net/wireless/wavelan.h
+++ b/drivers/net/wireless/wavelan.h
@@ -85,7 +85,7 @@ union hacs_u
85#define HASR_MMC_INTR 0x0002 /* Interrupt request from MMC */ 85#define HASR_MMC_INTR 0x0002 /* Interrupt request from MMC */
86#define HASR_MMC_BUSY 0x0004 /* MMC busy indication */ 86#define HASR_MMC_BUSY 0x0004 /* MMC busy indication */
87#define HASR_PSA_BUSY 0x0008 /* LAN parameter storage area busy */ 87#define HASR_PSA_BUSY 0x0008 /* LAN parameter storage area busy */
88}; 88} __attribute__ ((packed));
89 89
90typedef struct ha_t ha_t; 90typedef struct ha_t ha_t;
91struct ha_t 91struct ha_t
@@ -292,7 +292,7 @@ struct mmw_t
292#define MMW_EXT_ANT_INTERNAL 0x00 /* Internal antenna */ 292#define MMW_EXT_ANT_INTERNAL 0x00 /* Internal antenna */
293#define MMW_EXT_ANT_EXTERNAL 0x03 /* External antenna */ 293#define MMW_EXT_ANT_EXTERNAL 0x03 /* External antenna */
294#define MMW_EXT_ANT_IQ_TEST 0x1C /* IQ test pattern (set to 0) */ 294#define MMW_EXT_ANT_IQ_TEST 0x1C /* IQ test pattern (set to 0) */
295}; 295} __attribute__ ((packed));
296 296
297#define MMW_SIZE 37 297#define MMW_SIZE 37
298 298
@@ -347,7 +347,7 @@ struct mmr_t
347 unsigned char mmr_unused4[1]; /* unused */ 347 unsigned char mmr_unused4[1]; /* unused */
348 unsigned char mmr_fee_data_l; /* Read data from EEPROM (low) */ 348 unsigned char mmr_fee_data_l; /* Read data from EEPROM (low) */
349 unsigned char mmr_fee_data_h; /* Read data from EEPROM (high) */ 349 unsigned char mmr_fee_data_h; /* Read data from EEPROM (high) */
350}; 350} __attribute__ ((packed));
351 351
352#define MMR_SIZE 36 352#define MMR_SIZE 36
353 353
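[Editor's note] The wavelan.h changes add __attribute__((packed)) to structures that mirror device registers, so the compiler cannot insert padding and break the hardware layout. A self-contained illustration of the size difference on an example struct (not one of the wavelan layouts).

#include <stdio.h>

struct unpacked_regs {
        unsigned char  status;
        unsigned short data;     /* typically padded to a 2-byte boundary */
        unsigned char  control;
};

struct packed_regs {
        unsigned char  status;
        unsigned short data;     /* no padding: matches the device layout */
        unsigned char  control;
} __attribute__ ((packed));

int main(void)
{
        /* On most ABIs: 6 vs 4 bytes; packed keeps the on-wire layout. */
        printf("unpacked=%zu packed=%zu\n",
               sizeof(struct unpacked_regs), sizeof(struct packed_regs));
        return 0;
}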
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 49127e4b42c2..76ef2d83919d 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -360,11 +360,14 @@ void zd_mac_tx_failed(struct ieee80211_hw *hw)
360{ 360{
361 struct sk_buff_head *q = &zd_hw_mac(hw)->ack_wait_queue; 361 struct sk_buff_head *q = &zd_hw_mac(hw)->ack_wait_queue;
362 struct sk_buff *skb; 362 struct sk_buff *skb;
363 struct ieee80211_tx_status status = {{0}}; 363 struct ieee80211_tx_status status;
364 364
365 skb = skb_dequeue(q); 365 skb = skb_dequeue(q);
366 if (skb == NULL) 366 if (skb == NULL)
367 return; 367 return;
368
369 memset(&status, 0, sizeof(status));
370
368 tx_status(hw, skb, &status, 0); 371 tx_status(hw, skb, &status, 0);
369} 372}
370 373
@@ -389,7 +392,8 @@ void zd_mac_tx_to_dev(struct sk_buff *skb, int error)
389 if (unlikely(error || 392 if (unlikely(error ||
390 (cb->control->flags & IEEE80211_TXCTL_NO_ACK))) 393 (cb->control->flags & IEEE80211_TXCTL_NO_ACK)))
391 { 394 {
392 struct ieee80211_tx_status status = {{0}}; 395 struct ieee80211_tx_status status;
396 memset(&status, 0, sizeof(status));
393 tx_status(hw, skb, &status, !error); 397 tx_status(hw, skb, &status, !error);
394 } else { 398 } else {
395 struct sk_buff_head *q = 399 struct sk_buff_head *q =
@@ -603,7 +607,9 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
603 tx_hdr = (struct ieee80211_hdr *)skb->data; 607 tx_hdr = (struct ieee80211_hdr *)skb->data;
604 if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1))) 608 if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1)))
605 { 609 {
606 struct ieee80211_tx_status status = {{0}}; 610 struct ieee80211_tx_status status;
611
612 memset(&status, 0, sizeof(status));
607 status.flags = IEEE80211_TX_STATUS_ACK; 613 status.flags = IEEE80211_TX_STATUS_ACK;
608 status.ack_signal = stats->ssi; 614 status.ack_signal = stats->ssi;
609 __skb_unlink(skb, q); 615 __skb_unlink(skb, q);
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 8134c7e198a5..b07ba2a14119 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -187,23 +187,22 @@ void sync_stop(void)
187 end_sync(); 187 end_sync();
188} 188}
189 189
190 190
191/* Optimisation. We can manage without taking the dcookie sem 191/* Optimisation. We can manage without taking the dcookie sem
192 * because we cannot reach this code without at least one 192 * because we cannot reach this code without at least one
193 * dcookie user still being registered (namely, the reader 193 * dcookie user still being registered (namely, the reader
194 * of the event buffer). */ 194 * of the event buffer). */
195static inline unsigned long fast_get_dcookie(struct dentry * dentry, 195static inline unsigned long fast_get_dcookie(struct path *path)
196 struct vfsmount * vfsmnt)
197{ 196{
198 unsigned long cookie; 197 unsigned long cookie;
199 198
200 if (dentry->d_cookie) 199 if (path->dentry->d_cookie)
201 return (unsigned long)dentry; 200 return (unsigned long)path->dentry;
202 get_dcookie(dentry, vfsmnt, &cookie); 201 get_dcookie(path, &cookie);
203 return cookie; 202 return cookie;
204} 203}
205 204
206 205
207/* Look up the dcookie for the task's first VM_EXECUTABLE mapping, 206/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
208 * which corresponds loosely to "application name". This is 207 * which corresponds loosely to "application name". This is
209 * not strictly necessary but allows oprofile to associate 208 * not strictly necessary but allows oprofile to associate
@@ -222,8 +221,7 @@ static unsigned long get_exec_dcookie(struct mm_struct * mm)
222 continue; 221 continue;
223 if (!(vma->vm_flags & VM_EXECUTABLE)) 222 if (!(vma->vm_flags & VM_EXECUTABLE))
224 continue; 223 continue;
225 cookie = fast_get_dcookie(vma->vm_file->f_path.dentry, 224 cookie = fast_get_dcookie(&vma->vm_file->f_path);
226 vma->vm_file->f_path.mnt);
227 break; 225 break;
228 } 226 }
229 227
@@ -248,8 +246,7 @@ static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, o
248 continue; 246 continue;
249 247
250 if (vma->vm_file) { 248 if (vma->vm_file) {
251 cookie = fast_get_dcookie(vma->vm_file->f_path.dentry, 249 cookie = fast_get_dcookie(&vma->vm_file->f_path);
252 vma->vm_file->f_path.mnt);
253 *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr - 250 *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
254 vma->vm_start; 251 vma->vm_start;
255 } else { 252 } else {
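[Editor's note] The oprofile hunks follow the VFS API change: fast_get_dcookie() now takes the single struct path that already bundles the dentry and vfsmount, instead of two separate arguments. A generic user-space sketch of the same refactor with toy types; all names below are invented.

#include <stdio.h>

struct toy_dentry { const char *name; };
struct toy_mount  { const char *mountpoint; };

/* The grouping the refactor relies on: one object carries both halves. */
struct toy_path {
        struct toy_mount  *mnt;
        struct toy_dentry *dentry;
};

/* Before: two loosely coupled parameters that must always travel together. */
static void describe_old(struct toy_dentry *d, struct toy_mount *m)
{
        printf("%s on %s\n", d->name, m->mountpoint);
}

/* After: one parameter; callers pass the path field they already have. */
static void describe_new(const struct toy_path *p)
{
        printf("%s on %s\n", p->dentry->name, p->mnt->mountpoint);
}

int main(void)
{
        struct toy_dentry d = { "libc.so" };
        struct toy_mount  m = { "/" };
        struct toy_path   p = { &m, &d };

        describe_old(&d, &m);
        describe_new(&p);
        return 0;
}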
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 238628d3a854..d76d37bcb9cc 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1768,7 +1768,7 @@ static int parport_PS2_supported(struct parport *pb)
1768} 1768}
1769 1769
1770#ifdef CONFIG_PARPORT_PC_FIFO 1770#ifdef CONFIG_PARPORT_PC_FIFO
1771static int __devinit parport_ECP_supported(struct parport *pb) 1771static int parport_ECP_supported(struct parport *pb)
1772{ 1772{
1773 int i; 1773 int i;
1774 int config, configb; 1774 int config, configb;
@@ -1992,7 +1992,7 @@ static int parport_ECPEPP_supported(struct parport *pb)
1992/* Don't bother probing for modes we know we won't use. */ 1992/* Don't bother probing for modes we know we won't use. */
1993static int __devinit parport_PS2_supported(struct parport *pb) { return 0; } 1993static int __devinit parport_PS2_supported(struct parport *pb) { return 0; }
1994#ifdef CONFIG_PARPORT_PC_FIFO 1994#ifdef CONFIG_PARPORT_PC_FIFO
1995static int __devinit parport_ECP_supported(struct parport *pb) { return 0; } 1995static int parport_ECP_supported(struct parport *pb) { return 0; }
1996#endif 1996#endif
1997static int __devinit parport_EPP_supported(struct parport *pb) { return 0; } 1997static int __devinit parport_EPP_supported(struct parport *pb) { return 0; }
1998static int __devinit parport_ECPEPP_supported(struct parport *pb){return 0;} 1998static int __devinit parport_ECPEPP_supported(struct parport *pb){return 0;}
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 8ed26480371f..f941f609dbf3 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -14,11 +14,12 @@
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA. 15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 * 16 *
17 * Copyright (C) Ashok Raj <ashok.raj@intel.com> 17 * Copyright (C) 2006-2008 Intel Corporation
18 * Copyright (C) Shaohua Li <shaohua.li@intel.com> 18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Copyright (C) Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
20 * 21 *
21 * This file implements early detection/parsing of DMA Remapping Devices 22 * This file implements early detection/parsing of DMA Remapping Devices
22 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI 23 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
23 * tables. 24 * tables.
24 */ 25 */
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index c8c263875c21..9279d5ba62e6 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -392,6 +392,9 @@ static int __init acpiphp_init(void)
392{ 392{
393 info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); 393 info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
394 394
395 if (acpi_pci_disabled)
396 return 0;
397
395 acpiphp_debug = debug; 398 acpiphp_debug = debug;
396 399
397 /* read all the ACPI info from the system */ 400 /* read all the ACPI info from the system */
@@ -401,6 +404,9 @@ static int __init acpiphp_init(void)
401 404
402static void __exit acpiphp_exit(void) 405static void __exit acpiphp_exit(void)
403{ 406{
407 if (acpi_pci_disabled)
408 return;
409
404 /* deallocate internal data structures etc. */ 410 /* deallocate internal data structures etc. */
405 acpiphp_glue_exit(); 411 acpiphp_glue_exit();
406} 412}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 750ebd7a4c10..b0a22b92717e 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -395,33 +395,34 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
395{ 395{
396 acpi_handle *phandle = (acpi_handle *)context; 396 acpi_handle *phandle = (acpi_handle *)context;
397 acpi_status status; 397 acpi_status status;
398 struct acpi_device_info info; 398 struct acpi_device_info *info;
399 struct acpi_buffer info_buffer = { 399 struct acpi_buffer info_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
400 .length = sizeof(struct acpi_device_info), 400 int retval = 0;
401 .pointer = &info,
402 };
403 401
404 status = acpi_get_object_info(handle, &info_buffer); 402 status = acpi_get_object_info(handle, &info_buffer);
405 if (ACPI_FAILURE(status)) { 403 if (ACPI_FAILURE(status)) {
406 err("%s: Failed to get device information\n", __FUNCTION__); 404 err("%s: Failed to get device information status=0x%x\n",
407 return 0; 405 __FUNCTION__, status);
406 return retval;
408 } 407 }
409 info.hardware_id.value[sizeof(info.hardware_id.value) - 1] = '\0'; 408 info = info_buffer.pointer;
410 409 info->hardware_id.value[sizeof(info->hardware_id.value) - 1] = '\0';
411 if (info.current_status && (info.valid & ACPI_VALID_HID) && 410
412 (!strcmp(info.hardware_id.value, IBM_HARDWARE_ID1) || 411 if (info->current_status && (info->valid & ACPI_VALID_HID) &&
413 !strcmp(info.hardware_id.value, IBM_HARDWARE_ID2))) { 412 (!strcmp(info->hardware_id.value, IBM_HARDWARE_ID1) ||
414 dbg("found hardware: %s, handle: %p\n", info.hardware_id.value, 413 !strcmp(info->hardware_id.value, IBM_HARDWARE_ID2))) {
415 handle); 414 dbg("found hardware: %s, handle: %p\n",
415 info->hardware_id.value, handle);
416 *phandle = handle; 416 *phandle = handle;
417 /* returning non-zero causes the search to stop 417 /* returning non-zero causes the search to stop
418 * and returns this value to the caller of 418 * and returns this value to the caller of
419 * acpi_walk_namespace, but it also causes some warnings 419 * acpi_walk_namespace, but it also causes some warnings
420 * in the acpi debug code to print... 420 * in the acpi debug code to print...
421 */ 421 */
422 return FOUND_APCI; 422 retval = FOUND_APCI;
423 } 423 }
424 return 0; 424 kfree(info);
425 return retval;
425} 426}
426 427
427static int __init ibm_acpiphp_init(void) 428static int __init ibm_acpiphp_init(void)
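[Editor's note] The acpiphp_ibm fix asks ACPICA to size and allocate the object-info result itself (ACPI_ALLOCATE_BUFFER) instead of handing in a fixed-size struct, and therefore frees the result with kfree() when done. A user-space sketch of that callee-allocates/caller-frees ownership pattern, with placeholder names.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buffer { size_t length; void *pointer; };

/* Callee decides the size, allocates, and hands ownership back. */
static int get_object_info(const char *id, struct buffer *out)
{
        size_t len = strlen(id) + 1;
        char *p = malloc(len);

        if (!p)
                return -1;
        memcpy(p, id, len);
        out->length = len;
        out->pointer = p;
        return 0;
}

int main(void)
{
        struct buffer info_buffer = { 0, NULL };

        if (get_object_info("DEMO0001", &info_buffer))
                return 1;

        printf("hardware id: %s\n", (char *)info_buffer.pointer);
        free(info_buffer.pointer);   /* caller owns the allocation */
        return 0;
}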
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index a4c3089f892a..977d29b32295 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -14,9 +14,10 @@
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA. 15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 * 16 *
17 * Copyright (C) Ashok Raj <ashok.raj@intel.com> 17 * Copyright (C) 2006-2008 Intel Corporation
18 * Copyright (C) Shaohua Li <shaohua.li@intel.com> 18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Copyright (C) Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 19 * Author: Shaohua Li <shaohua.li@intel.com>
20 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
20 */ 21 */
21 22
22#include <linux/init.h> 23#include <linux/init.h>
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h
index 07f5f6353bda..afc0ad96122e 100644
--- a/drivers/pci/intel-iommu.h
+++ b/drivers/pci/intel-iommu.h
@@ -14,8 +14,9 @@
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA. 15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 * 16 *
17 * Copyright (C) Ashok Raj <ashok.raj@intel.com> 17 * Copyright (C) 2006-2008 Intel Corporation
18 * Copyright (C) Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
19 */ 20 */
20 21
21#ifndef _INTEL_IOMMU_H_ 22#ifndef _INTEL_IOMMU_H_
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 8de7ab6c6d0c..dbcdd6bfa63a 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -3,7 +3,8 @@
3 * 3 *
4 * This file is released under the GPLv2. 4 * This file is released under the GPLv2.
5 * 5 *
6 * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 6 * Copyright (C) 2006-2008 Intel Corporation
7 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
7 */ 8 */
8 9
9#include "iova.h" 10#include "iova.h"
diff --git a/drivers/pci/iova.h b/drivers/pci/iova.h
index d521b5b7319c..228f6c94b69c 100644
--- a/drivers/pci/iova.h
+++ b/drivers/pci/iova.h
@@ -3,7 +3,8 @@
3 * 3 *
4 * This file is released under the GPLv2. 4 * This file is released under the GPLv2.
5 * 5 *
6 * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 6 * Copyright (C) 2006-2008 Intel Corporation
7 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
7 * 8 *
8 */ 9 */
9 10
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index e569645d59e2..4a23654184fc 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -158,6 +158,7 @@ run_osc_out:
158/** 158/**
159 * __pci_osc_support_set - register OS support to Firmware 159 * __pci_osc_support_set - register OS support to Firmware
160 * @flags: OS support bits 160 * @flags: OS support bits
161 * @hid: hardware ID
161 * 162 *
162 * Update OS support fields and doing a _OSC Query to obtain an update 163 * Update OS support fields and doing a _OSC Query to obtain an update
163 * from Firmware on supported control bits. 164 * from Firmware on supported control bits.
@@ -241,8 +242,6 @@ EXPORT_SYMBOL(pci_osc_control_set);
241 * choose from highest power _SxD to lowest power _SxW 242 * choose from highest power _SxD to lowest power _SxW
242 * else // no _PRW at S-state x 243 * else // no _PRW at S-state x
243 * choose highest power _SxD or any lower power 244 * choose highest power _SxD or any lower power
244 *
245 * currently we simply return _SxD, if present.
246 */ 245 */
247 246
248static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev, 247static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index ae3df46eaabf..183fddaa38b7 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -554,6 +554,7 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
554 case PM_EVENT_PRETHAW: 554 case PM_EVENT_PRETHAW:
555 /* REVISIT both freeze and pre-thaw "should" use D0 */ 555 /* REVISIT both freeze and pre-thaw "should" use D0 */
556 case PM_EVENT_SUSPEND: 556 case PM_EVENT_SUSPEND:
557 case PM_EVENT_HIBERNATE:
557 return PCI_D3hot; 558 return PCI_D3hot;
558 default: 559 default:
559 printk("Unrecognized suspend event %d\n", state.event); 560 printk("Unrecognized suspend event %d\n", state.event);
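[Editor's note] The pci.c hunk adds PM_EVENT_HIBERNATE to the cases that fall through to PCI_D3hot. A compact sketch of that event-to-power-state mapping with placeholder enums; the values are illustrative only.

#include <stdio.h>

enum pm_event { EV_FREEZE, EV_PRETHAW, EV_SUSPEND, EV_HIBERNATE, EV_ON };
enum pci_power { D0, D3HOT };

static enum pci_power choose_state(enum pm_event ev)
{
        switch (ev) {
        case EV_FREEZE:
        case EV_PRETHAW:        /* intentional fallthrough: all of these */
        case EV_SUSPEND:        /* park the device in D3hot              */
        case EV_HIBERNATE:
                return D3HOT;
        default:
                return D0;
        }
}

int main(void)
{
        printf("hibernate -> %s\n",
               choose_state(EV_HIBERNATE) == D3HOT ? "D3hot" : "D0");
        return 0;
}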
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 68aeeb7206de..ef18fcd641e2 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -422,7 +422,7 @@ int pci_proc_detach_device(struct pci_dev *dev)
422 struct proc_dir_entry *e; 422 struct proc_dir_entry *e;
423 423
424 if ((e = dev->procent)) { 424 if ((e = dev->procent)) {
425 if (atomic_read(&e->count)) 425 if (atomic_read(&e->count) > 1)
426 return -EBUSY; 426 return -EBUSY;
427 remove_proc_entry(e->name, dev->bus->procdir); 427 remove_proc_entry(e->name, dev->bus->procdir);
428 dev->procent = NULL; 428 dev->procent = NULL;
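[Editor's note] The proc.c test becomes `> 1` because the procfs entry always holds one reference on itself; only extra users (such as open files) should make removal return -EBUSY. A toy sketch of that counting convention, using a plain int where the kernel uses atomic_t.

#include <stdio.h>

struct entry { int count; };      /* stand-in for the proc entry refcount */

/* The object is created holding one reference on itself, so "busy"
 * means somebody else also holds one, i.e. count > 1. */
static int entry_busy(const struct entry *e)
{
        return e->count > 1;
}

int main(void)
{
        struct entry e = { .count = 1 };             /* only the self-reference */
        printf("idle: busy=%d\n", entry_busy(&e));   /* 0: safe to remove */

        e.count++;                                   /* an open file holds it */
        printf("open: busy=%d\n", entry_busy(&e));   /* 1: would return -EBUSY */
        return 0;
}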
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 0a953d43b9a2..bbad4a9f264f 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -867,13 +867,13 @@ static void quirk_disable_pxb(struct pci_dev *pdev)
867DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); 867DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
868DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); 868DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
869 869
870 870static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
871static void __devinit quirk_sb600_sata(struct pci_dev *pdev)
872{ 871{
873 /* set sb600 sata to ahci mode */ 872 /* set sb600/sb700/sb800 sata to ahci mode */
874 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { 873 u8 tmp;
875 u8 tmp;
876 874
875 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
876 if (tmp == 0x01) {
877 pci_read_config_byte(pdev, 0x40, &tmp); 877 pci_read_config_byte(pdev, 0x40, &tmp);
878 pci_write_config_byte(pdev, 0x40, tmp|1); 878 pci_write_config_byte(pdev, 0x40, tmp|1);
879 pci_write_config_byte(pdev, 0x9, 1); 879 pci_write_config_byte(pdev, 0x9, 1);
@@ -881,10 +881,13 @@ static void __devinit quirk_sb600_sata(struct pci_dev *pdev)
881 pci_write_config_byte(pdev, 0x40, tmp); 881 pci_write_config_byte(pdev, 0x40, tmp);
882 882
883 pdev->class = PCI_CLASS_STORAGE_SATA_AHCI; 883 pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
884 dev_info(&pdev->dev, "set SATA to AHCI mode\n");
884 } 885 }
885} 886}
886DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_sb600_sata); 887DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
887DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_sb600_sata); 888DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
889DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
890DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
888 891
889/* 892/*
890 * Serverworks CSB5 IDE does not fully support native mode 893 * Serverworks CSB5 IDE does not fully support native mode
@@ -1775,6 +1778,68 @@ static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
1775DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, 1778DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
1776 quirk_nvidia_ck804_msi_ht_cap); 1779 quirk_nvidia_ck804_msi_ht_cap);
1777 1780
1781/*
1782 * Force enable MSI mapping capability on HT bridges */
1783static inline void ht_enable_msi_mapping(struct pci_dev *dev)
1784{
1785 int pos, ttl = 48;
1786
1787 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
1788 while (pos && ttl--) {
1789 u8 flags;
1790
1791 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
1792 &flags) == 0) {
1793 dev_info(&dev->dev, "Enabling HT MSI Mapping\n");
1794
1795 pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
1796 flags | HT_MSI_FLAGS_ENABLE);
1797 }
1798 pos = pci_find_next_ht_capability(dev, pos,
1799 HT_CAPTYPE_MSI_MAPPING);
1800 }
1801}
1802
1803static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
1804{
1805 struct pci_dev *host_bridge;
1806 int pos, ttl = 48;
1807
1808 /*
1809 * HT MSI mapping should be disabled on devices that are below
1810 * a non-Hypertransport host bridge. Locate the host bridge...
1811 */
1812 host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
1813 if (host_bridge == NULL) {
1814 dev_warn(&dev->dev,
1815 "nv_msi_ht_cap_quirk didn't locate host bridge\n");
1816 return;
1817 }
1818
1819 pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
1820 if (pos != 0) {
1821 /* Host bridge is to HT */
1822 ht_enable_msi_mapping(dev);
1823 return;
1824 }
1825
1826 /* Host bridge is not to HT, disable HT MSI mapping on this device */
1827 pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
1828 while (pos && ttl--) {
1829 u8 flags;
1830
1831 if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
1832 &flags) == 0) {
1833 dev_info(&dev->dev, "Quirk disabling HT MSI mapping");
1834 pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
1835 flags & ~HT_MSI_FLAGS_ENABLE);
1836 }
1837 pos = pci_find_next_ht_capability(dev, pos,
1838 HT_CAPTYPE_MSI_MAPPING);
1839 }
1840}
1841DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk);
1842
1778static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev) 1843static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
1779{ 1844{
1780 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; 1845 dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
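[Editor's note] Both loops in the new nVidia HT MSI quirk walk the HyperTransport capability chain with a ttl countdown (48) so a malformed or cyclic list cannot hang the machine. A user-space sketch of the same bounded walk over a toy linked list, with invented types.

#include <stdio.h>

struct cap { int type; struct cap *next; };

/* Visit capabilities of the wanted type, but give up after `ttl` hops
 * so a corrupted (cyclic) chain cannot loop forever. */
static int walk_caps(struct cap *pos, int wanted, int ttl)
{
        int found = 0;

        while (pos && ttl--) {
                if (pos->type == wanted)
                        found++;
                pos = pos->next;
        }
        return found;
}

int main(void)
{
        struct cap a = { 1, NULL }, b = { 2, NULL }, c = { 1, NULL };

        a.next = &b;
        b.next = &c;
        c.next = &a;            /* deliberately broken: a cycle */

        /* Terminates after at most 48 hops, like the quirk's ttl = 48. */
        printf("found %d type-1 capabilities\n", walk_caps(&a, 1, 48));
        return 0;
}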
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 262b0439abe9..125e7b7f34ff 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -206,10 +206,8 @@ pci_setup_bridge(struct pci_bus *bus)
206 if (bus->resource[2]->flags & IORESOURCE_PREFETCH) { 206 if (bus->resource[2]->flags & IORESOURCE_PREFETCH) {
207 l = (region.start >> 16) & 0xfff0; 207 l = (region.start >> 16) & 0xfff0;
208 l |= region.end & 0xfff00000; 208 l |= region.end & 0xfff00000;
209#ifdef CONFIG_RESOURCES_64BIT 209 bu = upper_32_bits(region.start);
210 bu = region.start >> 32; 210 lu = upper_32_bits(region.end);
211 lu = region.end >> 32;
212#endif
213 DBG(KERN_INFO " PREFETCH window: 0x%016llx-0x%016llx\n", 211 DBG(KERN_INFO " PREFETCH window: 0x%016llx-0x%016llx\n",
214 (unsigned long long)region.start, 212 (unsigned long long)region.start,
215 (unsigned long long)region.end); 213 (unsigned long long)region.end);
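[Editor's note] The setup-bus change drops the CONFIG_RESOURCES_64BIT #ifdef in favour of upper_32_bits(), which stays well defined even when the underlying type is only 32 bits wide (a literal `>> 32` on a 32-bit value is undefined behaviour). A standalone sketch using the helper's usual two-step-shift shape.

#include <stdint.h>
#include <stdio.h>

/* Same shape as the kernel helper: two 16-bit shifts instead of one
 * 32-bit shift, so the expression is well defined for 32-bit inputs. */
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((uint32_t)(n))

int main(void)
{
        uint64_t wide   = 0x0000001234567890ULL;
        uint32_t narrow = 0x89abcdefU;

        printf("wide:   hi=%#x lo=%#x\n",
               (unsigned)upper_32_bits(wide), (unsigned)lower_32_bits(wide));
        printf("narrow: hi=%#x lo=%#x\n",
               (unsigned)upper_32_bits(narrow), (unsigned)lower_32_bits(narrow));
        return 0;
}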
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 749515534cc0..e54ecc580d9e 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -647,7 +647,12 @@ static int i82092aa_set_mem_map(struct pcmcia_socket *socket, struct pccard_mem_
647 if ( (mem->card_start > 0x3ffffff) || (region.start > region.end) || 647 if ( (mem->card_start > 0x3ffffff) || (region.start > region.end) ||
648 (mem->speed > 1000) ) { 648 (mem->speed > 1000) ) {
649 leave("i82092aa_set_mem_map: invalid address / speed"); 649 leave("i82092aa_set_mem_map: invalid address / speed");
650 printk("invalid mem map for socket %i : %lx to %lx with a start of %x \n",sock,region.start, region.end, mem->card_start); 650 printk("invalid mem map for socket %i: %llx to %llx with a "
651 "start of %x\n",
652 sock,
653 (unsigned long long)region.start,
654 (unsigned long long)region.end,
655 mem->card_start);
651 return -EINVAL; 656 return -EINVAL;
652 } 657 }
653 658
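[Editor's note] The i82092 printk fix casts the resource boundaries to unsigned long long and formats them with %llx, since the resource type may be 32 or 64 bits depending on the configuration. A short illustration of the portable-format idiom with a generic typedef.

#include <stdint.h>
#include <stdio.h>

/* Pretend this is a type whose width depends on the build config. */
typedef uint64_t resource_size_t;

int main(void)
{
        resource_size_t start = 0xd0000000ULL;
        resource_size_t end   = 0xd0000fffULL;

        /* Cast to unsigned long long so one format string works for
         * both the 32-bit and the 64-bit definition of the type. */
        printf("window %llx to %llx\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}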
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 662b4c279cfc..c283a9a70d83 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -36,7 +36,7 @@ static int num = 0;
36 * have irqs (PIC, Timer) because we call acpi_register_gsi. 36 * have irqs (PIC, Timer) because we call acpi_register_gsi.
37 * Finally, only devices that have a CRS method need to be in this list. 37 * Finally, only devices that have a CRS method need to be in this list.
38 */ 38 */
39static struct __initdata acpi_device_id excluded_id_list[] = { 39static struct acpi_device_id excluded_id_list[] __initdata = {
40 {"PNP0C09", 0}, /* EC */ 40 {"PNP0C09", 0}, /* EC */
41 {"PNP0C0F", 0}, /* Link device */ 41 {"PNP0C0F", 0}, /* Link device */
42 {"PNP0000", 0}, /* PIC */ 42 {"PNP0000", 0}, /* PIC */
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index f7e67197a568..a8a51500e1e9 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -105,8 +105,6 @@ static int pnp_dock_event(int dock, struct pnp_docking_station_info *info)
105 char *argv[3], **envp, *buf, *scratch; 105 char *argv[3], **envp, *buf, *scratch;
106 int i = 0, value; 106 int i = 0, value;
107 107
108 if (!current->fs->root)
109 return -EAGAIN;
110 if (!(envp = kcalloc(20, sizeof(char *), GFP_KERNEL))) 108 if (!(envp = kcalloc(20, sizeof(char *), GFP_KERNEL)))
111 return -ENOMEM; 109 return -ENOMEM;
112 if (!(buf = kzalloc(256, GFP_KERNEL))) { 110 if (!(buf = kzalloc(256, GFP_KERNEL))) {
diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c
index 4c066545d176..6c9592ce4996 100644
--- a/drivers/ps3/ps3-lpm.c
+++ b/drivers/ps3/ps3-lpm.c
@@ -76,7 +76,6 @@
76 * 76 *
77 * @pm_control: Shadow of the processor's pm_control register. 77 * @pm_control: Shadow of the processor's pm_control register.
78 * @pm_start_stop: Shadow of the processor's pm_start_stop register. 78 * @pm_start_stop: Shadow of the processor's pm_start_stop register.
79 * @pm_interval: Shadow of the processor's pm_interval register.
80 * @group_control: Shadow of the processor's group_control register. 79 * @group_control: Shadow of the processor's group_control register.
81 * @debug_bus_control: Shadow of the processor's debug_bus_control register. 80 * @debug_bus_control: Shadow of the processor's debug_bus_control register.
82 * 81 *
@@ -91,7 +90,6 @@
91struct ps3_lpm_shadow_regs { 90struct ps3_lpm_shadow_regs {
92 u64 pm_control; 91 u64 pm_control;
93 u64 pm_start_stop; 92 u64 pm_start_stop;
94 u64 pm_interval;
95 u64 group_control; 93 u64 group_control;
96 u64 debug_bus_control; 94 u64 debug_bus_control;
97}; 95};
@@ -181,9 +179,9 @@ void ps3_set_bookmark(u64 bookmark)
181 * includes cycles before the call. 179 * includes cycles before the call.
182 */ 180 */
183 181
184 asm volatile("or 29, 29, 29;"); /* db10cyc */ 182 asm volatile("nop;nop;nop;nop;nop;nop;nop;nop;nop;");
185 mtspr(SPRN_BKMK, bookmark); 183 mtspr(SPRN_BKMK, bookmark);
186 asm volatile("or 29, 29, 29;"); /* db10cyc */ 184 asm volatile("nop;nop;nop;nop;nop;nop;nop;nop;nop;");
187} 185}
188EXPORT_SYMBOL_GPL(ps3_set_bookmark); 186EXPORT_SYMBOL_GPL(ps3_set_bookmark);
189 187
@@ -408,7 +406,14 @@ u32 ps3_read_pm(u32 cpu, enum pm_reg_name reg)
408 case pm_start_stop: 406 case pm_start_stop:
409 return lpm_priv->shadow.pm_start_stop; 407 return lpm_priv->shadow.pm_start_stop;
410 case pm_interval: 408 case pm_interval:
411 return lpm_priv->shadow.pm_interval; 409 result = lv1_set_lpm_interval(lpm_priv->lpm_id, 0, 0, &val);
410 if (result) {
411 val = 0;
 412 dev_dbg(sbd_core(), "%s:%u: lv1 set_interval failed: "
413 "reg %u, %s\n", __func__, __LINE__, reg,
414 ps3_result(result));
415 }
416 return (u32)val;
412 case group_control: 417 case group_control:
413 return lpm_priv->shadow.group_control; 418 return lpm_priv->shadow.group_control;
414 case debug_bus_control: 419 case debug_bus_control:
@@ -475,10 +480,8 @@ void ps3_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
475 lpm_priv->shadow.pm_control = val; 480 lpm_priv->shadow.pm_control = val;
476 break; 481 break;
477 case pm_interval: 482 case pm_interval:
478 if (val != lpm_priv->shadow.pm_interval) 483 result = lv1_set_lpm_interval(lpm_priv->lpm_id, val,
479 result = lv1_set_lpm_interval(lpm_priv->lpm_id, val, 484 PS3_WRITE_PM_MASK, &dummy);
480 PS3_WRITE_PM_MASK, &dummy);
481 lpm_priv->shadow.pm_interval = val;
482 break; 485 break;
483 case pm_start_stop: 486 case pm_start_stop:
484 if (val != lpm_priv->shadow.pm_start_stop) 487 if (val != lpm_priv->shadow.pm_start_stop)
@@ -1140,7 +1143,6 @@ int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
1140 1143
1141 lpm_priv->shadow.pm_control = PS3_LPM_SHADOW_REG_INIT; 1144 lpm_priv->shadow.pm_control = PS3_LPM_SHADOW_REG_INIT;
1142 lpm_priv->shadow.pm_start_stop = PS3_LPM_SHADOW_REG_INIT; 1145 lpm_priv->shadow.pm_start_stop = PS3_LPM_SHADOW_REG_INIT;
1143 lpm_priv->shadow.pm_interval = PS3_LPM_SHADOW_REG_INIT;
1144 lpm_priv->shadow.group_control = PS3_LPM_SHADOW_REG_INIT; 1146 lpm_priv->shadow.group_control = PS3_LPM_SHADOW_REG_INIT;
1145 lpm_priv->shadow.debug_bus_control = PS3_LPM_SHADOW_REG_INIT; 1147 lpm_priv->shadow.debug_bus_control = PS3_LPM_SHADOW_REG_INIT;
1146 1148
diff --git a/drivers/ps3/ps3-sys-manager.c b/drivers/ps3/ps3-sys-manager.c
index c3c3aba3ffce..d4f6f960dd18 100644
--- a/drivers/ps3/ps3-sys-manager.c
+++ b/drivers/ps3/ps3-sys-manager.c
@@ -28,10 +28,6 @@
28 28
29#include "vuart.h" 29#include "vuart.h"
30 30
31MODULE_AUTHOR("Sony Corporation");
32MODULE_LICENSE("GPL v2");
33MODULE_DESCRIPTION("PS3 System Manager");
34
35/** 31/**
36 * ps3_sys_manager - PS3 system manager driver. 32 * ps3_sys_manager - PS3 system manager driver.
37 * 33 *
@@ -142,9 +138,11 @@ enum ps3_sys_manager_attr {
142 138
143/** 139/**
144 * enum ps3_sys_manager_event - External event type, reported by system manager. 140 * enum ps3_sys_manager_event - External event type, reported by system manager.
145 * @PS3_SM_EVENT_POWER_PRESSED: payload.value not used. 141 * @PS3_SM_EVENT_POWER_PRESSED: payload.value =
142 * enum ps3_sys_manager_button_event.
146 * @PS3_SM_EVENT_POWER_RELEASED: payload.value = time pressed in millisec. 143 * @PS3_SM_EVENT_POWER_RELEASED: payload.value = time pressed in millisec.
147 * @PS3_SM_EVENT_RESET_PRESSED: payload.value not used. 144 * @PS3_SM_EVENT_RESET_PRESSED: payload.value =
145 * enum ps3_sys_manager_button_event.
148 * @PS3_SM_EVENT_RESET_RELEASED: payload.value = time pressed in millisec. 146 * @PS3_SM_EVENT_RESET_RELEASED: payload.value = time pressed in millisec.
149 * @PS3_SM_EVENT_THERMAL_ALERT: payload.value = thermal zone id. 147 * @PS3_SM_EVENT_THERMAL_ALERT: payload.value = thermal zone id.
150 * @PS3_SM_EVENT_THERMAL_CLEARED: payload.value = thermal zone id. 148 * @PS3_SM_EVENT_THERMAL_CLEARED: payload.value = thermal zone id.
@@ -162,6 +160,17 @@ enum ps3_sys_manager_event {
162}; 160};
163 161
164/** 162/**
163 * enum ps3_sys_manager_button_event - Button event payload values.
164 * @PS3_SM_BUTTON_EVENT_HARD: Hardware generated event.
165 * @PS3_SM_BUTTON_EVENT_SOFT: Software generated event.
166 */
167
168enum ps3_sys_manager_button_event {
169 PS3_SM_BUTTON_EVENT_HARD = 0,
170 PS3_SM_BUTTON_EVENT_SOFT = 1,
171};
172
173/**
165 * enum ps3_sys_manager_next_op - Operation to perform after lpar is destroyed. 174 * enum ps3_sys_manager_next_op - Operation to perform after lpar is destroyed.
166 */ 175 */
167 176
@@ -181,7 +190,9 @@ enum ps3_sys_manager_next_op {
181 * @PS3_SM_WAKE_P_O_R: Power on reset. 190 * @PS3_SM_WAKE_P_O_R: Power on reset.
182 * 191 *
183 * Additional wakeup sources when specifying PS3_SM_NEXT_OP_SYS_SHUTDOWN. 192 * Additional wakeup sources when specifying PS3_SM_NEXT_OP_SYS_SHUTDOWN.
184 * System will always wake from the PS3_SM_WAKE_DEFAULT sources. 193 * The system will always wake from the PS3_SM_WAKE_DEFAULT sources.
194 * Sources listed here are the only ones available to guests in the
195 * other-os lpar.
185 */ 196 */
186 197
187enum ps3_sys_manager_wake_source { 198enum ps3_sys_manager_wake_source {
@@ -189,7 +200,7 @@ enum ps3_sys_manager_wake_source {
189 PS3_SM_WAKE_DEFAULT = 0, 200 PS3_SM_WAKE_DEFAULT = 0,
190 PS3_SM_WAKE_RTC = 0x00000040, 201 PS3_SM_WAKE_RTC = 0x00000040,
191 PS3_SM_WAKE_RTC_ERROR = 0x00000080, 202 PS3_SM_WAKE_RTC_ERROR = 0x00000080,
192 PS3_SM_WAKE_P_O_R = 0x10000000, 203 PS3_SM_WAKE_P_O_R = 0x80000000,
193}; 204};
194 205
195/** 206/**
@@ -418,8 +429,10 @@ static int ps3_sys_manager_handle_event(struct ps3_system_bus_device *dev)
418 429
419 switch (event.type) { 430 switch (event.type) {
420 case PS3_SM_EVENT_POWER_PRESSED: 431 case PS3_SM_EVENT_POWER_PRESSED:
421 dev_dbg(&dev->core, "%s:%d: POWER_PRESSED\n", 432 dev_dbg(&dev->core, "%s:%d: POWER_PRESSED (%s)\n",
422 __func__, __LINE__); 433 __func__, __LINE__,
434 (event.value == PS3_SM_BUTTON_EVENT_SOFT ? "soft"
435 : "hard"));
423 ps3_sm_force_power_off = 1; 436 ps3_sm_force_power_off = 1;
424 /* 437 /*
 425 * A memory barrier is used here to sync memory since 438 * A memory barrier is used here to sync memory since
@@ -434,8 +447,10 @@ static int ps3_sys_manager_handle_event(struct ps3_system_bus_device *dev)
434 __func__, __LINE__, event.value); 447 __func__, __LINE__, event.value);
435 break; 448 break;
436 case PS3_SM_EVENT_RESET_PRESSED: 449 case PS3_SM_EVENT_RESET_PRESSED:
437 dev_dbg(&dev->core, "%s:%d: RESET_PRESSED\n", 450 dev_dbg(&dev->core, "%s:%d: RESET_PRESSED (%s)\n",
438 __func__, __LINE__); 451 __func__, __LINE__,
452 (event.value == PS3_SM_BUTTON_EVENT_SOFT ? "soft"
453 : "hard"));
439 ps3_sm_force_power_off = 0; 454 ps3_sm_force_power_off = 0;
440 /* 455 /*
 441 * A memory barrier is used here to sync memory since 456 * A memory barrier is used here to sync memory since
@@ -622,7 +637,7 @@ static void ps3_sys_manager_final_restart(struct ps3_system_bus_device *dev)
622 ps3_vuart_cancel_async(dev); 637 ps3_vuart_cancel_async(dev);
623 638
624 ps3_sys_manager_send_attr(dev, 0); 639 ps3_sys_manager_send_attr(dev, 0);
625 ps3_sys_manager_send_next_op(dev, PS3_SM_NEXT_OP_LPAR_REBOOT, 640 ps3_sys_manager_send_next_op(dev, PS3_SM_NEXT_OP_SYS_REBOOT,
626 PS3_SM_WAKE_DEFAULT); 641 PS3_SM_WAKE_DEFAULT);
627 ps3_sys_manager_send_request_shutdown(dev); 642 ps3_sys_manager_send_request_shutdown(dev);
628 643
@@ -699,4 +714,7 @@ static int __init ps3_sys_manager_init(void)
699module_init(ps3_sys_manager_init); 714module_init(ps3_sys_manager_init);
700/* Module remove not supported. */ 715/* Module remove not supported. */
701 716
717MODULE_AUTHOR("Sony Corporation");
718MODULE_LICENSE("GPL v2");
719MODULE_DESCRIPTION("PS3 System Manager");
702MODULE_ALIAS(PS3_MODULE_ALIAS_SYSTEM_MANAGER); 720MODULE_ALIAS(PS3_MODULE_ALIAS_SYSTEM_MANAGER);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index e059f94c79eb..f3ee2ad566b4 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -388,6 +388,7 @@ static int cmos_procfs(struct device *dev, struct seq_file *seq)
388 return seq_printf(seq, 388 return seq_printf(seq,
389 "periodic_IRQ\t: %s\n" 389 "periodic_IRQ\t: %s\n"
390 "update_IRQ\t: %s\n" 390 "update_IRQ\t: %s\n"
391 "HPET_emulated\t: %s\n"
391 // "square_wave\t: %s\n" 392 // "square_wave\t: %s\n"
392 // "BCD\t\t: %s\n" 393 // "BCD\t\t: %s\n"
393 "DST_enable\t: %s\n" 394 "DST_enable\t: %s\n"
@@ -395,6 +396,7 @@ static int cmos_procfs(struct device *dev, struct seq_file *seq)
395 "batt_status\t: %s\n", 396 "batt_status\t: %s\n",
396 (rtc_control & RTC_PIE) ? "yes" : "no", 397 (rtc_control & RTC_PIE) ? "yes" : "no",
397 (rtc_control & RTC_UIE) ? "yes" : "no", 398 (rtc_control & RTC_UIE) ? "yes" : "no",
399 is_hpet_enabled() ? "yes" : "no",
398 // (rtc_control & RTC_SQWE) ? "yes" : "no", 400 // (rtc_control & RTC_SQWE) ? "yes" : "no",
399 // (rtc_control & RTC_DM_BINARY) ? "no" : "yes", 401 // (rtc_control & RTC_DM_BINARY) ? "no" : "yes",
400 (rtc_control & RTC_DST_EN) ? "yes" : "no", 402 (rtc_control & RTC_DST_EN) ? "yes" : "no",
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d984e0fae630..ccf46c96adb4 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1149,12 +1149,14 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
1149{ 1149{
1150 struct list_head *l, *n; 1150 struct list_head *l, *n;
1151 struct dasd_ccw_req *cqr; 1151 struct dasd_ccw_req *cqr;
1152 struct dasd_block *block;
1152 1153
1153 list_for_each_safe(l, n, final_queue) { 1154 list_for_each_safe(l, n, final_queue) {
1154 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1155 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1155 list_del_init(&cqr->devlist); 1156 list_del_init(&cqr->devlist);
1156 if (cqr->block) 1157 block = cqr->block;
1157 spin_lock_bh(&cqr->block->queue_lock); 1158 if (block)
1159 spin_lock_bh(&block->queue_lock);
1158 switch (cqr->status) { 1160 switch (cqr->status) {
1159 case DASD_CQR_SUCCESS: 1161 case DASD_CQR_SUCCESS:
1160 cqr->status = DASD_CQR_DONE; 1162 cqr->status = DASD_CQR_DONE;
@@ -1172,15 +1174,13 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
1172 cqr, cqr->status); 1174 cqr, cqr->status);
1173 BUG(); 1175 BUG();
1174 } 1176 }
1175 if (cqr->block)
1176 spin_unlock_bh(&cqr->block->queue_lock);
1177 if (cqr->callback != NULL) 1177 if (cqr->callback != NULL)
1178 (cqr->callback)(cqr, cqr->callback_data); 1178 (cqr->callback)(cqr, cqr->callback_data);
1179 if (block)
1180 spin_unlock_bh(&block->queue_lock);
1179 } 1181 }
1180} 1182}
1181 1183
1182
1183
1184/* 1184/*
1185 * Take a look at the first request on the ccw queue and check 1185 * Take a look at the first request on the ccw queue and check
1186 * if it reached its expire time. If so, terminate the IO. 1186 * if it reached its expire time. If so, terminate the IO.
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 3faf0538b328..e6c94dbfdeaa 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -666,7 +666,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
666 page_addr = (unsigned long) 666 page_addr = (unsigned long)
667 page_address(bvec->bv_page) + bvec->bv_offset; 667 page_address(bvec->bv_page) + bvec->bv_offset;
668 source_addr = dev_info->start + (index<<12) + bytes_done; 668 source_addr = dev_info->start + (index<<12) + bytes_done;
669 if (unlikely(page_addr & 4095) != 0 || (bvec->bv_len & 4095) != 0) 669 if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0)
670 // More paranoia. 670 // More paranoia.
671 goto fail; 671 goto fail;
672 if (bio_data_dir(bio) == READ) { 672 if (bio_data_dir(bio) == READ) {
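[Editor's note] The dcssblk change moves the `!= 0` comparison inside unlikely(). With the usual definition of the macro the old form still tested the right thing, since unlikely() normalizes its argument to 0 or 1, but the branch hint now covers the whole rare condition. A plain user-space rendition, assuming that standard definition.

#include <stdio.h>

/* The usual definition: a branch-prediction hint that otherwise
 * behaves like its (normalized) argument. */
#define unlikely(x) __builtin_expect(!!(x), 0)

static int check(unsigned long page_addr, unsigned int len)
{
        /* Comparison inside the hint, so the whole rare condition is
         * what gets marked unlikely. */
        if (unlikely((page_addr & 4095) != 0) || (len & 4095) != 0)
                return -1;      /* misaligned: bail out */
        return 0;
}

int main(void)
{
        printf("%d\n", check(0x1000, 4096));   /*  0: page aligned   */
        printf("%d\n", check(0x1004, 4096));   /* -1: offset in page */
        return 0;
}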
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 25629b92dec3..2c7a1ee6b041 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -29,10 +29,10 @@ static ext_int_info_t ext_int_info_hwc;
29/* Lock to protect internal data consistency. */ 29/* Lock to protect internal data consistency. */
30static DEFINE_SPINLOCK(sclp_lock); 30static DEFINE_SPINLOCK(sclp_lock);
31 31
32/* Mask of events that we can receive from the sclp interface. */ 32/* Mask of events that we can send to the sclp interface. */
33static sccb_mask_t sclp_receive_mask; 33static sccb_mask_t sclp_receive_mask;
34 34
35/* Mask of events that we can send to the sclp interface. */ 35/* Mask of events that we can receive from the sclp interface. */
36static sccb_mask_t sclp_send_mask; 36static sccb_mask_t sclp_send_mask;
37 37
38/* List of registered event listeners and senders. */ 38/* List of registered event listeners and senders. */
@@ -380,7 +380,7 @@ sclp_interrupt_handler(__u16 code)
380 } 380 }
381 sclp_running_state = sclp_running_state_idle; 381 sclp_running_state = sclp_running_state_idle;
382 } 382 }
383 if (evbuf_pending && sclp_receive_mask != 0 && 383 if (evbuf_pending &&
384 sclp_activation_state == sclp_activation_state_active) 384 sclp_activation_state == sclp_activation_state_active)
385 __sclp_queue_read_req(); 385 __sclp_queue_read_req();
386 spin_unlock(&sclp_lock); 386 spin_unlock(&sclp_lock);
@@ -459,8 +459,8 @@ sclp_dispatch_state_change(void)
459 reg = NULL; 459 reg = NULL;
460 list_for_each(l, &sclp_reg_list) { 460 list_for_each(l, &sclp_reg_list) {
461 reg = list_entry(l, struct sclp_register, list); 461 reg = list_entry(l, struct sclp_register, list);
462 receive_mask = reg->receive_mask & sclp_receive_mask; 462 receive_mask = reg->send_mask & sclp_receive_mask;
463 send_mask = reg->send_mask & sclp_send_mask; 463 send_mask = reg->receive_mask & sclp_send_mask;
464 if (reg->sclp_receive_mask != receive_mask || 464 if (reg->sclp_receive_mask != receive_mask ||
465 reg->sclp_send_mask != send_mask) { 465 reg->sclp_send_mask != send_mask) {
466 reg->sclp_receive_mask = receive_mask; 466 reg->sclp_receive_mask = receive_mask;
@@ -615,8 +615,8 @@ struct init_sccb {
615 u16 mask_length; 615 u16 mask_length;
616 sccb_mask_t receive_mask; 616 sccb_mask_t receive_mask;
617 sccb_mask_t send_mask; 617 sccb_mask_t send_mask;
618 sccb_mask_t sclp_send_mask;
619 sccb_mask_t sclp_receive_mask; 618 sccb_mask_t sclp_receive_mask;
619 sccb_mask_t sclp_send_mask;
620} __attribute__((packed)); 620} __attribute__((packed));
621 621
622/* Prepare init mask request. Called while sclp_lock is locked. */ 622/* Prepare init mask request. Called while sclp_lock is locked. */
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index aa8186d18aee..bac80e856f97 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -122,11 +122,13 @@ struct sclp_req {
122/* of some routines it wants to be called from the low level driver */ 122/* of some routines it wants to be called from the low level driver */
123struct sclp_register { 123struct sclp_register {
124 struct list_head list; 124 struct list_head list;
125 /* event masks this user is registered for */ 125 /* User wants to receive: */
126 sccb_mask_t receive_mask; 126 sccb_mask_t receive_mask;
127 /* User wants to send: */
127 sccb_mask_t send_mask; 128 sccb_mask_t send_mask;
128 /* actually present events */ 129 /* H/W can receive: */
129 sccb_mask_t sclp_receive_mask; 130 sccb_mask_t sclp_receive_mask;
131 /* H/W can send: */
130 sccb_mask_t sclp_send_mask; 132 sccb_mask_t sclp_send_mask;
131 /* called if event type availability changes */ 133 /* called if event type availability changes */
132 void (*state_change_fn)(struct sclp_register *); 134 void (*state_change_fn)(struct sclp_register *);
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 9dc77f14fa52..b8f35bc52b7b 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -64,7 +64,7 @@ static int __init sclp_conf_init(void)
64 return rc; 64 return rc;
65 } 65 }
66 66
67 if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK)) { 67 if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) {
68 printk(KERN_WARNING TAG "no configuration management.\n"); 68 printk(KERN_WARNING TAG "no configuration management.\n");
69 sclp_unregister(&sclp_conf_register); 69 sclp_unregister(&sclp_conf_register);
70 rc = -ENOSYS; 70 rc = -ENOSYS;
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index 41617032afdc..9f37456222e9 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -129,7 +129,7 @@ static int cpi_req(void)
129 "to hardware console.\n"); 129 "to hardware console.\n");
130 goto out; 130 goto out;
131 } 131 }
132 if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) { 132 if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) {
133 printk(KERN_WARNING "cpi: no control program " 133 printk(KERN_WARNING "cpi: no control program "
134 "identification support\n"); 134 "identification support\n");
135 rc = -EOPNOTSUPP; 135 rc = -EOPNOTSUPP;
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index ad7195d3de0c..da09781b32f7 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -452,10 +452,10 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
452 return -EIO; 452 return -EIO;
453 453
454 sccb = buffer->sccb; 454 sccb = buffer->sccb;
455 if (sclp_rw_event.sclp_send_mask & EVTYP_MSG_MASK) 455 if (sclp_rw_event.sclp_receive_mask & EVTYP_MSG_MASK)
456 /* Use normal write message */ 456 /* Use normal write message */
457 sccb->msg_buf.header.type = EVTYP_MSG; 457 sccb->msg_buf.header.type = EVTYP_MSG;
458 else if (sclp_rw_event.sclp_send_mask & EVTYP_PMSGCMD_MASK) 458 else if (sclp_rw_event.sclp_receive_mask & EVTYP_PMSGCMD_MASK)
459 /* Use write priority message */ 459 /* Use write priority message */
460 sccb->msg_buf.header.type = EVTYP_PMSGCMD; 460 sccb->msg_buf.header.type = EVTYP_PMSGCMD;
461 else 461 else
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index f47f4a768be5..92f527201792 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -202,7 +202,7 @@ sclp_vt220_callback(struct sclp_req *request, void *data)
202static int 202static int
203__sclp_vt220_emit(struct sclp_vt220_request *request) 203__sclp_vt220_emit(struct sclp_vt220_request *request)
204{ 204{
205 if (!(sclp_vt220_register.sclp_send_mask & EVTYP_VT220MSG_MASK)) { 205 if (!(sclp_vt220_register.sclp_receive_mask & EVTYP_VT220MSG_MASK)) {
206 request->sclp_req.status = SCLP_REQ_FAILED; 206 request->sclp_req.status = SCLP_REQ_FAILED;
207 return -EIO; 207 return -EIO;
208 } 208 }
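The sclp_config, sclp_cpi, sclp_rw and sclp_vt220 hunks above all follow from the clarified mask semantics in sclp.h: sclp_receive_mask now means "the SCLP hardware can receive this event type from us" and sclp_send_mask means "the hardware can send it to us", so writers test sclp_receive_mask and readers test sclp_send_mask. A minimal sketch of a writer-side check under that convention (event type reused from the hunks above, everything else illustrative, not from the patch):

/* Sketch only: emit a message event only if the hardware can receive it. */
static struct sclp_register my_event = {
	.send_mask = EVTYP_MSG_MASK,		/* we want to send messages */
};

static int my_can_emit(void)
{
	return (my_event.sclp_receive_mask & EVTYP_MSG_MASK) != 0;
}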
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d35dc3f25d06..fec004f62bcf 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -32,7 +32,7 @@
32#include "io_sch.h" 32#include "io_sch.h"
33 33
34static struct timer_list recovery_timer; 34static struct timer_list recovery_timer;
35static spinlock_t recovery_lock; 35static DEFINE_SPINLOCK(recovery_lock);
36static int recovery_phase; 36static int recovery_phase;
37static const unsigned long recovery_delay[] = { 3, 30, 300 }; 37static const unsigned long recovery_delay[] = { 3, 30, 300 };
38 38
@@ -1535,7 +1535,7 @@ static int recovery_check(struct device *dev, void *data)
1535 return 0; 1535 return 0;
1536} 1536}
1537 1537
1538static void recovery_func(unsigned long data) 1538static void recovery_work_func(struct work_struct *unused)
1539{ 1539{
1540 int redo = 0; 1540 int redo = 0;
1541 1541
@@ -1553,6 +1553,17 @@ static void recovery_func(unsigned long data)
1553 CIO_MSG_EVENT(2, "recovery: end\n"); 1553 CIO_MSG_EVENT(2, "recovery: end\n");
1554} 1554}
1555 1555
1556static DECLARE_WORK(recovery_work, recovery_work_func);
1557
1558static void recovery_func(unsigned long data)
1559{
1560 /*
1561 * We can't do our recovery in softirq context and it's not
1562 * performance critical, so we schedule it.
1563 */
1564 schedule_work(&recovery_work);
1565}
1566
1556void ccw_device_schedule_recovery(void) 1567void ccw_device_schedule_recovery(void)
1557{ 1568{
1558 unsigned long flags; 1569 unsigned long flags;
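A minimal sketch, not from the patch, of the timer-to-workqueue hand-off the cio recovery path adopts here: the timer callback fires in softirq context, so it only schedules a work item and the sleepable recovery logic runs later in process context (the my_* names are illustrative):

#include <linux/timer.h>
#include <linux/workqueue.h>

static void my_recovery_work(struct work_struct *unused)
{
	/* process context: may sleep, take mutexes, do the real recovery */
}

static DECLARE_WORK(my_recovery, my_recovery_work);

static void my_recovery_timer_fn(unsigned long data)
{
	/* softirq context: defer, never do the heavy lifting here */
	schedule_work(&my_recovery);
}

static DEFINE_TIMER(my_recovery_timer, my_recovery_timer_fn, 0, 0);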
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 097fc0967e9d..2b5bfb7c69e5 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -32,7 +32,7 @@
32 32
33#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/init.h> 34#include <linux/init.h>
35 35#include <linux/delay.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/kernel.h> 37#include <linux/kernel.h>
38#include <linux/proc_fs.h> 38#include <linux/proc_fs.h>
@@ -1215,9 +1215,6 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
1215 1215
1216 if (!no_used) 1216 if (!no_used)
1217 return 1; 1217 return 1;
1218 if (!q->siga_sync && !irq->is_qebsm)
1219 /* we'll check for more primed buffers in qeth_stop_polling */
1220 return 0;
1221 if (irq->is_qebsm) { 1218 if (irq->is_qebsm) {
1222 count = 1; 1219 count = 1;
1223 start_buf = q->first_to_check; 1220 start_buf = q->first_to_check;
@@ -3332,13 +3329,7 @@ qdio_activate(struct ccw_device *cdev, int flags)
3332 } 3329 }
3333 } 3330 }
3334 3331
3335 wait_event_interruptible_timeout(cdev->private->wait_q, 3332 msleep(QDIO_ACTIVATE_TIMEOUT);
3336 ((irq_ptr->state ==
3337 QDIO_IRQ_STATE_STOPPED) ||
3338 (irq_ptr->state ==
3339 QDIO_IRQ_STATE_ERR)),
3340 QDIO_ACTIVATE_TIMEOUT);
3341
3342 switch (irq_ptr->state) { 3333 switch (irq_ptr->state) {
3343 case QDIO_IRQ_STATE_STOPPED: 3334 case QDIO_IRQ_STATE_STOPPED:
3344 case QDIO_IRQ_STATE_ERR: 3335 case QDIO_IRQ_STATE_ERR:
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 37870e4e938e..da8a272fd75b 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -57,10 +57,10 @@
57 of the queue to 0 */ 57 of the queue to 0 */
58 58
59#define QDIO_ESTABLISH_TIMEOUT (1*HZ) 59#define QDIO_ESTABLISH_TIMEOUT (1*HZ)
60#define QDIO_ACTIVATE_TIMEOUT (5*HZ)
61#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ) 60#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
62#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ) 61#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
63#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ) 62#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ)
63#define QDIO_ACTIVATE_TIMEOUT (5) /* 5 ms */
64 64
65enum qdio_irq_states { 65enum qdio_irq_states {
66 QDIO_IRQ_STATE_INACTIVE, 66 QDIO_IRQ_STATE_INACTIVE,
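The unit of QDIO_ACTIVATE_TIMEOUT changes along with the call site above: wait_event_interruptible_timeout() takes a jiffies count (hence the old 5*HZ), while msleep() takes milliseconds (hence the new plain 5). A small illustrative sketch with hypothetical names:

#include <linux/delay.h>
#include <linux/wait.h>

#define MY_TIMEOUT_JIFFIES	(5 * HZ)	/* for wait_event_*_timeout() */
#define MY_TIMEOUT_MS		5		/* for msleep() */

static void my_wait(wait_queue_head_t *wq, int *done)
{
	/* third argument is a jiffies count */
	wait_event_interruptible_timeout(*wq, *done, MY_TIMEOUT_JIFFIES);

	/* argument is milliseconds */
	msleep(MY_TIMEOUT_MS);
}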
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 1ee9a6f06541..1a89d989f348 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -114,11 +114,20 @@ do { \
114 debug_event(claw_dbf_##name,level,(void*)(addr),len); \ 114 debug_event(claw_dbf_##name,level,(void*)(addr),len); \
115} while (0) 115} while (0)
116 116
117/* Allow to sort out low debug levels early to avoid wasted sprints */
118static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
119{
120 return (level <= dbf_grp->level);
121}
122
117#define CLAW_DBF_TEXT_(level,name,text...) \ 123#define CLAW_DBF_TEXT_(level,name,text...) \
118do { \ 124 do { \
119 sprintf(debug_buffer, text); \ 125 if (claw_dbf_passes(claw_dbf_##name, level)) { \
120 debug_text_event(claw_dbf_##name,level, debug_buffer);\ 126 sprintf(debug_buffer, text); \
121} while (0) 127 debug_text_event(claw_dbf_##name, level, \
128 debug_buffer); \
129 } \
130 } while (0)
122 131
123/******************************************************* 132/*******************************************************
124* Define Control Blocks * 133* Define Control Blocks *
@@ -278,8 +287,6 @@ struct claw_env {
278 __u16 write_size; /* write buffer size */ 287 __u16 write_size; /* write buffer size */
279 __u16 dev_id; /* device ident */ 288 __u16 dev_id; /* device ident */
280 __u8 packing; /* are we packing? */ 289 __u8 packing; /* are we packing? */
281 volatile __u8 queme_switch; /* gate for imed packing */
282 volatile unsigned long pk_delay; /* Delay for adaptive packing */
283 __u8 in_use; /* device active flag */ 290 __u8 in_use; /* device active flag */
284 struct net_device *ndev; /* backward ptr to the net dev*/ 291 struct net_device *ndev; /* backward ptr to the net dev*/
285}; 292};
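The claw change above, and the lcs and netiucv hunks that follow, add the same guard: check the debug facility's current level before formatting, so the sprintf() is skipped when the event would be dropped anyway. A generic sketch of the pattern, assuming the s390 debug_info_t/debug_text_event() API used above (buffer size arbitrary):

#include <asm/debug.h>		/* s390 debug facility */

static inline int my_dbf_passes(debug_info_t *dbf, int level)
{
	return level <= dbf->level;
}

#define MY_DBF_TEXT_(dbf, level, text...)			\
	do {							\
		if (my_dbf_passes(dbf, level)) {		\
			char my_buf[256];			\
			sprintf(my_buf, text);			\
			debug_text_event(dbf, level, my_buf);	\
		}						\
	} while (0)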
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 7bfe8d707a34..f51ed9972587 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -94,7 +94,7 @@ static int
94lcs_register_debug_facility(void) 94lcs_register_debug_facility(void)
95{ 95{
96 lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8); 96 lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8);
97 lcs_dbf_trace = debug_register("lcs_trace", 2, 2, 8); 97 lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8);
98 if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) { 98 if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
99 PRINT_ERR("Not enough memory for debug facility.\n"); 99 PRINT_ERR("Not enough memory for debug facility.\n");
100 lcs_unregister_debug_facility(); 100 lcs_unregister_debug_facility();
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 8976fb0b070a..d58fea52557d 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -16,11 +16,19 @@ do { \
16 debug_event(lcs_dbf_##name,level,(void*)(addr),len); \ 16 debug_event(lcs_dbf_##name,level,(void*)(addr),len); \
17} while (0) 17} while (0)
18 18
19/* Allow to sort out low debug levels early to avoid wasted sprints */
20static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
21{
22 return (level <= dbf_grp->level);
23}
24
19#define LCS_DBF_TEXT_(level,name,text...) \ 25#define LCS_DBF_TEXT_(level,name,text...) \
20do { \ 26 do { \
21 sprintf(debug_buffer, text); \ 27 if (lcs_dbf_passes(lcs_dbf_##name, level)) { \
22 debug_text_event(lcs_dbf_##name,level, debug_buffer);\ 28 sprintf(debug_buffer, text); \
23} while (0) 29 debug_text_event(lcs_dbf_##name, level, debug_buffer); \
30 } \
31 } while (0)
24 32
25/** 33/**
26 * sysfs related stuff 34 * sysfs related stuff
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index f3d893cfe61d..874a19994489 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -97,12 +97,22 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
97 97
98DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf); 98DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
99 99
100#define IUCV_DBF_TEXT_(name,level,text...) \ 100/* Allow to sort out low debug levels early to avoid wasted sprints */
101 do { \ 101static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
102 char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \ 102{
103 sprintf(iucv_dbf_txt_buf, text); \ 103 return (level <= dbf_grp->level);
104 debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \ 104}
105 put_cpu_var(iucv_dbf_txt_buf); \ 105
106#define IUCV_DBF_TEXT_(name, level, text...) \
107 do { \
108 if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
109 char* iucv_dbf_txt_buf = \
110 get_cpu_var(iucv_dbf_txt_buf); \
111 sprintf(iucv_dbf_txt_buf, text); \
112 debug_text_event(iucv_dbf_##name, level, \
113 iucv_dbf_txt_buf); \
114 put_cpu_var(iucv_dbf_txt_buf); \
115 } \
106 } while (0) 116 } while (0)
107 117
108#define IUCV_DBF_SPRINTF(name,level,text...) \ 118#define IUCV_DBF_SPRINTF(name,level,text...) \
@@ -137,6 +147,7 @@ PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
137#define PRINTK_HEADER " iucv: " /* for debugging */ 147#define PRINTK_HEADER " iucv: " /* for debugging */
138 148
139static struct device_driver netiucv_driver = { 149static struct device_driver netiucv_driver = {
150 .owner = THIS_MODULE,
140 .name = "netiucv", 151 .name = "netiucv",
141 .bus = &iucv_bus, 152 .bus = &iucv_bus,
142}; 153};
@@ -572,9 +583,9 @@ static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
572} 583}
573 584
574/** 585/**
575 * Dummy NOP action for all statemachines 586 * NOP action for statemachines
576 */ 587 */
577static void fsm_action_nop(fsm_instance *fi, int event, void *arg) 588static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
578{ 589{
579} 590}
580 591
@@ -1110,7 +1121,7 @@ static const fsm_node dev_fsm[] = {
1110 1121
1111 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop }, 1122 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
1112 { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown }, 1123 { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
1113 { DEV_STATE_RUNNING, DEV_EVENT_CONUP, fsm_action_nop }, 1124 { DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop },
1114}; 1125};
1115 1126
1116static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node); 1127static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
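netiucv's variant of that guard also keeps its per-CPU format buffer; get_cpu_var()/put_cpu_var() disable and re-enable preemption while the buffer is in use. A stand-alone sketch of just that part (names illustrative, not the driver's code):

#include <linux/percpu.h>
#include <asm/debug.h>

static DEFINE_PER_CPU(char[256], my_txt_buf);

static void my_log(debug_info_t *dbf, int level, const char *msg)
{
	if (level <= dbf->level) {
		char *buf = get_cpu_var(my_txt_buf);	/* preemption off */

		snprintf(buf, 256, "%s", msg);
		debug_text_event(dbf, level, buf);
		put_cpu_var(my_txt_buf);		/* preemption back on */
	}
}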
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a5f0aaaf0dd4..c46666a24809 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -722,7 +722,7 @@ config SCSI_FD_MCS
722 722
723config SCSI_GDTH 723config SCSI_GDTH
724 tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support" 724 tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support"
725 depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API && PCI_LEGACY 725 depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API
726 ---help--- 726 ---help---
727 Formerly called GDT SCSI Disk Array Controller Support. 727 Formerly called GDT SCSI Disk Array Controller Support.
728 728
@@ -992,6 +992,16 @@ config SCSI_IZIP_SLOW_CTR
992 992
993 Generally, saying N is fine. 993 Generally, saying N is fine.
994 994
995config SCSI_MVSAS
996 tristate "Marvell 88SE6440 SAS/SATA support"
997 depends on PCI && SCSI
998 select SCSI_SAS_LIBSAS
999 help
1000 This driver supports Marvell SAS/SATA PCI devices.
1001
 1002 To compile this driver as a module, choose M here: the module
1003 will be called mvsas.
1004
995config SCSI_NCR53C406A 1005config SCSI_NCR53C406A
996 tristate "NCR53c406a SCSI support" 1006 tristate "NCR53c406a SCSI support"
997 depends on ISA && SCSI 1007 depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 925c26b4fff9..23e6ecbd4778 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -119,6 +119,7 @@ obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
119obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/ 119obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
120obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o 120obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
121obj-$(CONFIG_SCSI_STEX) += stex.o 121obj-$(CONFIG_SCSI_STEX) += stex.o
122obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
122obj-$(CONFIG_PS3_ROM) += ps3rom.o 123obj-$(CONFIG_PS3_ROM) += ps3rom.o
123 124
124obj-$(CONFIG_ARM) += arm/ 125obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index bfd0e64964ac..c05092fd3a9d 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -144,51 +144,77 @@ static char *aac_get_status_string(u32 status);
144 */ 144 */
145 145
146static int nondasd = -1; 146static int nondasd = -1;
147static int aac_cache = 0; 147static int aac_cache;
148static int dacmode = -1; 148static int dacmode = -1;
149 149int aac_msi;
150int aac_commit = -1; 150int aac_commit = -1;
151int startup_timeout = 180; 151int startup_timeout = 180;
152int aif_timeout = 120; 152int aif_timeout = 120;
153 153
154module_param(nondasd, int, S_IRUGO|S_IWUSR); 154module_param(nondasd, int, S_IRUGO|S_IWUSR);
155MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on"); 155MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
156 " 0=off, 1=on");
156module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR); 157module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
157MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n\tbit 0 - Disable FUA in WRITE SCSI commands\n\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n\tbit 2 - Disable only if Battery not protecting Cache"); 158MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
159 "\tbit 0 - Disable FUA in WRITE SCSI commands\n"
160 "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
161 "\tbit 2 - Disable only if Battery not protecting Cache");
158module_param(dacmode, int, S_IRUGO|S_IWUSR); 162module_param(dacmode, int, S_IRUGO|S_IWUSR);
159MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on"); 163MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC."
164 " 0=off, 1=on");
160module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR); 165module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
161MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on"); 166MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the"
167 " adapter for foreign arrays.\n"
168 "This is typically needed in systems that do not have a BIOS."
169 " 0=off, 1=on");
170module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR);
171MODULE_PARM_DESC(msi, "IRQ handling."
172 " 0=PIC(default), 1=MSI, 2=MSI-X(unsupported, uses MSI)");
162module_param(startup_timeout, int, S_IRUGO|S_IWUSR); 173module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
163MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for adapter to have it's kernel up and\nrunning. This is typically adjusted for large systems that do not have a BIOS."); 174MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
175 " adapter to have it's kernel up and\n"
176 "running. This is typically adjusted for large systems that do not"
177 " have a BIOS.");
164module_param(aif_timeout, int, S_IRUGO|S_IWUSR); 178module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
165MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for applications to pick up AIFs before\nderegistering them. This is typically adjusted for heavily burdened systems."); 179MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
180 " applications to pick up AIFs before\n"
181 "deregistering them. This is typically adjusted for heavily burdened"
182 " systems.");
166 183
167int numacb = -1; 184int numacb = -1;
168module_param(numacb, int, S_IRUGO|S_IWUSR); 185module_param(numacb, int, S_IRUGO|S_IWUSR);
169MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control blocks (FIB) allocated. Valid values are 512 and down. Default is to use suggestion from Firmware."); 186MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
187 " blocks (FIB) allocated. Valid values are 512 and down. Default is"
188 " to use suggestion from Firmware.");
170 189
171int acbsize = -1; 190int acbsize = -1;
172module_param(acbsize, int, S_IRUGO|S_IWUSR); 191module_param(acbsize, int, S_IRUGO|S_IWUSR);
173MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware."); 192MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
193 " size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
194 " suggestion from Firmware.");
174 195
175int update_interval = 30 * 60; 196int update_interval = 30 * 60;
176module_param(update_interval, int, S_IRUGO|S_IWUSR); 197module_param(update_interval, int, S_IRUGO|S_IWUSR);
177MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync updates issued to adapter."); 198MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
199 " updates issued to adapter.");
178 200
179int check_interval = 24 * 60 * 60; 201int check_interval = 24 * 60 * 60;
180module_param(check_interval, int, S_IRUGO|S_IWUSR); 202module_param(check_interval, int, S_IRUGO|S_IWUSR);
181MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health checks."); 203MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
204 " checks.");
182 205
183int aac_check_reset = 1; 206int aac_check_reset = 1;
184module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR); 207module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
185MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the adapter. a value of -1 forces the reset to adapters programmed to ignore it."); 208MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the"
209 " adapter. a value of -1 forces the reset to adapters programmed to"
210 " ignore it.");
186 211
187int expose_physicals = -1; 212int expose_physicals = -1;
188module_param(expose_physicals, int, S_IRUGO|S_IWUSR); 213module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
189MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on"); 214MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays."
215 " -1=protect 0=off, 1=on");
190 216
191int aac_reset_devices = 0; 217int aac_reset_devices;
192module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR); 218module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
193MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization."); 219MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
194 220
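The re-wrapped descriptions above are ordinary module_param()/MODULE_PARM_DESC() pairs with the long help string split across adjacent literals; the compiler concatenates them with no added whitespace, which is why each continuation literal carries its own leading space. A minimal sketch mirroring the shape of the new msi parameter (variable name hypothetical):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

static int my_msi;	/* 0 = legacy pin IRQ, 1 = MSI */
module_param_named(msi, my_msi, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msi, "IRQ handling."
	" 0=PIC(default), 1=MSI");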
@@ -1315,7 +1341,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
1315 (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid), 1341 (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
1316 dev->supplement_adapter_info.VpdInfo.Tsid); 1342 dev->supplement_adapter_info.VpdInfo.Tsid);
1317 } 1343 }
1318 if (!aac_check_reset || ((aac_check_reset != 1) && 1344 if (!aac_check_reset || ((aac_check_reset == 1) &&
1319 (dev->supplement_adapter_info.SupportedOptions2 & 1345 (dev->supplement_adapter_info.SupportedOptions2 &
1320 AAC_OPTION_IGNORE_RESET))) { 1346 AAC_OPTION_IGNORE_RESET))) {
1321 printk(KERN_INFO "%s%d: Reset Adapter Ignored\n", 1347 printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
@@ -1353,13 +1379,14 @@ int aac_get_adapter_info(struct aac_dev* dev)
1353 1379
1354 if (nondasd != -1) 1380 if (nondasd != -1)
1355 dev->nondasd_support = (nondasd!=0); 1381 dev->nondasd_support = (nondasd!=0);
1356 if(dev->nondasd_support != 0) { 1382 if (dev->nondasd_support && !dev->in_reset)
1357 printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id); 1383 printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
1358 }
1359 1384
1360 dev->dac_support = 0; 1385 dev->dac_support = 0;
1361 if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){ 1386 if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
1362 printk(KERN_INFO "%s%d: 64bit support enabled.\n", dev->name, dev->id); 1387 if (!dev->in_reset)
1388 printk(KERN_INFO "%s%d: 64bit support enabled.\n",
1389 dev->name, dev->id);
1363 dev->dac_support = 1; 1390 dev->dac_support = 1;
1364 } 1391 }
1365 1392
@@ -1369,8 +1396,9 @@ int aac_get_adapter_info(struct aac_dev* dev)
1369 if(dev->dac_support != 0) { 1396 if(dev->dac_support != 0) {
1370 if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) && 1397 if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) &&
1371 !pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) { 1398 !pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) {
1372 printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n", 1399 if (!dev->in_reset)
1373 dev->name, dev->id); 1400 printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
1401 dev->name, dev->id);
1374 } else if (!pci_set_dma_mask(dev->pdev, DMA_32BIT_MASK) && 1402 } else if (!pci_set_dma_mask(dev->pdev, DMA_32BIT_MASK) &&
1375 !pci_set_consistent_dma_mask(dev->pdev, DMA_32BIT_MASK)) { 1403 !pci_set_consistent_dma_mask(dev->pdev, DMA_32BIT_MASK)) {
1376 printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n", 1404 printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
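A compact sketch of the 64-bit DAC probe with 32-bit fallback used above, written against the pci_set_dma_mask()/DMA_*BIT_MASK interface of this era (function name hypothetical):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int my_setup_dma(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
		return 1;		/* 64-bit DAC usable */

	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
		return 0;		/* fall back to 32-bit addressing */

	return -EIO;			/* no usable DMA mask */
}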
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 3195d29f2177..ace0b751c131 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1026,6 +1026,7 @@ struct aac_dev
1026 u8 raw_io_64; 1026 u8 raw_io_64;
1027 u8 printf_enabled; 1027 u8 printf_enabled;
1028 u8 in_reset; 1028 u8 in_reset;
1029 u8 msi;
1029}; 1030};
1030 1031
1031#define aac_adapter_interrupt(dev) \ 1032#define aac_adapter_interrupt(dev) \
@@ -1881,6 +1882,7 @@ extern int startup_timeout;
1881extern int aif_timeout; 1882extern int aif_timeout;
1882extern int expose_physicals; 1883extern int expose_physicals;
1883extern int aac_reset_devices; 1884extern int aac_reset_devices;
1885extern int aac_msi;
1884extern int aac_commit; 1886extern int aac_commit;
1885extern int update_interval; 1887extern int update_interval;
1886extern int check_interval; 1888extern int check_interval;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 81b36923e0ef..47434499e82b 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1458,7 +1458,7 @@ int aac_check_health(struct aac_dev * aac)
1458 1458
1459 printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); 1459 printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1460 1460
1461 if (!aac_check_reset || ((aac_check_reset != 1) && 1461 if (!aac_check_reset || ((aac_check_reset == 1) &&
1462 (aac->supplement_adapter_info.SupportedOptions2 & 1462 (aac->supplement_adapter_info.SupportedOptions2 &
1463 AAC_OPTION_IGNORE_RESET))) 1463 AAC_OPTION_IGNORE_RESET)))
1464 goto out; 1464 goto out;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index e80d2a0c46af..ae5f74fb62d5 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -275,9 +275,9 @@ static const char *aac_info(struct Scsi_Host *shost)
275 275
276/** 276/**
277 * aac_get_driver_ident 277 * aac_get_driver_ident
278 * @devtype: index into lookup table 278 * @devtype: index into lookup table
279 * 279 *
280 * Returns a pointer to the entry in the driver lookup table. 280 * Returns a pointer to the entry in the driver lookup table.
281 */ 281 */
282 282
283struct aac_driver_ident* aac_get_driver_ident(int devtype) 283struct aac_driver_ident* aac_get_driver_ident(int devtype)
@@ -494,13 +494,14 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
494 494
495static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) 495static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
496{ 496{
497 struct scsi_device * sdev = to_scsi_device(dev); 497 struct scsi_device *sdev = to_scsi_device(dev);
498 struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
498 if (sdev_channel(sdev) != CONTAINER_CHANNEL) 499 if (sdev_channel(sdev) != CONTAINER_CHANNEL)
499 return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach 500 return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
500 ? "Hidden\n" : "JBOD"); 501 ? "Hidden\n" :
502 ((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : ""));
501 return snprintf(buf, PAGE_SIZE, "%s\n", 503 return snprintf(buf, PAGE_SIZE, "%s\n",
502 get_container_type(((struct aac_dev *)(sdev->host->hostdata)) 504 get_container_type(aac->fsa_dev[sdev_id(sdev)].type));
503 ->fsa_dev[sdev_id(sdev)].type));
504} 505}
505 506
506static struct device_attribute aac_raid_level_attr = { 507static struct device_attribute aac_raid_level_attr = {
@@ -641,7 +642,7 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
641 AAC_OPTION_MU_RESET) && 642 AAC_OPTION_MU_RESET) &&
642 aac_check_reset && 643 aac_check_reset &&
643 ((aac_check_reset != 1) || 644 ((aac_check_reset != 1) ||
644 (aac->supplement_adapter_info.SupportedOptions2 & 645 !(aac->supplement_adapter_info.SupportedOptions2 &
645 AAC_OPTION_IGNORE_RESET))) 646 AAC_OPTION_IGNORE_RESET)))
646 aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */ 647 aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
647 return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */ 648 return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
@@ -860,8 +861,8 @@ ssize_t aac_show_serial_number(struct class_device *class_dev, char *buf)
860 le32_to_cpu(dev->adapter_info.serial[0])); 861 le32_to_cpu(dev->adapter_info.serial[0]));
861 if (len && 862 if (len &&
862 !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[ 863 !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[
863 sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)+2-len], 864 sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len],
864 buf, len)) 865 buf, len-1))
865 len = snprintf(buf, PAGE_SIZE, "%.*s\n", 866 len = snprintf(buf, PAGE_SIZE, "%.*s\n",
866 (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo), 867 (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo),
867 dev->supplement_adapter_info.MfgPcbaSerialNo); 868 dev->supplement_adapter_info.MfgPcbaSerialNo);
@@ -1004,32 +1005,32 @@ static const struct file_operations aac_cfg_fops = {
1004 1005
1005static struct scsi_host_template aac_driver_template = { 1006static struct scsi_host_template aac_driver_template = {
1006 .module = THIS_MODULE, 1007 .module = THIS_MODULE,
1007 .name = "AAC", 1008 .name = "AAC",
1008 .proc_name = AAC_DRIVERNAME, 1009 .proc_name = AAC_DRIVERNAME,
1009 .info = aac_info, 1010 .info = aac_info,
1010 .ioctl = aac_ioctl, 1011 .ioctl = aac_ioctl,
1011#ifdef CONFIG_COMPAT 1012#ifdef CONFIG_COMPAT
1012 .compat_ioctl = aac_compat_ioctl, 1013 .compat_ioctl = aac_compat_ioctl,
1013#endif 1014#endif
1014 .queuecommand = aac_queuecommand, 1015 .queuecommand = aac_queuecommand,
1015 .bios_param = aac_biosparm, 1016 .bios_param = aac_biosparm,
1016 .shost_attrs = aac_attrs, 1017 .shost_attrs = aac_attrs,
1017 .slave_configure = aac_slave_configure, 1018 .slave_configure = aac_slave_configure,
1018 .change_queue_depth = aac_change_queue_depth, 1019 .change_queue_depth = aac_change_queue_depth,
1019 .sdev_attrs = aac_dev_attrs, 1020 .sdev_attrs = aac_dev_attrs,
1020 .eh_abort_handler = aac_eh_abort, 1021 .eh_abort_handler = aac_eh_abort,
1021 .eh_host_reset_handler = aac_eh_reset, 1022 .eh_host_reset_handler = aac_eh_reset,
1022 .can_queue = AAC_NUM_IO_FIB, 1023 .can_queue = AAC_NUM_IO_FIB,
1023 .this_id = MAXIMUM_NUM_CONTAINERS, 1024 .this_id = MAXIMUM_NUM_CONTAINERS,
1024 .sg_tablesize = 16, 1025 .sg_tablesize = 16,
1025 .max_sectors = 128, 1026 .max_sectors = 128,
1026#if (AAC_NUM_IO_FIB > 256) 1027#if (AAC_NUM_IO_FIB > 256)
1027 .cmd_per_lun = 256, 1028 .cmd_per_lun = 256,
1028#else 1029#else
1029 .cmd_per_lun = AAC_NUM_IO_FIB, 1030 .cmd_per_lun = AAC_NUM_IO_FIB,
1030#endif 1031#endif
1031 .use_clustering = ENABLE_CLUSTERING, 1032 .use_clustering = ENABLE_CLUSTERING,
1032 .emulated = 1, 1033 .emulated = 1,
1033}; 1034};
1034 1035
1035static void __aac_shutdown(struct aac_dev * aac) 1036static void __aac_shutdown(struct aac_dev * aac)
@@ -1039,6 +1040,8 @@ static void __aac_shutdown(struct aac_dev * aac)
1039 aac_send_shutdown(aac); 1040 aac_send_shutdown(aac);
1040 aac_adapter_disable_int(aac); 1041 aac_adapter_disable_int(aac);
1041 free_irq(aac->pdev->irq, aac); 1042 free_irq(aac->pdev->irq, aac);
1043 if (aac->msi)
1044 pci_disable_msi(aac->pdev);
1042} 1045}
1043 1046
1044static int __devinit aac_probe_one(struct pci_dev *pdev, 1047static int __devinit aac_probe_one(struct pci_dev *pdev,
@@ -1254,7 +1257,7 @@ static struct pci_driver aac_pci_driver = {
1254 .id_table = aac_pci_tbl, 1257 .id_table = aac_pci_tbl,
1255 .probe = aac_probe_one, 1258 .probe = aac_probe_one,
1256 .remove = __devexit_p(aac_remove_one), 1259 .remove = __devexit_p(aac_remove_one),
1257 .shutdown = aac_shutdown, 1260 .shutdown = aac_shutdown,
1258}; 1261};
1259 1262
1260static int __init aac_init(void) 1263static int __init aac_init(void)
@@ -1271,7 +1274,7 @@ static int __init aac_init(void)
1271 aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops); 1274 aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
1272 if (aac_cfg_major < 0) { 1275 if (aac_cfg_major < 0) {
1273 printk(KERN_WARNING 1276 printk(KERN_WARNING
1274 "aacraid: unable to register \"aac\" device.\n"); 1277 "aacraid: unable to register \"aac\" device.\n");
1275 } 1278 }
1276 1279
1277 return 0; 1280 return 0;
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index a08bbf1fd76c..1f18b83e1e02 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -625,8 +625,11 @@ int _aac_rx_init(struct aac_dev *dev)
625 if (aac_init_adapter(dev) == NULL) 625 if (aac_init_adapter(dev) == NULL)
626 goto error_iounmap; 626 goto error_iounmap;
627 aac_adapter_comm(dev, dev->comm_interface); 627 aac_adapter_comm(dev, dev->comm_interface);
628 if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr, 628 dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
629 if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
629 IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { 630 IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
631 if (dev->msi)
632 pci_disable_msi(dev->pdev);
630 printk(KERN_ERR "%s%d: Interrupt unavailable.\n", 633 printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
631 name, instance); 634 name, instance);
632 goto error_iounmap; 635 goto error_iounmap;
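The MSI handling added to rx.c follows the usual enable-with-fallback shape: try pci_enable_msi(), fall back to the legacy pin interrupt if that fails, and pair every successful enable with pci_disable_msi() on the request_irq() failure path (and, as in linit.c above, on shutdown). A minimal sketch, not the driver's code:

#include <linux/pci.h>
#include <linux/interrupt.h>

static int my_setup_irq(struct pci_dev *pdev, irq_handler_t handler,
			void *dev_id, int want_msi)
{
	int using_msi = want_msi && !pci_enable_msi(pdev);

	/* pdev->irq now refers to the MSI vector if the enable succeeded */
	if (request_irq(pdev->irq, handler, IRQF_SHARED, "mydrv", dev_id)) {
		if (using_msi)
			pci_disable_msi(pdev);	/* undo on failure */
		return -EIO;
	}
	return using_msi;
}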
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 85b91bc578c9..cfc3410ec073 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -31,6 +31,7 @@
31#include <linux/kernel.h> 31#include <linux/kernel.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/types.h> 33#include <linux/types.h>
34#include <linux/pci.h>
34#include <linux/spinlock.h> 35#include <linux/spinlock.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
36#include <linux/blkdev.h> 37#include <linux/blkdev.h>
@@ -385,7 +386,7 @@ int aac_sa_init(struct aac_dev *dev)
385 386
386 if(aac_init_adapter(dev) == NULL) 387 if(aac_init_adapter(dev) == NULL)
387 goto error_irq; 388 goto error_irq;
388 if (request_irq(dev->scsi_host_ptr->irq, dev->a_ops.adapter_intr, 389 if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
389 IRQF_SHARED|IRQF_DISABLED, 390 IRQF_SHARED|IRQF_DISABLED,
390 "aacraid", (void *)dev ) < 0) { 391 "aacraid", (void *)dev ) < 0) {
391 printk(KERN_WARNING "%s%d: Interrupt unavailable.\n", 392 printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
@@ -403,7 +404,7 @@ int aac_sa_init(struct aac_dev *dev)
403 404
404error_irq: 405error_irq:
405 aac_sa_disable_interrupt(dev); 406 aac_sa_disable_interrupt(dev);
406 free_irq(dev->scsi_host_ptr->irq, (void *)dev); 407 free_irq(dev->pdev->irq, (void *)dev);
407 408
408error_iounmap: 409error_iounmap:
409 410
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index ccef891d642f..3c2d6888bb8c 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -566,7 +566,7 @@ typedef struct asc_dvc_var {
566 ASC_SCSI_BIT_ID_TYPE unit_not_ready; 566 ASC_SCSI_BIT_ID_TYPE unit_not_ready;
567 ASC_SCSI_BIT_ID_TYPE queue_full_or_busy; 567 ASC_SCSI_BIT_ID_TYPE queue_full_or_busy;
568 ASC_SCSI_BIT_ID_TYPE start_motor; 568 ASC_SCSI_BIT_ID_TYPE start_motor;
569 uchar overrun_buf[ASC_OVERRUN_BSIZE] __aligned(8); 569 uchar *overrun_buf;
570 dma_addr_t overrun_dma; 570 dma_addr_t overrun_dma;
571 uchar scsi_reset_wait; 571 uchar scsi_reset_wait;
572 uchar chip_no; 572 uchar chip_no;
@@ -13833,6 +13833,12 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
13833 */ 13833 */
13834 if (ASC_NARROW_BOARD(boardp)) { 13834 if (ASC_NARROW_BOARD(boardp)) {
13835 ASC_DBG(2, "AscInitAsc1000Driver()\n"); 13835 ASC_DBG(2, "AscInitAsc1000Driver()\n");
13836
13837 asc_dvc_varp->overrun_buf = kzalloc(ASC_OVERRUN_BSIZE, GFP_KERNEL);
13838 if (!asc_dvc_varp->overrun_buf) {
13839 ret = -ENOMEM;
13840 goto err_free_wide_mem;
13841 }
13836 warn_code = AscInitAsc1000Driver(asc_dvc_varp); 13842 warn_code = AscInitAsc1000Driver(asc_dvc_varp);
13837 13843
13838 if (warn_code || asc_dvc_varp->err_code) { 13844 if (warn_code || asc_dvc_varp->err_code) {
@@ -13840,8 +13846,10 @@ static int __devinit advansys_board_found(struct Scsi_Host *shost,
13840 "warn 0x%x, error 0x%x\n", 13846 "warn 0x%x, error 0x%x\n",
13841 asc_dvc_varp->init_state, warn_code, 13847 asc_dvc_varp->init_state, warn_code,
13842 asc_dvc_varp->err_code); 13848 asc_dvc_varp->err_code);
13843 if (asc_dvc_varp->err_code) 13849 if (asc_dvc_varp->err_code) {
13844 ret = -ENODEV; 13850 ret = -ENODEV;
13851 kfree(asc_dvc_varp->overrun_buf);
13852 }
13845 } 13853 }
13846 } else { 13854 } else {
13847 if (advansys_wide_init_chip(shost)) 13855 if (advansys_wide_init_chip(shost))
@@ -13894,6 +13902,7 @@ static int advansys_release(struct Scsi_Host *shost)
13894 dma_unmap_single(board->dev, 13902 dma_unmap_single(board->dev,
13895 board->dvc_var.asc_dvc_var.overrun_dma, 13903 board->dvc_var.asc_dvc_var.overrun_dma,
13896 ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE); 13904 ASC_OVERRUN_BSIZE, DMA_FROM_DEVICE);
13905 kfree(board->dvc_var.asc_dvc_var.overrun_buf);
13897 } else { 13906 } else {
13898 iounmap(board->ioremap_addr); 13907 iounmap(board->ioremap_addr);
13899 advansys_wide_free_mem(board); 13908 advansys_wide_free_mem(board);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 4150c8a8fdc2..dfaaae5e73ae 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -89,7 +89,7 @@ ahd_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
89 pci_save_state(pdev); 89 pci_save_state(pdev);
90 pci_disable_device(pdev); 90 pci_disable_device(pdev);
91 91
92 if (mesg.event == PM_EVENT_SUSPEND) 92 if (mesg.event & PM_EVENT_SLEEP)
93 pci_set_power_state(pdev, PCI_D3hot); 93 pci_set_power_state(pdev, PCI_D3hot);
94 94
95 return rc; 95 return rc;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 6d2ae641273c..64e62ce59c15 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -695,15 +695,16 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
695 scb_index = ahc_inb(ahc, SCB_TAG); 695 scb_index = ahc_inb(ahc, SCB_TAG);
696 scb = ahc_lookup_scb(ahc, scb_index); 696 scb = ahc_lookup_scb(ahc, scb_index);
697 if (devinfo.role == ROLE_INITIATOR) { 697 if (devinfo.role == ROLE_INITIATOR) {
698 if (scb == NULL) 698 if (bus_phase == P_MESGOUT) {
699 panic("HOST_MSG_LOOP with " 699 if (scb == NULL)
700 "invalid SCB %x\n", scb_index); 700 panic("HOST_MSG_LOOP with "
701 "invalid SCB %x\n",
702 scb_index);
701 703
702 if (bus_phase == P_MESGOUT)
703 ahc_setup_initiator_msgout(ahc, 704 ahc_setup_initiator_msgout(ahc,
704 &devinfo, 705 &devinfo,
705 scb); 706 scb);
706 else { 707 } else {
707 ahc->msg_type = 708 ahc->msg_type =
708 MSG_TYPE_INITIATOR_MSGIN; 709 MSG_TYPE_INITIATOR_MSGIN;
709 ahc->msgin_index = 0; 710 ahc->msgin_index = 0;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index dd6e21d6f1dd..3d3eaef65fb3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -134,7 +134,7 @@ ahc_linux_pci_dev_suspend(struct pci_dev *pdev, pm_message_t mesg)
134 pci_save_state(pdev); 134 pci_save_state(pdev);
135 pci_disable_device(pdev); 135 pci_disable_device(pdev);
136 136
137 if (mesg.event == PM_EVENT_SUSPEND) 137 if (mesg.event & PM_EVENT_SLEEP)
138 pci_set_power_state(pdev, PCI_D3hot); 138 pci_set_power_state(pdev, PCI_D3hot);
139 139
140 return rc; 140 return rc;
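Both aic79xx and aic7xxx suspend hooks switch from an equality test against PM_EVENT_SUSPEND to a bitmask test, so the D3hot transition also covers hibernation events; PM_EVENT_SLEEP is the OR of the suspend and hibernate event bits. A sketch of a generic PCI suspend hook using the same check (driver details hypothetical):

#include <linux/pci.h>
#include <linux/pm.h>

static int my_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	/* PM_EVENT_SLEEP == PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE */
	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}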
diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h
index fa7c5290257d..912e6b755f74 100644
--- a/drivers/scsi/aic94xx/aic94xx_sas.h
+++ b/drivers/scsi/aic94xx/aic94xx_sas.h
@@ -292,7 +292,7 @@ struct scb_header {
292#define INITIATE_SSP_TASK 0x00 292#define INITIATE_SSP_TASK 0x00
293#define INITIATE_LONG_SSP_TASK 0x01 293#define INITIATE_LONG_SSP_TASK 0x01
294#define INITIATE_BIDIR_SSP_TASK 0x02 294#define INITIATE_BIDIR_SSP_TASK 0x02
295#define ABORT_TASK 0x03 295#define SCB_ABORT_TASK 0x03
296#define INITIATE_SSP_TMF 0x04 296#define INITIATE_SSP_TMF 0x04
297#define SSP_TARG_GET_DATA 0x05 297#define SSP_TARG_GET_DATA 0x05
298#define SSP_TARG_GET_DATA_GOOD 0x06 298#define SSP_TARG_GET_DATA_GOOD 0x06
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 0febad4dd75f..ab350504ca5a 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -458,13 +458,19 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
458 tc_abort = le16_to_cpu(tc_abort); 458 tc_abort = le16_to_cpu(tc_abort);
459 459
460 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) { 460 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
461 struct sas_task *task = ascb->uldd_task; 461 struct sas_task *task = a->uldd_task;
462
463 if (a->tc_index != tc_abort)
464 continue;
462 465
463 if (task && a->tc_index == tc_abort) { 466 if (task) {
464 failed_dev = task->dev; 467 failed_dev = task->dev;
465 sas_task_abort(task); 468 sas_task_abort(task);
466 break; 469 } else {
470 ASD_DPRINTK("R_T_A for non TASK scb 0x%x\n",
471 a->scb->header.opcode);
467 } 472 }
473 break;
468 } 474 }
469 475
470 if (!failed_dev) { 476 if (!failed_dev) {
@@ -478,7 +484,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
478 * that the EH will wake up and do something. 484 * that the EH will wake up and do something.
479 */ 485 */
480 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) { 486 list_for_each_entry_safe(a, b, &asd_ha->seq.pend_q, list) {
481 struct sas_task *task = ascb->uldd_task; 487 struct sas_task *task = a->uldd_task;
482 488
483 if (task && 489 if (task &&
484 task->dev == failed_dev && 490 task->dev == failed_dev &&
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 87b2f6e6adfe..144f5ad20453 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -151,8 +151,6 @@ static int asd_clear_nexus_I_T(struct domain_device *dev)
151 CLEAR_NEXUS_PRE; 151 CLEAR_NEXUS_PRE;
152 scb->clear_nexus.nexus = NEXUS_I_T; 152 scb->clear_nexus.nexus = NEXUS_I_T;
153 scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; 153 scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
154 if (dev->tproto)
155 scb->clear_nexus.flags |= SUSPEND_TX;
156 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) 154 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
157 dev->lldd_dev); 155 dev->lldd_dev);
158 CLEAR_NEXUS_POST; 156 CLEAR_NEXUS_POST;
@@ -169,8 +167,6 @@ static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
169 CLEAR_NEXUS_PRE; 167 CLEAR_NEXUS_PRE;
170 scb->clear_nexus.nexus = NEXUS_I_T_L; 168 scb->clear_nexus.nexus = NEXUS_I_T_L;
171 scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; 169 scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
172 if (dev->tproto)
173 scb->clear_nexus.flags |= SUSPEND_TX;
174 memcpy(scb->clear_nexus.ssp_task.lun, lun, 8); 170 memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
175 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) 171 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
176 dev->lldd_dev); 172 dev->lldd_dev);
@@ -369,7 +365,7 @@ int asd_abort_task(struct sas_task *task)
369 return -ENOMEM; 365 return -ENOMEM;
370 scb = ascb->scb; 366 scb = ascb->scb;
371 367
372 scb->header.opcode = ABORT_TASK; 368 scb->header.opcode = SCB_ABORT_TASK;
373 369
374 switch (task->task_proto) { 370 switch (task->task_proto) {
375 case SAS_PROTOCOL_SATA: 371 case SAS_PROTOCOL_SATA:
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 4f9ff32cfed0..f91f79c8007d 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1387,18 +1387,16 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1387 switch(controlcode) { 1387 switch(controlcode) {
1388 1388
1389 case ARCMSR_MESSAGE_READ_RQBUFFER: { 1389 case ARCMSR_MESSAGE_READ_RQBUFFER: {
1390 unsigned long *ver_addr; 1390 unsigned char *ver_addr;
1391 uint8_t *pQbuffer, *ptmpQbuffer; 1391 uint8_t *pQbuffer, *ptmpQbuffer;
1392 int32_t allxfer_len = 0; 1392 int32_t allxfer_len = 0;
1393 void *tmp;
1394 1393
1395 tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA); 1394 ver_addr = kmalloc(1032, GFP_ATOMIC);
1396 ver_addr = (unsigned long *)tmp; 1395 if (!ver_addr) {
1397 if (!tmp) {
1398 retvalue = ARCMSR_MESSAGE_FAIL; 1396 retvalue = ARCMSR_MESSAGE_FAIL;
1399 goto message_out; 1397 goto message_out;
1400 } 1398 }
1401 ptmpQbuffer = (uint8_t *) ver_addr; 1399 ptmpQbuffer = ver_addr;
1402 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex) 1400 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
1403 && (allxfer_len < 1031)) { 1401 && (allxfer_len < 1031)) {
1404 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex]; 1402 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
@@ -1427,26 +1425,24 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1427 } 1425 }
1428 arcmsr_iop_message_read(acb); 1426 arcmsr_iop_message_read(acb);
1429 } 1427 }
1430 memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len); 1428 memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
1431 pcmdmessagefld->cmdmessage.Length = allxfer_len; 1429 pcmdmessagefld->cmdmessage.Length = allxfer_len;
1432 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK; 1430 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1433 kfree(tmp); 1431 kfree(ver_addr);
1434 } 1432 }
1435 break; 1433 break;
1436 1434
1437 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 1435 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1438 unsigned long *ver_addr; 1436 unsigned char *ver_addr;
1439 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex; 1437 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1440 uint8_t *pQbuffer, *ptmpuserbuffer; 1438 uint8_t *pQbuffer, *ptmpuserbuffer;
1441 void *tmp;
1442 1439
1443 tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA); 1440 ver_addr = kmalloc(1032, GFP_ATOMIC);
1444 ver_addr = (unsigned long *)tmp; 1441 if (!ver_addr) {
1445 if (!tmp) {
1446 retvalue = ARCMSR_MESSAGE_FAIL; 1442 retvalue = ARCMSR_MESSAGE_FAIL;
1447 goto message_out; 1443 goto message_out;
1448 } 1444 }
1449 ptmpuserbuffer = (uint8_t *)ver_addr; 1445 ptmpuserbuffer = ver_addr;
1450 user_len = pcmdmessagefld->cmdmessage.Length; 1446 user_len = pcmdmessagefld->cmdmessage.Length;
1451 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len); 1447 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
1452 wqbuf_lastindex = acb->wqbuf_lastindex; 1448 wqbuf_lastindex = acb->wqbuf_lastindex;
@@ -1492,7 +1488,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
1492 retvalue = ARCMSR_MESSAGE_FAIL; 1488 retvalue = ARCMSR_MESSAGE_FAIL;
1493 } 1489 }
1494 } 1490 }
1495 kfree(tmp); 1491 kfree(ver_addr);
1496 } 1492 }
1497 break; 1493 break;
1498 1494
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index fb5f20284389..a715632e19d4 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2018,6 +2018,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
2018 * the upper layers to process. This would have been set 2018 * the upper layers to process. This would have been set
2019 * correctly by fas216_std_done. 2019 * correctly by fas216_std_done.
2020 */ 2020 */
2021 scsi_eh_restore_cmnd(SCpnt, &info->ses);
2021 SCpnt->scsi_done(SCpnt); 2022 SCpnt->scsi_done(SCpnt);
2022} 2023}
2023 2024
@@ -2103,23 +2104,12 @@ request_sense:
2103 if (SCpnt->cmnd[0] == REQUEST_SENSE) 2104 if (SCpnt->cmnd[0] == REQUEST_SENSE)
2104 goto done; 2105 goto done;
2105 2106
2107 scsi_eh_prep_cmnd(SCpnt, &info->ses, NULL, 0, ~0);
2106 fas216_log_target(info, LOG_CONNECT, SCpnt->device->id, 2108 fas216_log_target(info, LOG_CONNECT, SCpnt->device->id,
2107 "requesting sense"); 2109 "requesting sense");
2108 memset(SCpnt->cmnd, 0, sizeof (SCpnt->cmnd)); 2110 init_SCp(SCpnt);
2109 SCpnt->cmnd[0] = REQUEST_SENSE;
2110 SCpnt->cmnd[1] = SCpnt->device->lun << 5;
2111 SCpnt->cmnd[4] = sizeof(SCpnt->sense_buffer);
2112 SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
2113 SCpnt->SCp.buffer = NULL;
2114 SCpnt->SCp.buffers_residual = 0;
2115 SCpnt->SCp.ptr = (char *)SCpnt->sense_buffer;
2116 SCpnt->SCp.this_residual = sizeof(SCpnt->sense_buffer);
2117 SCpnt->SCp.phase = sizeof(SCpnt->sense_buffer);
2118 SCpnt->SCp.Message = 0; 2111 SCpnt->SCp.Message = 0;
2119 SCpnt->SCp.Status = 0; 2112 SCpnt->SCp.Status = 0;
2120 SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer);
2121 SCpnt->sc_data_direction = DMA_FROM_DEVICE;
2122 SCpnt->use_sg = 0;
2123 SCpnt->tag = 0; 2113 SCpnt->tag = 0;
2124 SCpnt->host_scribble = (void *)fas216_rq_sns_done; 2114 SCpnt->host_scribble = (void *)fas216_rq_sns_done;
2125 2115
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index 00e5f055afdc..b65f4cf0eec9 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -16,6 +16,8 @@
16#define NO_IRQ 255 16#define NO_IRQ 255
17#endif 17#endif
18 18
19#include <scsi/scsi_eh.h>
20
19#include "queue.h" 21#include "queue.h"
20#include "msgqueue.h" 22#include "msgqueue.h"
21 23
@@ -311,6 +313,7 @@ typedef struct {
311 313
312 /* miscellaneous */ 314 /* miscellaneous */
313 int internal_done; /* flag to indicate request done */ 315 int internal_done; /* flag to indicate request done */
316 struct scsi_eh_save ses; /* holds request sense restore info */
314 unsigned long magic_end; 317 unsigned long magic_end;
315} FAS216_Info; 318} FAS216_Info;
316 319
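The fas216 request-sense path now uses the SCSI EH helpers instead of hand-building a REQUEST SENSE command: scsi_eh_prep_cmnd() saves the command's state into a struct scsi_eh_save and rewrites it as an autosense request, and scsi_eh_restore_cmnd() puts the original back before completion. A minimal sketch of the pairing (surrounding structure and callbacks illustrative):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

struct my_host_info {
	struct scsi_eh_save ses;	/* saved command state */
};

static void my_request_sense(struct my_host_info *info, struct scsi_cmnd *cmd)
{
	/* NULL/0 cmnd arguments: build a standard REQUEST SENSE into cmd */
	scsi_eh_prep_cmnd(cmd, &info->ses, NULL, 0, ~0);
	/* ... reissue cmd to the device ... */
}

static void my_sense_done(struct my_host_info *info, struct scsi_cmnd *cmd)
{
	scsi_eh_restore_cmnd(cmd, &info->ses);	/* undo before completing */
	cmd->scsi_done(cmd);
}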
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index c82523908c2e..6d67f5c0eb8e 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -642,12 +642,15 @@ static void __init gdth_search_dev(gdth_pci_str *pcistr, ushort *cnt,
642 *cnt, vendor, device)); 642 *cnt, vendor, device));
643 643
644 pdev = NULL; 644 pdev = NULL;
645 while ((pdev = pci_find_device(vendor, device, pdev)) 645 while ((pdev = pci_get_device(vendor, device, pdev))
646 != NULL) { 646 != NULL) {
647 if (pci_enable_device(pdev)) 647 if (pci_enable_device(pdev))
648 continue; 648 continue;
649 if (*cnt >= MAXHA) 649 if (*cnt >= MAXHA) {
650 pci_dev_put(pdev);
650 return; 651 return;
652 }
653
651 /* GDT PCI controller found, resources are already in pdev */ 654 /* GDT PCI controller found, resources are already in pdev */
652 pcistr[*cnt].pdev = pdev; 655 pcistr[*cnt].pdev = pdev;
653 pcistr[*cnt].irq = pdev->irq; 656 pcistr[*cnt].irq = pdev->irq;
@@ -4836,6 +4839,9 @@ static int __init gdth_isa_probe_one(ulong32 isa_bios)
4836 if (error) 4839 if (error)
4837 goto out_free_coal_stat; 4840 goto out_free_coal_stat;
4838 list_add_tail(&ha->list, &gdth_instances); 4841 list_add_tail(&ha->list, &gdth_instances);
4842
4843 scsi_scan_host(shp);
4844
4839 return 0; 4845 return 0;
4840 4846
4841 out_free_coal_stat: 4847 out_free_coal_stat:
@@ -4963,6 +4969,9 @@ static int __init gdth_eisa_probe_one(ushort eisa_slot)
4963 if (error) 4969 if (error)
4964 goto out_free_coal_stat; 4970 goto out_free_coal_stat;
4965 list_add_tail(&ha->list, &gdth_instances); 4971 list_add_tail(&ha->list, &gdth_instances);
4972
4973 scsi_scan_host(shp);
4974
4966 return 0; 4975 return 0;
4967 4976
4968 out_free_ccb_phys: 4977 out_free_ccb_phys:
@@ -5100,6 +5109,9 @@ static int __init gdth_pci_probe_one(gdth_pci_str *pcistr, int ctr)
5100 if (error) 5109 if (error)
5101 goto out_free_coal_stat; 5110 goto out_free_coal_stat;
5102 list_add_tail(&ha->list, &gdth_instances); 5111 list_add_tail(&ha->list, &gdth_instances);
5112
5113 scsi_scan_host(shp);
5114
5103 return 0; 5115 return 0;
5104 5116
5105 out_free_coal_stat: 5117 out_free_coal_stat:
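gdth's move from pci_find_device() to pci_get_device() brings reference counting with it: pci_get_device() drops the reference on the device passed in and takes one on the device it returns, so a loop that bails out early must release the current reference itself with pci_dev_put(). Sketch with hypothetical IDs:

#include <linux/pci.h>

static void my_scan(unsigned int vendor, unsigned int device, int max)
{
	struct pci_dev *pdev = NULL;
	int count = 0;

	while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
		if (count >= max) {
			pci_dev_put(pdev);	/* early exit: drop our ref */
			return;
		}
		/* ... claim the device (keep it, or pci_dev_put() later) ... */
		count++;
	}
	/* loop ended with pdev == NULL: nothing left to release */
}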
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
index de5773443c62..ce0228e26aec 100644
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -694,15 +694,13 @@ static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr)
694{ 694{
695 ulong flags; 695 ulong flags;
696 696
697 spin_lock_irqsave(&ha->smp_lock, flags);
698
699 if (buf == ha->pscratch) { 697 if (buf == ha->pscratch) {
698 spin_lock_irqsave(&ha->smp_lock, flags);
700 ha->scratch_busy = FALSE; 699 ha->scratch_busy = FALSE;
700 spin_unlock_irqrestore(&ha->smp_lock, flags);
701 } else { 701 } else {
702 pci_free_consistent(ha->pdev, size, buf, paddr); 702 pci_free_consistent(ha->pdev, size, buf, paddr);
703 } 703 }
704
705 spin_unlock_irqrestore(&ha->smp_lock, flags);
706} 704}
707 705
708#ifdef GDTH_IOCTL_PROC 706#ifdef GDTH_IOCTL_PROC
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2074701f7e76..c72014a3e7d4 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5140,7 +5140,7 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5140 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 5140 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5141 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 5141 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5142 struct ipr_ioadl_desc *last_ioadl = NULL; 5142 struct ipr_ioadl_desc *last_ioadl = NULL;
5143 int len = qc->nbytes + qc->pad_len; 5143 int len = qc->nbytes;
5144 struct scatterlist *sg; 5144 struct scatterlist *sg;
5145 unsigned int si; 5145 unsigned int si;
5146 5146
@@ -5206,7 +5206,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5206 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU; 5206 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5207 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; 5207 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5208 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; 5208 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5209 ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem; 5209 ipr_cmd->dma_use_sg = qc->n_elem;
5210 5210
5211 ipr_build_ata_ioadl(ipr_cmd, qc); 5211 ipr_build_ata_ioadl(ipr_cmd, qc);
5212 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION; 5212 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bb152fb9fec7..7ed568f180ae 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1576,7 +1576,7 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
1576 METHOD_TRACE("ips_make_passthru", 1); 1576 METHOD_TRACE("ips_make_passthru", 1);
1577 1577
1578 scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i) 1578 scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
1579 length += sg[i].length; 1579 length += sg->length;
1580 1580
1581 if (length < sizeof (ips_passthru_t)) { 1581 if (length < sizeof (ips_passthru_t)) {
1582 /* wrong size */ 1582 /* wrong size */
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 0996f866f14c..7cd05b599a12 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -178,8 +178,8 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
178 task->uldd_task = qc; 178 task->uldd_task = qc;
179 if (ata_is_atapi(qc->tf.protocol)) { 179 if (ata_is_atapi(qc->tf.protocol)) {
180 memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); 180 memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
181 task->total_xfer_len = qc->nbytes + qc->pad_len; 181 task->total_xfer_len = qc->nbytes;
182 task->num_scatter = qc->pad_len ? qc->n_elem + 1 : qc->n_elem; 182 task->num_scatter = qc->n_elem;
183 } else { 183 } else {
184 for_each_sg(qc->sg, sg, qc->n_elem, si) 184 for_each_sg(qc->sg, sg, qc->n_elem, si)
185 xfer += sg->length; 185 xfer += sg->length;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f869fba86807..704ea06a6e50 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -51,10 +51,14 @@ static void sas_scsi_task_done(struct sas_task *task)
51{ 51{
52 struct task_status_struct *ts = &task->task_status; 52 struct task_status_struct *ts = &task->task_status;
53 struct scsi_cmnd *sc = task->uldd_task; 53 struct scsi_cmnd *sc = task->uldd_task;
54 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(sc->device->host);
55 unsigned ts_flags = task->task_state_flags;
56 int hs = 0, stat = 0; 54 int hs = 0, stat = 0;
57 55
56 if (unlikely(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
57 /* Aborted tasks will be completed by the error handler */
58 SAS_DPRINTK("task done but aborted\n");
59 return;
60 }
61
58 if (unlikely(!sc)) { 62 if (unlikely(!sc)) {
59 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); 63 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
60 list_del_init(&task->list); 64 list_del_init(&task->list);
@@ -120,11 +124,7 @@ static void sas_scsi_task_done(struct sas_task *task)
120 sc->result = (hs << 16) | stat; 124 sc->result = (hs << 16) | stat;
121 list_del_init(&task->list); 125 list_del_init(&task->list);
122 sas_free_task(task); 126 sas_free_task(task);
123 /* This is very ugly but this is how SCSI Core works. */ 127 sc->scsi_done(sc);
124 if (ts_flags & SAS_TASK_STATE_ABORTED)
125 scsi_eh_finish_cmd(sc, &sas_ha->eh_done_q);
126 else
127 sc->scsi_done(sc);
128} 128}
129 129
130static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd) 130static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd)
@@ -255,13 +255,34 @@ out:
255 return res; 255 return res;
256} 256}
257 257
258static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
259{
260 struct sas_task *task = TO_SAS_TASK(cmd);
261 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
262
263 /* remove the aborted task flag to allow the task to be
264 * completed now. At this point, we only get called following
265 * an actual abort of the task, so we should be guaranteed not
266 * to be racing with any completions from the LLD (hence we
267 * don't need the task state lock to clear the flag) */
268 task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
269 /* Now call task_done. However, task will be free'd after
270 * this */
271 task->task_done(task);
272 /* now finish the command and move it on to the error
273 * handler done list, this also takes it off the
274 * error handler pending list */
275 scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
276}
277
258static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) 278static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
259{ 279{
260 struct scsi_cmnd *cmd, *n; 280 struct scsi_cmnd *cmd, *n;
261 281
262 list_for_each_entry_safe(cmd, n, error_q, eh_entry) { 282 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
263 if (cmd == my_cmd) 283 if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
264 list_del_init(&cmd->eh_entry); 284 cmd->device->lun == my_cmd->device->lun)
285 sas_eh_finish_cmd(cmd);
265 } 286 }
266} 287}
267 288
@@ -274,7 +295,7 @@ static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
274 struct domain_device *x = cmd_to_domain_dev(cmd); 295 struct domain_device *x = cmd_to_domain_dev(cmd);
275 296
276 if (x == dev) 297 if (x == dev)
277 list_del_init(&cmd->eh_entry); 298 sas_eh_finish_cmd(cmd);
278 } 299 }
279} 300}
280 301
@@ -288,7 +309,7 @@ static void sas_scsi_clear_queue_port(struct list_head *error_q,
288 struct asd_sas_port *x = dev->port; 309 struct asd_sas_port *x = dev->port;
289 310
290 if (x == port) 311 if (x == port)
291 list_del_init(&cmd->eh_entry); 312 sas_eh_finish_cmd(cmd);
292 } 313 }
293} 314}
294 315
@@ -528,14 +549,14 @@ Again:
528 case TASK_IS_DONE: 549 case TASK_IS_DONE:
529 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, 550 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__,
530 task); 551 task);
531 task->task_done(task); 552 sas_eh_finish_cmd(cmd);
532 if (need_reset) 553 if (need_reset)
533 try_to_reset_cmd_device(shost, cmd); 554 try_to_reset_cmd_device(shost, cmd);
534 continue; 555 continue;
535 case TASK_IS_ABORTED: 556 case TASK_IS_ABORTED:
536 SAS_DPRINTK("%s: task 0x%p is aborted\n", 557 SAS_DPRINTK("%s: task 0x%p is aborted\n",
537 __FUNCTION__, task); 558 __FUNCTION__, task);
538 task->task_done(task); 559 sas_eh_finish_cmd(cmd);
539 if (need_reset) 560 if (need_reset)
540 try_to_reset_cmd_device(shost, cmd); 561 try_to_reset_cmd_device(shost, cmd);
541 continue; 562 continue;
@@ -547,7 +568,7 @@ Again:
547 "recovered\n", 568 "recovered\n",
548 SAS_ADDR(task->dev), 569 SAS_ADDR(task->dev),
549 cmd->device->lun); 570 cmd->device->lun);
550 task->task_done(task); 571 sas_eh_finish_cmd(cmd);
551 if (need_reset) 572 if (need_reset)
552 try_to_reset_cmd_device(shost, cmd); 573 try_to_reset_cmd_device(shost, cmd);
553 sas_scsi_clear_queue_lu(work_q, cmd); 574 sas_scsi_clear_queue_lu(work_q, cmd);
@@ -562,7 +583,7 @@ Again:
562 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { 583 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
563 SAS_DPRINTK("I_T %016llx recovered\n", 584 SAS_DPRINTK("I_T %016llx recovered\n",
564 SAS_ADDR(task->dev->sas_addr)); 585 SAS_ADDR(task->dev->sas_addr));
565 task->task_done(task); 586 sas_eh_finish_cmd(cmd);
566 if (need_reset) 587 if (need_reset)
567 try_to_reset_cmd_device(shost, cmd); 588 try_to_reset_cmd_device(shost, cmd);
568 sas_scsi_clear_queue_I_T(work_q, task->dev); 589 sas_scsi_clear_queue_I_T(work_q, task->dev);
@@ -577,7 +598,7 @@ Again:
577 if (res == TMF_RESP_FUNC_COMPLETE) { 598 if (res == TMF_RESP_FUNC_COMPLETE) {
578 SAS_DPRINTK("clear nexus port:%d " 599 SAS_DPRINTK("clear nexus port:%d "
579 "succeeded\n", port->id); 600 "succeeded\n", port->id);
580 task->task_done(task); 601 sas_eh_finish_cmd(cmd);
581 if (need_reset) 602 if (need_reset)
582 try_to_reset_cmd_device(shost, cmd); 603 try_to_reset_cmd_device(shost, cmd);
583 sas_scsi_clear_queue_port(work_q, 604 sas_scsi_clear_queue_port(work_q,
@@ -591,10 +612,10 @@ Again:
591 if (res == TMF_RESP_FUNC_COMPLETE) { 612 if (res == TMF_RESP_FUNC_COMPLETE) {
592 SAS_DPRINTK("clear nexus ha " 613 SAS_DPRINTK("clear nexus ha "
593 "succeeded\n"); 614 "succeeded\n");
594 task->task_done(task); 615 sas_eh_finish_cmd(cmd);
595 if (need_reset) 616 if (need_reset)
596 try_to_reset_cmd_device(shost, cmd); 617 try_to_reset_cmd_device(shost, cmd);
597 goto out; 618 goto clear_q;
598 } 619 }
599 } 620 }
600 /* If we are here -- this means that no amount 621 /* If we are here -- this means that no amount
@@ -606,21 +627,18 @@ Again:
606 SAS_ADDR(task->dev->sas_addr), 627 SAS_ADDR(task->dev->sas_addr),
607 cmd->device->lun); 628 cmd->device->lun);
608 629
609 task->task_done(task); 630 sas_eh_finish_cmd(cmd);
610 if (need_reset) 631 if (need_reset)
611 try_to_reset_cmd_device(shost, cmd); 632 try_to_reset_cmd_device(shost, cmd);
612 goto clear_q; 633 goto clear_q;
613 } 634 }
614 } 635 }
615out:
616 return list_empty(work_q); 636 return list_empty(work_q);
617clear_q: 637clear_q:
618 SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__); 638 SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__);
619 list_for_each_entry_safe(cmd, n, work_q, eh_entry) { 639 list_for_each_entry_safe(cmd, n, work_q, eh_entry)
620 struct sas_task *task = TO_SAS_TASK(cmd); 640 sas_eh_finish_cmd(cmd);
621 list_del_init(&cmd->eh_entry); 641
622 task->task_done(task);
623 }
624 return list_empty(work_q); 642 return list_empty(work_q);
625} 643}
626 644
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 83567b9755b4..2ab2d24dcc15 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -307,6 +307,7 @@ struct lpfc_vport {
307 307
308 uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */ 308 uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
309 uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */ 309 uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
310 uint32_t fc_rscn_flush; /* flag use of fc_rscn_id_list */
310 struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN]; 311 struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
311 struct lpfc_name fc_nodename; /* fc nodename */ 312 struct lpfc_name fc_nodename; /* fc nodename */
312 struct lpfc_name fc_portname; /* fc portname */ 313 struct lpfc_name fc_portname; /* fc portname */
@@ -392,6 +393,13 @@ enum hba_temp_state {
392 HBA_OVER_TEMP 393 HBA_OVER_TEMP
393}; 394};
394 395
396enum intr_type_t {
397 NONE = 0,
398 INTx,
399 MSI,
400 MSIX,
401};
402
395struct lpfc_hba { 403struct lpfc_hba {
396 struct lpfc_sli sli; 404 struct lpfc_sli sli;
397 uint32_t sli_rev; /* SLI2 or SLI3 */ 405 uint32_t sli_rev; /* SLI2 or SLI3 */
@@ -409,7 +417,7 @@ struct lpfc_hba {
409 /* This flag is set while issuing */ 417 /* This flag is set while issuing */
410 /* INIT_LINK mailbox command */ 418 /* INIT_LINK mailbox command */
411#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */ 419#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
412#define LS_IGNORE_ERATT 0x3 /* intr handler should ignore ERATT */ 420#define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
413 421
414 struct lpfc_sli2_slim *slim2p; 422 struct lpfc_sli2_slim *slim2p;
415 struct lpfc_dmabuf hbqslimp; 423 struct lpfc_dmabuf hbqslimp;
@@ -487,6 +495,8 @@ struct lpfc_hba {
487 wait_queue_head_t *work_wait; 495 wait_queue_head_t *work_wait;
488 struct task_struct *worker_thread; 496 struct task_struct *worker_thread;
489 497
498 uint32_t hbq_in_use; /* HBQs in use flag */
 499 struct list_head hbqbuf_in_list; /* in-flight hbq buffer list */
490 uint32_t hbq_count; /* Count of configured HBQs */ 500 uint32_t hbq_count; /* Count of configured HBQs */
491 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ 501 struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
492 502
@@ -555,7 +565,8 @@ struct lpfc_hba {
555 mempool_t *nlp_mem_pool; 565 mempool_t *nlp_mem_pool;
556 566
557 struct fc_host_statistics link_stats; 567 struct fc_host_statistics link_stats;
558 uint8_t using_msi; 568 enum intr_type_t intr_type;
569 struct msix_entry msix_entries[1];
559 570
560 struct list_head port_list; 571 struct list_head port_list;
561 struct lpfc_vport *pport; /* physical lpfc_vport pointer */ 572 struct lpfc_vport *pport; /* physical lpfc_vport pointer */
@@ -595,6 +606,8 @@ struct lpfc_hba {
595 unsigned long last_completion_time; 606 unsigned long last_completion_time;
596 struct timer_list hb_tmofunc; 607 struct timer_list hb_tmofunc;
597 uint8_t hb_outstanding; 608 uint8_t hb_outstanding;
609 /* ndlp reference management */
610 spinlock_t ndlp_lock;
598 /* 611 /*
599 * Following bit will be set for all buffer tags which are not 612 * Following bit will be set for all buffer tags which are not
600 * associated with any HBQ. 613 * associated with any HBQ.
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4bae4a2ed2f1..b12a841703ca 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1191,7 +1191,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
1191 shost = lpfc_shost_from_vport(vport); 1191 shost = lpfc_shost_from_vport(vport);
1192 spin_lock_irq(shost->host_lock); 1192 spin_lock_irq(shost->host_lock);
1193 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) 1193 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
1194 if (ndlp->rport) 1194 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport)
1195 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; 1195 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
1196 spin_unlock_irq(shost->host_lock); 1196 spin_unlock_irq(shost->host_lock);
1197} 1197}
@@ -1592,9 +1592,11 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
1592# support this feature 1592# support this feature
1593# 0 = MSI disabled (default) 1593# 0 = MSI disabled (default)
1594# 1 = MSI enabled 1594# 1 = MSI enabled
1595# Value range is [0,1]. Default value is 0. 1595# 2 = MSI-X enabled
1596# Value range is [0,2]. Default value is 0.
1596*/ 1597*/
1597LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible"); 1598LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
1599 "MSI-X (2), if possible");
1598 1600
1599/* 1601/*
1600# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. 1602# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
@@ -1946,11 +1948,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1946 } 1948 }
1947 1949
1948 /* If HBA encountered an error attention, allow only DUMP 1950 /* If HBA encountered an error attention, allow only DUMP
1949 * mailbox command until the HBA is restarted. 1951 * or RESTART mailbox commands until the HBA is restarted.
1950 */ 1952 */
1951 if ((phba->pport->stopped) && 1953 if ((phba->pport->stopped) &&
1952 (phba->sysfs_mbox.mbox->mb.mbxCommand 1954 (phba->sysfs_mbox.mbox->mb.mbxCommand !=
1953 != MBX_DUMP_MEMORY)) { 1955 MBX_DUMP_MEMORY &&
1956 phba->sysfs_mbox.mbox->mb.mbxCommand !=
1957 MBX_RESTART)) {
1954 sysfs_mbox_idle(phba); 1958 sysfs_mbox_idle(phba);
1955 spin_unlock_irq(&phba->hbalock); 1959 spin_unlock_irq(&phba->hbalock);
1956 return -EPERM; 1960 return -EPERM;
@@ -2384,7 +2388,8 @@ lpfc_get_node_by_target(struct scsi_target *starget)
2384 spin_lock_irq(shost->host_lock); 2388 spin_lock_irq(shost->host_lock);
2385 /* Search for this, mapped, target ID */ 2389 /* Search for this, mapped, target ID */
2386 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 2390 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2387 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 2391 if (NLP_CHK_NODE_ACT(ndlp) &&
2392 ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
2388 starget->id == ndlp->nlp_sid) { 2393 starget->id == ndlp->nlp_sid) {
2389 spin_unlock_irq(shost->host_lock); 2394 spin_unlock_irq(shost->host_lock);
2390 return ndlp; 2395 return ndlp;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 50fcb7c930bc..0819f5f39de5 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -53,7 +53,10 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
53void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 53void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
54void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 54void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
55void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 55void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
56void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
56void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); 57void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
58struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
59 struct lpfc_nodelist *, int);
57void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int); 60void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
58void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *); 61void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
59void lpfc_set_disctmo(struct lpfc_vport *); 62void lpfc_set_disctmo(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 92441ce610ed..3d0ccd9b341d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -294,7 +294,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
294 /* Save for completion so we can release these resources */ 294 /* Save for completion so we can release these resources */
295 geniocb->context1 = (uint8_t *) inp; 295 geniocb->context1 = (uint8_t *) inp;
296 geniocb->context2 = (uint8_t *) outp; 296 geniocb->context2 = (uint8_t *) outp;
297 geniocb->context_un.ndlp = ndlp; 297 geniocb->context_un.ndlp = lpfc_nlp_get(ndlp);
298 298
299 /* Fill in payload, bp points to frame payload */ 299 /* Fill in payload, bp points to frame payload */
300 icmd->ulpCommand = CMD_GEN_REQUEST64_CR; 300 icmd->ulpCommand = CMD_GEN_REQUEST64_CR;
@@ -489,8 +489,10 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
489 */ 489 */
490 ndlp = lpfc_findnode_did(vport, 490 ndlp = lpfc_findnode_did(vport,
491 Did); 491 Did);
492 if (ndlp && (ndlp->nlp_type & 492 if (ndlp &&
493 NLP_FCP_TARGET)) 493 NLP_CHK_NODE_ACT(ndlp)
494 && (ndlp->nlp_type &
495 NLP_FCP_TARGET))
494 lpfc_setup_disc_node 496 lpfc_setup_disc_node
495 (vport, Did); 497 (vport, Did);
496 else if (lpfc_ns_cmd(vport, 498 else if (lpfc_ns_cmd(vport,
@@ -773,7 +775,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
773 "0267 NameServer GFF Rsp " 775 "0267 NameServer GFF Rsp "
774 "x%x Error (%d %d) Data: x%x x%x\n", 776 "x%x Error (%d %d) Data: x%x x%x\n",
775 did, irsp->ulpStatus, irsp->un.ulpWord[4], 777 did, irsp->ulpStatus, irsp->un.ulpWord[4],
776 vport->fc_flag, vport->fc_rscn_id_cnt) 778 vport->fc_flag, vport->fc_rscn_id_cnt);
777 } 779 }
778 780
779 /* This is a target port, unregistered port, or the GFF_ID failed */ 781 /* This is a target port, unregistered port, or the GFF_ID failed */
@@ -1064,7 +1066,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1064 int rc = 0; 1066 int rc = 0;
1065 1067
1066 ndlp = lpfc_findnode_did(vport, NameServer_DID); 1068 ndlp = lpfc_findnode_did(vport, NameServer_DID);
1067 if (ndlp == NULL || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) { 1069 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)
1070 || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
1068 rc=1; 1071 rc=1;
1069 goto ns_cmd_exit; 1072 goto ns_cmd_exit;
1070 } 1073 }
@@ -1213,8 +1216,9 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1213 cmpl = lpfc_cmpl_ct_cmd_rff_id; 1216 cmpl = lpfc_cmpl_ct_cmd_rff_id;
1214 break; 1217 break;
1215 } 1218 }
1216 lpfc_nlp_get(ndlp); 1219 /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
1217 1220 * to hold ndlp reference for the corresponding callback function.
1221 */
1218 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) { 1222 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) {
1219 /* On success, The cmpl function will free the buffers */ 1223 /* On success, The cmpl function will free the buffers */
1220 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, 1224 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
@@ -1222,9 +1226,13 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1222 cmdcode, ndlp->nlp_DID, 0); 1226 cmdcode, ndlp->nlp_DID, 0);
1223 return 0; 1227 return 0;
1224 } 1228 }
1225
1226 rc=6; 1229 rc=6;
1230
1231 /* Decrement ndlp reference count to release ndlp reference held
1232 * for the failed command's callback function.
1233 */
1227 lpfc_nlp_put(ndlp); 1234 lpfc_nlp_put(ndlp);
1235
1228 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1236 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1229ns_cmd_free_bmp: 1237ns_cmd_free_bmp:
1230 kfree(bmp); 1238 kfree(bmp);
@@ -1271,6 +1279,9 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1271 } 1279 }
1272 1280
1273 ndlp = lpfc_findnode_did(vport, FDMI_DID); 1281 ndlp = lpfc_findnode_did(vport, FDMI_DID);
1282 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
1283 goto fail_out;
1284
1274 if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { 1285 if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
1275 /* FDMI rsp failed */ 1286 /* FDMI rsp failed */
1276 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1287 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -1294,6 +1305,8 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1294 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA); 1305 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA);
1295 break; 1306 break;
1296 } 1307 }
1308
1309fail_out:
1297 lpfc_ct_free_iocb(phba, cmdiocb); 1310 lpfc_ct_free_iocb(phba, cmdiocb);
1298 return; 1311 return;
1299} 1312}
@@ -1650,12 +1663,18 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
1650 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1663 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1651 1664
1652 cmpl = lpfc_cmpl_ct_cmd_fdmi; 1665 cmpl = lpfc_cmpl_ct_cmd_fdmi;
1653 lpfc_nlp_get(ndlp);
1654 1666
1667 /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
1668 * to hold ndlp reference for the corresponding callback function.
1669 */
1655 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0)) 1670 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0))
1656 return 0; 1671 return 0;
1657 1672
1673 /* Decrement ndlp reference count to release ndlp reference held
1674 * for the failed command's callback function.
1675 */
1658 lpfc_nlp_put(ndlp); 1676 lpfc_nlp_put(ndlp);
1677
1659 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1678 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1660fdmi_cmd_free_bmp: 1679fdmi_cmd_free_bmp:
1661 kfree(bmp); 1680 kfree(bmp);
@@ -1698,7 +1717,7 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
1698 struct lpfc_nodelist *ndlp; 1717 struct lpfc_nodelist *ndlp;
1699 1718
1700 ndlp = lpfc_findnode_did(vport, FDMI_DID); 1719 ndlp = lpfc_findnode_did(vport, FDMI_DID);
1701 if (ndlp) { 1720 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
1702 if (init_utsname()->nodename[0] != '\0') 1721 if (init_utsname()->nodename[0] != '\0')
1703 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); 1722 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
1704 else 1723 else
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index cfe81c50529a..2db0b74b6fad 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -73,6 +73,12 @@ struct lpfc_nodelist {
73 uint8_t nlp_fcp_info; /* class info, bits 0-3 */ 73 uint8_t nlp_fcp_info; /* class info, bits 0-3 */
74#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ 74#define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */
75 75
76 uint16_t nlp_usg_map; /* ndlp management usage bitmap */
77#define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */
78#define NLP_USG_IACT_REQ_BIT 0x2 /* Request to inactivate ndlp */
79#define NLP_USG_FREE_REQ_BIT 0x4 /* Request to invoke ndlp memory free */
80#define NLP_USG_FREE_ACK_BIT 0x8 /* Indicate ndlp memory free invoked */
81
76 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ 82 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
77 struct fc_rport *rport; /* Corresponding FC transport 83 struct fc_rport *rport; /* Corresponding FC transport
78 port structure */ 84 port structure */
@@ -85,25 +91,51 @@ struct lpfc_nodelist {
85}; 91};
86 92
87/* Defines for nlp_flag (uint32) */ 93/* Defines for nlp_flag (uint32) */
88#define NLP_PLOGI_SND 0x20 /* sent PLOGI request for this entry */ 94#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */
89#define NLP_PRLI_SND 0x40 /* sent PRLI request for this entry */ 95#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */
90#define NLP_ADISC_SND 0x80 /* sent ADISC request for this entry */ 96#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */
91#define NLP_LOGO_SND 0x100 /* sent LOGO request for this entry */ 97#define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */
92#define NLP_RNID_SND 0x400 /* sent RNID request for this entry */ 98#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */
93#define NLP_ELS_SND_MASK 0x7e0 /* sent ELS request for this entry */ 99#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */
94#define NLP_DEFER_RM 0x10000 /* Remove this ndlp if no longer used */ 100#define NLP_DEFER_RM 0x00010000 /* Remove this ndlp if no longer used */
95#define NLP_DELAY_TMO 0x20000 /* delay timeout is running for node */ 101#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */
96#define NLP_NPR_2B_DISC 0x40000 /* node is included in num_disc_nodes */ 102#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */
97#define NLP_RCV_PLOGI 0x80000 /* Rcv'ed PLOGI from remote system */ 103#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */
98#define NLP_LOGO_ACC 0x100000 /* Process LOGO after ACC completes */ 104#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */
99#define NLP_TGT_NO_SCSIID 0x200000 /* good PRLI but no binding for scsid */ 105#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */
100#define NLP_ACC_REGLOGIN 0x1000000 /* Issue Reg Login after successful 106#define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful
101 ACC */ 107 ACC */
102#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from 108#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from
103 NPR list */ 109 NPR list */
104#define NLP_RM_DFLT_RPI 0x4000000 /* need to remove leftover dflt RPI */ 110#define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */
105#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */ 111#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
106#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ 112#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
113#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
114
115/* ndlp usage management macros */
116#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
117 & NLP_USG_NODE_ACT_BIT) \
118 && \
119 !((ndlp)->nlp_usg_map \
120 & NLP_USG_FREE_ACK_BIT))
121#define NLP_SET_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
122 |= NLP_USG_NODE_ACT_BIT)
123#define NLP_INT_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
124 = NLP_USG_NODE_ACT_BIT)
125#define NLP_CLR_NODE_ACT(ndlp) ((ndlp)->nlp_usg_map \
126 &= ~NLP_USG_NODE_ACT_BIT)
127#define NLP_CHK_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \
128 & NLP_USG_IACT_REQ_BIT)
129#define NLP_SET_IACT_REQ(ndlp) ((ndlp)->nlp_usg_map \
130 |= NLP_USG_IACT_REQ_BIT)
131#define NLP_CHK_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \
132 & NLP_USG_FREE_REQ_BIT)
133#define NLP_SET_FREE_REQ(ndlp) ((ndlp)->nlp_usg_map \
134 |= NLP_USG_FREE_REQ_BIT)
135#define NLP_CHK_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \
136 & NLP_USG_FREE_ACK_BIT)
137#define NLP_SET_FREE_ACK(ndlp) ((ndlp)->nlp_usg_map \
138 |= NLP_USG_FREE_ACK_BIT)
107 139
108/* There are 4 different double linked lists nodelist entries can reside on. 140/* There are 4 different double linked lists nodelist entries can reside on.
109 * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used 141 * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index c6b739dc6bc3..cbb68a942255 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -113,6 +113,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
113 113
114 if (elsiocb == NULL) 114 if (elsiocb == NULL)
115 return NULL; 115 return NULL;
116
116 icmd = &elsiocb->iocb; 117 icmd = &elsiocb->iocb;
117 118
118 /* fill in BDEs for command */ 119 /* fill in BDEs for command */
@@ -134,9 +135,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
134 if (!prsp || !prsp->virt) 135 if (!prsp || !prsp->virt)
135 goto els_iocb_free_prsp_exit; 136 goto els_iocb_free_prsp_exit;
136 INIT_LIST_HEAD(&prsp->list); 137 INIT_LIST_HEAD(&prsp->list);
137 } else { 138 } else
138 prsp = NULL; 139 prsp = NULL;
139 }
140 140
141 /* Allocate buffer for Buffer ptr list */ 141 /* Allocate buffer for Buffer ptr list */
142 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 142 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
@@ -246,7 +246,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
246 246
247 sp = &phba->fc_fabparam; 247 sp = &phba->fc_fabparam;
248 ndlp = lpfc_findnode_did(vport, Fabric_DID); 248 ndlp = lpfc_findnode_did(vport, Fabric_DID);
249 if (!ndlp) { 249 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
250 err = 1; 250 err = 1;
251 goto fail; 251 goto fail;
252 } 252 }
@@ -282,6 +282,9 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
282 282
283 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 283 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
284 mbox->vport = vport; 284 mbox->vport = vport;
285 /* increment the reference count on ndlp to hold reference
286 * for the callback routine.
287 */
285 mbox->context2 = lpfc_nlp_get(ndlp); 288 mbox->context2 = lpfc_nlp_get(ndlp);
286 289
287 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 290 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -293,6 +296,9 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
293 return 0; 296 return 0;
294 297
295fail_issue_reg_login: 298fail_issue_reg_login:
299 /* decrement the reference count on ndlp just incremented
300 * for the failed mbox command.
301 */
296 lpfc_nlp_put(ndlp); 302 lpfc_nlp_put(ndlp);
297 mp = (struct lpfc_dmabuf *) mbox->context1; 303 mp = (struct lpfc_dmabuf *) mbox->context1;
298 lpfc_mbuf_free(phba, mp->virt, mp->phys); 304 lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -381,6 +387,8 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
381 */ 387 */
382 list_for_each_entry_safe(np, next_np, 388 list_for_each_entry_safe(np, next_np,
383 &vport->fc_nodes, nlp_listp) { 389 &vport->fc_nodes, nlp_listp) {
390 if (!NLP_CHK_NODE_ACT(ndlp))
391 continue;
384 if ((np->nlp_state != NLP_STE_NPR_NODE) || 392 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
385 !(np->nlp_flag & NLP_NPR_ADISC)) 393 !(np->nlp_flag & NLP_NPR_ADISC))
386 continue; 394 continue;
@@ -456,6 +464,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
456 mempool_free(mbox, phba->mbox_mem_pool); 464 mempool_free(mbox, phba->mbox_mem_pool);
457 goto fail; 465 goto fail;
458 } 466 }
467 /* Decrement ndlp reference count indicating that ndlp can be
468 * safely released when other references to it are done.
469 */
459 lpfc_nlp_put(ndlp); 470 lpfc_nlp_put(ndlp);
460 471
461 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); 472 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
@@ -467,22 +478,29 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
467 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 478 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
468 if (!ndlp) 479 if (!ndlp)
469 goto fail; 480 goto fail;
470
471 lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID); 481 lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
482 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
483 ndlp = lpfc_enable_node(vport, ndlp,
484 NLP_STE_UNUSED_NODE);
485 if(!ndlp)
486 goto fail;
472 } 487 }
473 488
474 memcpy(&ndlp->nlp_portname, &sp->portName, 489 memcpy(&ndlp->nlp_portname, &sp->portName,
475 sizeof(struct lpfc_name)); 490 sizeof(struct lpfc_name));
476 memcpy(&ndlp->nlp_nodename, &sp->nodeName, 491 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
477 sizeof(struct lpfc_name)); 492 sizeof(struct lpfc_name));
493 /* Set state will put ndlp onto node list if not already done */
478 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 494 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
479 spin_lock_irq(shost->host_lock); 495 spin_lock_irq(shost->host_lock);
480 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 496 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
481 spin_unlock_irq(shost->host_lock); 497 spin_unlock_irq(shost->host_lock);
482 } else { 498 } else
483 /* This side will wait for the PLOGI */ 499 /* This side will wait for the PLOGI, decrement ndlp reference
500 * count indicating that ndlp can be released when other
501 * references to it are done.
502 */
484 lpfc_nlp_put(ndlp); 503 lpfc_nlp_put(ndlp);
485 }
486 504
487 /* If we are pt2pt with another NPort, force NPIV off! */ 505 /* If we are pt2pt with another NPort, force NPIV off! */
488 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 506 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
@@ -728,16 +746,21 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
728 if (!ndlp) 746 if (!ndlp)
729 return 0; 747 return 0;
730 lpfc_nlp_init(vport, ndlp, Fabric_DID); 748 lpfc_nlp_init(vport, ndlp, Fabric_DID);
731 } else { 749 /* Put ndlp onto node list */
732 lpfc_dequeue_node(vport, ndlp); 750 lpfc_enqueue_node(vport, ndlp);
751 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
752 /* re-setup ndlp without removing from node list */
753 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
754 if (!ndlp)
755 return 0;
733 } 756 }
734 757
735 if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 758 if (lpfc_issue_els_flogi(vport, ndlp, 0))
736 /* This decrement of reference count to node shall kick off 759 /* This decrement of reference count to node shall kick off
737 * the release of the node. 760 * the release of the node.
738 */ 761 */
739 lpfc_nlp_put(ndlp); 762 lpfc_nlp_put(ndlp);
740 } 763
741 return 1; 764 return 1;
742} 765}
743 766
@@ -755,9 +778,15 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
755 if (!ndlp) 778 if (!ndlp)
756 return 0; 779 return 0;
757 lpfc_nlp_init(vport, ndlp, Fabric_DID); 780 lpfc_nlp_init(vport, ndlp, Fabric_DID);
758 } else { 781 /* Put ndlp onto node list */
759 lpfc_dequeue_node(vport, ndlp); 782 lpfc_enqueue_node(vport, ndlp);
783 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
784 /* re-setup ndlp without removing from node list */
785 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
786 if (!ndlp)
787 return 0;
760 } 788 }
789
761 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { 790 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
762 /* decrement node reference count to trigger the release of 791 /* decrement node reference count to trigger the release of
763 * the node. 792 * the node.
@@ -816,7 +845,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
816 */ 845 */
817 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 846 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
818 847
819 if (new_ndlp == ndlp) 848 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
820 return ndlp; 849 return ndlp;
821 850
822 if (!new_ndlp) { 851 if (!new_ndlp) {
@@ -827,8 +856,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
827 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); 856 new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
828 if (!new_ndlp) 857 if (!new_ndlp)
829 return ndlp; 858 return ndlp;
830
831 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID); 859 lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
860 } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
861 new_ndlp = lpfc_enable_node(vport, new_ndlp,
862 NLP_STE_UNUSED_NODE);
863 if (!new_ndlp)
864 return ndlp;
832 } 865 }
833 866
834 lpfc_unreg_rpi(vport, new_ndlp); 867 lpfc_unreg_rpi(vport, new_ndlp);
@@ -839,6 +872,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
839 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; 872 new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
840 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 873 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
841 874
875 /* Set state will put new_ndlp on to node list if not already done */
842 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 876 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
843 877
844 /* Move this back to NPR state */ 878 /* Move this back to NPR state */
@@ -912,7 +946,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
912 irsp->un.elsreq64.remoteID); 946 irsp->un.elsreq64.remoteID);
913 947
914 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 948 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
915 if (!ndlp) { 949 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
916 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 950 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
917 "0136 PLOGI completes to NPort x%x " 951 "0136 PLOGI completes to NPort x%x "
918 "with no ndlp. Data: x%x x%x x%x\n", 952 "with no ndlp. Data: x%x x%x x%x\n",
@@ -962,12 +996,11 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
962 } 996 }
963 /* PLOGI failed */ 997 /* PLOGI failed */
964 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 998 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
965 if (lpfc_error_lost_link(irsp)) { 999 if (lpfc_error_lost_link(irsp))
966 rc = NLP_STE_FREED_NODE; 1000 rc = NLP_STE_FREED_NODE;
967 } else { 1001 else
968 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1002 rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
969 NLP_EVT_CMPL_PLOGI); 1003 NLP_EVT_CMPL_PLOGI);
970 }
971 } else { 1004 } else {
972 /* Good status, call state machine */ 1005 /* Good status, call state machine */
973 prsp = list_entry(((struct lpfc_dmabuf *) 1006 prsp = list_entry(((struct lpfc_dmabuf *)
@@ -1015,8 +1048,10 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1015 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1048 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1016 1049
1017 ndlp = lpfc_findnode_did(vport, did); 1050 ndlp = lpfc_findnode_did(vport, did);
1018 /* If ndlp if not NULL, we will bump the reference count on it */ 1051 if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1052 ndlp = NULL;
1019 1053
1054 /* If ndlp is not NULL, we will bump the reference count on it */
1020 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1055 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1021 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 1056 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
1022 ELS_CMD_PLOGI); 1057 ELS_CMD_PLOGI);
@@ -1097,18 +1132,15 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1097 } 1132 }
1098 /* PRLI failed */ 1133 /* PRLI failed */
1099 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1134 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1100 if (lpfc_error_lost_link(irsp)) { 1135 if (lpfc_error_lost_link(irsp))
1101 goto out; 1136 goto out;
1102 } else { 1137 else
1103 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1138 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1104 NLP_EVT_CMPL_PRLI); 1139 NLP_EVT_CMPL_PRLI);
1105 } 1140 } else
1106 } else {
1107 /* Good status, call state machine */ 1141 /* Good status, call state machine */
1108 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1142 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1109 NLP_EVT_CMPL_PRLI); 1143 NLP_EVT_CMPL_PRLI);
1110 }
1111
1112out: 1144out:
1113 lpfc_els_free_iocb(phba, cmdiocb); 1145 lpfc_els_free_iocb(phba, cmdiocb);
1114 return; 1146 return;
@@ -1275,15 +1307,13 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1275 } 1307 }
1276 /* ADISC failed */ 1308 /* ADISC failed */
1277 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1309 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1278 if (!lpfc_error_lost_link(irsp)) { 1310 if (!lpfc_error_lost_link(irsp))
1279 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1311 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1280 NLP_EVT_CMPL_ADISC); 1312 NLP_EVT_CMPL_ADISC);
1281 } 1313 } else
1282 } else {
1283 /* Good status, call state machine */ 1314 /* Good status, call state machine */
1284 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1315 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1285 NLP_EVT_CMPL_ADISC); 1316 NLP_EVT_CMPL_ADISC);
1286 }
1287 1317
1288 if (disc && vport->num_disc_nodes) { 1318 if (disc && vport->num_disc_nodes) {
1289 /* Check to see if there are more ADISCs to be sent */ 1319 /* Check to see if there are more ADISCs to be sent */
@@ -1443,14 +1473,12 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1443 else 1473 else
1444 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1474 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1445 NLP_EVT_CMPL_LOGO); 1475 NLP_EVT_CMPL_LOGO);
1446 } else { 1476 } else
1447 /* Good status, call state machine. 1477 /* Good status, call state machine.
1448 * This will unregister the rpi if needed. 1478 * This will unregister the rpi if needed.
1449 */ 1479 */
1450 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1480 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1451 NLP_EVT_CMPL_LOGO); 1481 NLP_EVT_CMPL_LOGO);
1452 }
1453
1454out: 1482out:
1455 lpfc_els_free_iocb(phba, cmdiocb); 1483 lpfc_els_free_iocb(phba, cmdiocb);
1456 return; 1484 return;
@@ -1556,11 +1584,19 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1556 psli = &phba->sli; 1584 psli = &phba->sli;
1557 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1585 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1558 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 1586 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
1559 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1560 if (!ndlp)
1561 return 1;
1562 1587
1563 lpfc_nlp_init(vport, ndlp, nportid); 1588 ndlp = lpfc_findnode_did(vport, nportid);
1589 if (!ndlp) {
1590 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1591 if (!ndlp)
1592 return 1;
1593 lpfc_nlp_init(vport, ndlp, nportid);
1594 lpfc_enqueue_node(vport, ndlp);
1595 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1596 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1597 if (!ndlp)
1598 return 1;
1599 }
1564 1600
1565 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1601 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1566 ndlp->nlp_DID, ELS_CMD_SCR); 1602 ndlp->nlp_DID, ELS_CMD_SCR);
@@ -1623,11 +1659,19 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1623 psli = &phba->sli; 1659 psli = &phba->sli;
1624 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 1660 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1625 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 1661 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
1626 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1627 if (!ndlp)
1628 return 1;
1629 1662
1630 lpfc_nlp_init(vport, ndlp, nportid); 1663 ndlp = lpfc_findnode_did(vport, nportid);
1664 if (!ndlp) {
1665 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1666 if (!ndlp)
1667 return 1;
1668 lpfc_nlp_init(vport, ndlp, nportid);
1669 lpfc_enqueue_node(vport, ndlp);
1670 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1671 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1672 if (!ndlp)
1673 return 1;
1674 }
1631 1675
1632 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1676 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1633 ndlp->nlp_DID, ELS_CMD_RNID); 1677 ndlp->nlp_DID, ELS_CMD_RNID);
@@ -1657,7 +1701,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
1657 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 1701 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
1658 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 1702 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
1659 ondlp = lpfc_findnode_did(vport, nportid); 1703 ondlp = lpfc_findnode_did(vport, nportid);
1660 if (ondlp) { 1704 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
1661 memcpy(&fp->OportName, &ondlp->nlp_portname, 1705 memcpy(&fp->OportName, &ondlp->nlp_portname,
1662 sizeof(struct lpfc_name)); 1706 sizeof(struct lpfc_name));
1663 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 1707 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
@@ -1690,6 +1734,7 @@ void
1690lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 1734lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
1691{ 1735{
1692 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1736 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1737 struct lpfc_work_evt *evtp;
1693 1738
1694 spin_lock_irq(shost->host_lock); 1739 spin_lock_irq(shost->host_lock);
1695 nlp->nlp_flag &= ~NLP_DELAY_TMO; 1740 nlp->nlp_flag &= ~NLP_DELAY_TMO;
@@ -1697,8 +1742,12 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
1697 del_timer_sync(&nlp->nlp_delayfunc); 1742 del_timer_sync(&nlp->nlp_delayfunc);
1698 nlp->nlp_last_elscmd = 0; 1743 nlp->nlp_last_elscmd = 0;
1699 1744
1700 if (!list_empty(&nlp->els_retry_evt.evt_listp)) 1745 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
1701 list_del_init(&nlp->els_retry_evt.evt_listp); 1746 list_del_init(&nlp->els_retry_evt.evt_listp);
1747 /* Decrement nlp reference count held for the delayed retry */
1748 evtp = &nlp->els_retry_evt;
1749 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
1750 }
1702 1751
1703 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 1752 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
1704 spin_lock_irq(shost->host_lock); 1753 spin_lock_irq(shost->host_lock);
@@ -1842,13 +1891,14 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1842 cmd = *elscmd++; 1891 cmd = *elscmd++;
1843 } 1892 }
1844 1893
1845 if (ndlp) 1894 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
1846 did = ndlp->nlp_DID; 1895 did = ndlp->nlp_DID;
1847 else { 1896 else {
1848 /* We should only hit this case for retrying PLOGI */ 1897 /* We should only hit this case for retrying PLOGI */
1849 did = irsp->un.elsreq64.remoteID; 1898 did = irsp->un.elsreq64.remoteID;
1850 ndlp = lpfc_findnode_did(vport, did); 1899 ndlp = lpfc_findnode_did(vport, did);
1851 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 1900 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
1901 && (cmd != ELS_CMD_PLOGI))
1852 return 1; 1902 return 1;
1853 } 1903 }
1854 1904
@@ -1870,18 +1920,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1870 break; 1920 break;
1871 1921
1872 case IOERR_ILLEGAL_COMMAND: 1922 case IOERR_ILLEGAL_COMMAND:
1873 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) && 1923 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1874 (cmd == ELS_CMD_FDISC)) { 1924 "0124 Retry illegal cmd x%x "
1875 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1925 "retry:x%x delay:x%x\n",
1876 "0124 FDISC failed (3/6) " 1926 cmd, cmdiocb->retry, delay);
1877 "retrying...\n"); 1927 retry = 1;
1878 lpfc_mbx_unreg_vpi(vport); 1928 /* All command's retry policy */
1879 retry = 1; 1929 maxretry = 8;
1880 /* FDISC retry policy */ 1930 if (cmdiocb->retry > 2)
1881 maxretry = 48; 1931 delay = 1000;
1882 if (cmdiocb->retry >= 32)
1883 delay = 1000;
1884 }
1885 break; 1932 break;
1886 1933
1887 case IOERR_NO_RESOURCES: 1934 case IOERR_NO_RESOURCES:
@@ -1967,6 +2014,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1967 break; 2014 break;
1968 2015
1969 case LSRJT_LOGICAL_ERR: 2016 case LSRJT_LOGICAL_ERR:
2017 /* There are some cases where switches return this
2018 * error when they are not ready and should be returning
2019 * Logical Busy. We should delay every time.
2020 */
2021 if (cmd == ELS_CMD_FDISC &&
2022 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
2023 maxretry = 3;
2024 delay = 1000;
2025 retry = 1;
2026 break;
2027 }
1970 case LSRJT_PROTOCOL_ERR: 2028 case LSRJT_PROTOCOL_ERR:
1971 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2029 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1972 (cmd == ELS_CMD_FDISC) && 2030 (cmd == ELS_CMD_FDISC) &&
@@ -1996,7 +2054,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1996 retry = 1; 2054 retry = 1;
1997 2055
1998 if ((cmd == ELS_CMD_FLOGI) && 2056 if ((cmd == ELS_CMD_FLOGI) &&
1999 (phba->fc_topology != TOPOLOGY_LOOP)) { 2057 (phba->fc_topology != TOPOLOGY_LOOP) &&
2058 !lpfc_error_lost_link(irsp)) {
2000 /* FLOGI retry policy */ 2059 /* FLOGI retry policy */
2001 retry = 1; 2060 retry = 1;
2002 maxretry = 48; 2061 maxretry = 48;
@@ -2322,6 +2381,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2322 if ((rspiocb->iocb.ulpStatus == 0) 2381 if ((rspiocb->iocb.ulpStatus == 0)
2323 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 2382 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
2324 lpfc_unreg_rpi(vport, ndlp); 2383 lpfc_unreg_rpi(vport, ndlp);
2384 /* Increment reference count to ndlp to hold the
2385 * reference to ndlp for the callback function.
2386 */
2325 mbox->context2 = lpfc_nlp_get(ndlp); 2387 mbox->context2 = lpfc_nlp_get(ndlp);
2326 mbox->vport = vport; 2388 mbox->vport = vport;
2327 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 2389 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
@@ -2335,9 +2397,13 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2335 NLP_STE_REG_LOGIN_ISSUE); 2397 NLP_STE_REG_LOGIN_ISSUE);
2336 } 2398 }
2337 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 2399 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
2338 != MBX_NOT_FINISHED) { 2400 != MBX_NOT_FINISHED)
2339 goto out; 2401 goto out;
2340 } 2402 else
2403 /* Decrement the ndlp reference count we
2404 * set for this failed mailbox command.
2405 */
2406 lpfc_nlp_put(ndlp);
2341 2407
2342 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 2408 /* ELS rsp: Cannot issue reg_login for <NPortid> */
2343 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2409 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -2796,6 +2862,8 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport)
2796 2862
2797 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2863 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2798 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2864 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2865 if (!NLP_CHK_NODE_ACT(ndlp))
2866 continue;
2799 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 2867 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2800 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 2868 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
2801 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { 2869 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
@@ -2833,6 +2901,8 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
2833 2901
2834 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 2902 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
2835 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 2903 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2904 if (!NLP_CHK_NODE_ACT(ndlp))
2905 continue;
2836 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 2906 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2837 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 2907 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
2838 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 2908 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
@@ -2869,6 +2939,16 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport)
2869 struct lpfc_hba *phba = vport->phba; 2939 struct lpfc_hba *phba = vport->phba;
2870 int i; 2940 int i;
2871 2941
2942 spin_lock_irq(shost->host_lock);
2943 if (vport->fc_rscn_flush) {
2944 /* Another thread is walking fc_rscn_id_list on this vport */
2945 spin_unlock_irq(shost->host_lock);
2946 return;
2947 }
2948 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
2949 vport->fc_rscn_flush = 1;
2950 spin_unlock_irq(shost->host_lock);
2951
2872 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 2952 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
2873 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 2953 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
2874 vport->fc_rscn_id_list[i] = NULL; 2954 vport->fc_rscn_id_list[i] = NULL;
@@ -2878,6 +2958,8 @@ lpfc_els_flush_rscn(struct lpfc_vport *vport)
2878 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 2958 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
2879 spin_unlock_irq(shost->host_lock); 2959 spin_unlock_irq(shost->host_lock);
2880 lpfc_can_disctmo(vport); 2960 lpfc_can_disctmo(vport);
2961 /* Indicate we are done walking this fc_rscn_id_list */
2962 vport->fc_rscn_flush = 0;
2881} 2963}
2882 2964
2883int 2965int
@@ -2887,6 +2969,7 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
2887 D_ID rscn_did; 2969 D_ID rscn_did;
2888 uint32_t *lp; 2970 uint32_t *lp;
2889 uint32_t payload_len, i; 2971 uint32_t payload_len, i;
2972 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2890 2973
2891 ns_did.un.word = did; 2974 ns_did.un.word = did;
2892 2975
@@ -2898,6 +2981,15 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
2898 if (vport->fc_flag & FC_RSCN_DISCOVERY) 2981 if (vport->fc_flag & FC_RSCN_DISCOVERY)
2899 return did; 2982 return did;
2900 2983
2984 spin_lock_irq(shost->host_lock);
2985 if (vport->fc_rscn_flush) {
2986 /* Another thread is walking fc_rscn_id_list on this vport */
2987 spin_unlock_irq(shost->host_lock);
2988 return 0;
2989 }
2990 /* Indicate we are walking fc_rscn_id_list on this vport */
2991 vport->fc_rscn_flush = 1;
2992 spin_unlock_irq(shost->host_lock);
2901 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 2993 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
2902 lp = vport->fc_rscn_id_list[i]->virt; 2994 lp = vport->fc_rscn_id_list[i]->virt;
2903 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 2995 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
@@ -2908,16 +3000,16 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
2908 switch (rscn_did.un.b.resv) { 3000 switch (rscn_did.un.b.resv) {
2909 case 0: /* Single N_Port ID effected */ 3001 case 0: /* Single N_Port ID effected */
2910 if (ns_did.un.word == rscn_did.un.word) 3002 if (ns_did.un.word == rscn_did.un.word)
2911 return did; 3003 goto return_did_out;
2912 break; 3004 break;
2913 case 1: /* Whole N_Port Area effected */ 3005 case 1: /* Whole N_Port Area effected */
2914 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 3006 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
2915 && (ns_did.un.b.area == rscn_did.un.b.area)) 3007 && (ns_did.un.b.area == rscn_did.un.b.area))
2916 return did; 3008 goto return_did_out;
2917 break; 3009 break;
2918 case 2: /* Whole N_Port Domain effected */ 3010 case 2: /* Whole N_Port Domain effected */
2919 if (ns_did.un.b.domain == rscn_did.un.b.domain) 3011 if (ns_did.un.b.domain == rscn_did.un.b.domain)
2920 return did; 3012 goto return_did_out;
2921 break; 3013 break;
2922 default: 3014 default:
2923 /* Unknown Identifier in RSCN node */ 3015 /* Unknown Identifier in RSCN node */
@@ -2926,11 +3018,17 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
2926 "RSCN payload Data: x%x\n", 3018 "RSCN payload Data: x%x\n",
2927 rscn_did.un.word); 3019 rscn_did.un.word);
2928 case 3: /* Whole Fabric effected */ 3020 case 3: /* Whole Fabric effected */
2929 return did; 3021 goto return_did_out;
2930 } 3022 }
2931 } 3023 }
2932 } 3024 }
3025 /* Indicate we are done with walking fc_rscn_id_list on this vport */
3026 vport->fc_rscn_flush = 0;
2933 return 0; 3027 return 0;
3028return_did_out:
3029 /* Indicate we are done with walking fc_rscn_id_list on this vport */
3030 vport->fc_rscn_flush = 0;
3031 return did;
2934} 3032}
2935 3033
2936static int 3034static int
@@ -2943,7 +3041,8 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
2943 */ 3041 */
2944 3042
2945 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 3043 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2946 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE || 3044 if (!NLP_CHK_NODE_ACT(ndlp) ||
3045 ndlp->nlp_state == NLP_STE_UNUSED_NODE ||
2947 lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0) 3046 lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0)
2948 continue; 3047 continue;
2949 3048
@@ -2971,7 +3070,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2971 uint32_t *lp, *datap; 3070 uint32_t *lp, *datap;
2972 IOCB_t *icmd; 3071 IOCB_t *icmd;
2973 uint32_t payload_len, length, nportid, *cmd; 3072 uint32_t payload_len, length, nportid, *cmd;
2974 int rscn_cnt = vport->fc_rscn_id_cnt; 3073 int rscn_cnt;
2975 int rscn_id = 0, hba_id = 0; 3074 int rscn_id = 0, hba_id = 0;
2976 int i; 3075 int i;
2977 3076
@@ -2984,7 +3083,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
2984 /* RSCN received */ 3083 /* RSCN received */
2985 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3084 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2986 "0214 RSCN received Data: x%x x%x x%x x%x\n", 3085 "0214 RSCN received Data: x%x x%x x%x x%x\n",
2987 vport->fc_flag, payload_len, *lp, rscn_cnt); 3086 vport->fc_flag, payload_len, *lp,
3087 vport->fc_rscn_id_cnt);
2988 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 3088 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
2989 fc_host_post_event(shost, fc_get_event_number(), 3089 fc_host_post_event(shost, fc_get_event_number(),
2990 FCH_EVT_RSCN, lp[i]); 3090 FCH_EVT_RSCN, lp[i]);
@@ -3022,7 +3122,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3022 "0214 Ignore RSCN " 3122 "0214 Ignore RSCN "
3023 "Data: x%x x%x x%x x%x\n", 3123 "Data: x%x x%x x%x x%x\n",
3024 vport->fc_flag, payload_len, 3124 vport->fc_flag, payload_len,
3025 *lp, rscn_cnt); 3125 *lp, vport->fc_rscn_id_cnt);
3026 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 3126 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3027 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 3127 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
3028 ndlp->nlp_DID, vport->port_state, 3128 ndlp->nlp_DID, vport->port_state,
@@ -3034,6 +3134,18 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3034 } 3134 }
3035 } 3135 }
3036 3136
3137 spin_lock_irq(shost->host_lock);
3138 if (vport->fc_rscn_flush) {
3139 /* Another thread is walking fc_rscn_id_list on this vport */
3140 spin_unlock_irq(shost->host_lock);
3141 vport->fc_flag |= FC_RSCN_DISCOVERY;
3142 return 0;
3143 }
3144 /* Indicate we are walking fc_rscn_id_list on this vport */
3145 vport->fc_rscn_flush = 1;
3146 spin_unlock_irq(shost->host_lock);
 3147 /* Get the array count after successfully acquiring the token */
3148 rscn_cnt = vport->fc_rscn_id_cnt;
3037 /* If we are already processing an RSCN, save the received 3149 /* If we are already processing an RSCN, save the received
3038 * RSCN payload buffer, cmdiocb->context2 to process later. 3150 * RSCN payload buffer, cmdiocb->context2 to process later.
3039 */ 3151 */
@@ -3055,7 +3167,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3055 if ((rscn_cnt) && 3167 if ((rscn_cnt) &&
3056 (payload_len + length <= LPFC_BPL_SIZE)) { 3168 (payload_len + length <= LPFC_BPL_SIZE)) {
3057 *cmd &= ELS_CMD_MASK; 3169 *cmd &= ELS_CMD_MASK;
3058 *cmd |= be32_to_cpu(payload_len + length); 3170 *cmd |= cpu_to_be32(payload_len + length);
3059 memcpy(((uint8_t *)cmd) + length, lp, 3171 memcpy(((uint8_t *)cmd) + length, lp,
3060 payload_len); 3172 payload_len);
3061 } else { 3173 } else {
@@ -3066,7 +3178,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3066 */ 3178 */
3067 cmdiocb->context2 = NULL; 3179 cmdiocb->context2 = NULL;
3068 } 3180 }
3069
3070 /* Deferred RSCN */ 3181 /* Deferred RSCN */
3071 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3182 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3072 "0235 Deferred RSCN " 3183 "0235 Deferred RSCN "
@@ -3083,9 +3194,10 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3083 vport->fc_rscn_id_cnt, vport->fc_flag, 3194 vport->fc_rscn_id_cnt, vport->fc_flag,
3084 vport->port_state); 3195 vport->port_state);
3085 } 3196 }
3197 /* Indicate we are done walking fc_rscn_id_list on this vport */
3198 vport->fc_rscn_flush = 0;
3086 /* Send back ACC */ 3199 /* Send back ACC */
3087 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 3200 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3088
3089 /* send RECOVERY event for ALL nodes that match RSCN payload */ 3201 /* send RECOVERY event for ALL nodes that match RSCN payload */
3090 lpfc_rscn_recovery_check(vport); 3202 lpfc_rscn_recovery_check(vport);
3091 spin_lock_irq(shost->host_lock); 3203 spin_lock_irq(shost->host_lock);
@@ -3093,7 +3205,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3093 spin_unlock_irq(shost->host_lock); 3205 spin_unlock_irq(shost->host_lock);
3094 return 0; 3206 return 0;
3095 } 3207 }
3096
3097 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 3208 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3098 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 3209 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
3099 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 3210 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
@@ -3102,20 +3213,18 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3102 vport->fc_flag |= FC_RSCN_MODE; 3213 vport->fc_flag |= FC_RSCN_MODE;
3103 spin_unlock_irq(shost->host_lock); 3214 spin_unlock_irq(shost->host_lock);
3104 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 3215 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
3216 /* Indicate we are done walking fc_rscn_id_list on this vport */
3217 vport->fc_rscn_flush = 0;
3105 /* 3218 /*
3106 * If we zero, cmdiocb->context2, the calling routine will 3219 * If we zero, cmdiocb->context2, the calling routine will
3107 * not try to free it. 3220 * not try to free it.
3108 */ 3221 */
3109 cmdiocb->context2 = NULL; 3222 cmdiocb->context2 = NULL;
3110
3111 lpfc_set_disctmo(vport); 3223 lpfc_set_disctmo(vport);
3112
3113 /* Send back ACC */ 3224 /* Send back ACC */
3114 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 3225 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
3115
3116 /* send RECOVERY event for ALL nodes that match RSCN payload */ 3226 /* send RECOVERY event for ALL nodes that match RSCN payload */
3117 lpfc_rscn_recovery_check(vport); 3227 lpfc_rscn_recovery_check(vport);
3118
3119 return lpfc_els_handle_rscn(vport); 3228 return lpfc_els_handle_rscn(vport);
3120} 3229}
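The hunk above introduces a simple busy flag, fc_rscn_flush, that is claimed under shost->host_lock before fc_rscn_id_list is walked and cleared again on every exit path. The sketch below is a minimal user-space illustration of that claim-token-under-lock pattern, assuming a pthread mutex in place of the host lock; only the rscn_flush name comes from the patch, the struct and helpers are illustrative.

#include <pthread.h>
#include <stdio.h>

/* Minimal stand-in for the vport: only the fields the pattern needs. */
struct vport_sketch {
	pthread_mutex_t host_lock;	/* stands in for shost->host_lock */
	int rscn_flush;			/* 1 while one thread walks the RSCN list */
};

/* Returns 0 if another walker already holds the token (caller defers work). */
static int rscn_walk(struct vport_sketch *vp)
{
	pthread_mutex_lock(&vp->host_lock);
	if (vp->rscn_flush) {
		/* Someone else is walking fc_rscn_id_list; defer instead of racing. */
		pthread_mutex_unlock(&vp->host_lock);
		return 0;
	}
	vp->rscn_flush = 1;		/* claim the token */
	pthread_mutex_unlock(&vp->host_lock);

	/* ... walk the RSCN id list without holding the lock ... */

	vp->rscn_flush = 0;		/* every exit path must release the token */
	return 1;
}

int main(void)
{
	struct vport_sketch vp;

	pthread_mutex_init(&vp.host_lock, NULL);
	vp.rscn_flush = 0;
	printf("walk %s\n", rscn_walk(&vp) ? "done" : "deferred");
	return 0;
}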
3121 3230
@@ -3145,7 +3254,8 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
3145 vport->num_disc_nodes = 0; 3254 vport->num_disc_nodes = 0;
3146 3255
3147 ndlp = lpfc_findnode_did(vport, NameServer_DID); 3256 ndlp = lpfc_findnode_did(vport, NameServer_DID);
3148 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 3257 if (ndlp && NLP_CHK_NODE_ACT(ndlp)
3258 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
3149 /* Good ndlp, issue CT Request to NameServer */ 3259 /* Good ndlp, issue CT Request to NameServer */
3150 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0) 3260 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
3151 /* Wait for NameServer query cmpl before we can 3261 /* Wait for NameServer query cmpl before we can
@@ -3155,25 +3265,35 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
3155 /* If login to NameServer does not exist, issue one */ 3265 /* If login to NameServer does not exist, issue one */
3156 /* Good status, issue PLOGI to NameServer */ 3266 /* Good status, issue PLOGI to NameServer */
3157 ndlp = lpfc_findnode_did(vport, NameServer_DID); 3267 ndlp = lpfc_findnode_did(vport, NameServer_DID);
3158 if (ndlp) 3268 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
3159 /* Wait for NameServer login cmpl before we can 3269 /* Wait for NameServer login cmpl before we can
3160 continue */ 3270 continue */
3161 return 1; 3271 return 1;
3162 3272
3163 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 3273 if (ndlp) {
3164 if (!ndlp) { 3274 ndlp = lpfc_enable_node(vport, ndlp,
3165 lpfc_els_flush_rscn(vport); 3275 NLP_STE_PLOGI_ISSUE);
3166 return 0; 3276 if (!ndlp) {
3277 lpfc_els_flush_rscn(vport);
3278 return 0;
3279 }
3280 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
3167 } else { 3281 } else {
3282 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
3283 if (!ndlp) {
3284 lpfc_els_flush_rscn(vport);
3285 return 0;
3286 }
3168 lpfc_nlp_init(vport, ndlp, NameServer_DID); 3287 lpfc_nlp_init(vport, ndlp, NameServer_DID);
3169 ndlp->nlp_type |= NLP_FABRIC;
3170 ndlp->nlp_prev_state = ndlp->nlp_state; 3288 ndlp->nlp_prev_state = ndlp->nlp_state;
3171 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 3289 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
3172 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
3173 /* Wait for NameServer login cmpl before we can
3174 continue */
3175 return 1;
3176 } 3290 }
3291 ndlp->nlp_type |= NLP_FABRIC;
3292 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
3293 /* Wait for NameServer login cmpl before we can
3294 * continue
3295 */
3296 return 1;
3177 } 3297 }
3178 3298
3179 lpfc_els_flush_rscn(vport); 3299 lpfc_els_flush_rscn(vport);
@@ -3672,6 +3792,8 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3672 3792
3673 list_for_each_entry_safe(ndlp, next_ndlp, 3793 list_for_each_entry_safe(ndlp, next_ndlp,
3674 &vport->fc_nodes, nlp_listp) { 3794 &vport->fc_nodes, nlp_listp) {
3795 if (!NLP_CHK_NODE_ACT(ndlp))
3796 continue;
3675 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 3797 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
3676 continue; 3798 continue;
3677 if (ndlp->nlp_type & NLP_FABRIC) { 3799 if (ndlp->nlp_type & NLP_FABRIC) {
@@ -3697,6 +3819,8 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3697 */ 3819 */
3698 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 3820 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
3699 nlp_listp) { 3821 nlp_listp) {
3822 if (!NLP_CHK_NODE_ACT(ndlp))
3823 continue;
3700 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 3824 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
3701 continue; 3825 continue;
3702 3826
@@ -3936,7 +4060,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3936 uint32_t cmd, did, newnode, rjt_err = 0; 4060 uint32_t cmd, did, newnode, rjt_err = 0;
3937 IOCB_t *icmd = &elsiocb->iocb; 4061 IOCB_t *icmd = &elsiocb->iocb;
3938 4062
3939 if (vport == NULL || elsiocb->context2 == NULL) 4063 if (!vport || !(elsiocb->context2))
3940 goto dropit; 4064 goto dropit;
3941 4065
3942 newnode = 0; 4066 newnode = 0;
@@ -3971,14 +4095,20 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3971 lpfc_nlp_init(vport, ndlp, did); 4095 lpfc_nlp_init(vport, ndlp, did);
3972 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 4096 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
3973 newnode = 1; 4097 newnode = 1;
3974 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) { 4098 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
3975 ndlp->nlp_type |= NLP_FABRIC; 4099 ndlp->nlp_type |= NLP_FABRIC;
4100 } else {
4101 if (!NLP_CHK_NODE_ACT(ndlp)) {
4102 ndlp = lpfc_enable_node(vport, ndlp,
4103 NLP_STE_UNUSED_NODE);
4104 if (!ndlp)
4105 goto dropit;
3976 } 4106 }
3977 }
3978 else {
3979 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 4107 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
 3980 /* This is similar to the new node path */ 4108 /* This is similar to the new node path */
3981 lpfc_nlp_get(ndlp); 4109 ndlp = lpfc_nlp_get(ndlp);
4110 if (!ndlp)
4111 goto dropit;
3982 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 4112 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
3983 newnode = 1; 4113 newnode = 1;
3984 } 4114 }
@@ -3987,6 +4117,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3987 phba->fc_stat.elsRcvFrame++; 4117 phba->fc_stat.elsRcvFrame++;
3988 if (elsiocb->context1) 4118 if (elsiocb->context1)
3989 lpfc_nlp_put(elsiocb->context1); 4119 lpfc_nlp_put(elsiocb->context1);
4120
3990 elsiocb->context1 = lpfc_nlp_get(ndlp); 4121 elsiocb->context1 = lpfc_nlp_get(ndlp);
3991 elsiocb->vport = vport; 4122 elsiocb->vport = vport;
3992 4123
@@ -4007,8 +4138,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4007 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 4138 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
4008 4139
4009 if (vport->port_state < LPFC_DISC_AUTH) { 4140 if (vport->port_state < LPFC_DISC_AUTH) {
4010 rjt_err = LSRJT_UNABLE_TPC; 4141 if (!(phba->pport->fc_flag & FC_PT2PT) ||
4011 break; 4142 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
4143 rjt_err = LSRJT_UNABLE_TPC;
4144 break;
4145 }
4146 /* We get here, and drop thru, if we are PT2PT with
4147 * another NPort and the other side has initiated
4148 * the PLOGI before responding to our FLOGI.
4149 */
4012 } 4150 }
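The change above relaxes the early-PLOGI rejection: before the port reaches LPFC_DISC_AUTH, the PLOGI is only bounced with LSRJT_UNABLE_TPC when the port is not in point-to-point mode or has already issued its own PLOGI. A hedged sketch of that decision follows; the flag values are made up and merely stand in for the real FC_PT2PT/FC_PT2PT_PLOGI bits.

#include <stdbool.h>
#include <stdio.h>

#define FC_PT2PT	0x1	/* assumed value, illustrative only */
#define FC_PT2PT_PLOGI	0x2	/* assumed value, illustrative only */

/* Decide whether an early PLOGI (port_state < DISC_AUTH) must be rejected.
 * In point-to-point mode, when the peer initiated PLOGI before answering
 * our FLOGI, the patch lets the PLOGI through instead of sending LS_RJT. */
static bool reject_early_plogi(unsigned int fc_flag)
{
	if (!(fc_flag & FC_PT2PT) || (fc_flag & FC_PT2PT_PLOGI))
		return true;	/* not pt2pt, or we already sent our own PLOGI */
	return false;		/* pt2pt peer spoke first: accept and drop through */
}

int main(void)
{
	printf("%d %d\n", reject_early_plogi(0), reject_early_plogi(FC_PT2PT));
	return 0;
}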
4013 4151
4014 shost = lpfc_shost_from_vport(vport); 4152 shost = lpfc_shost_from_vport(vport);
@@ -4251,15 +4389,15 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4251 vport = lpfc_find_vport_by_vpid(phba, vpi); 4389 vport = lpfc_find_vport_by_vpid(phba, vpi);
4252 } 4390 }
4253 } 4391 }
4254 /* If there are no BDEs associated 4392 /* If there are no BDEs associated
4255 * with this IOCB, there is nothing to do. 4393 * with this IOCB, there is nothing to do.
4256 */ 4394 */
4257 if (icmd->ulpBdeCount == 0) 4395 if (icmd->ulpBdeCount == 0)
4258 return; 4396 return;
4259 4397
4260 /* type of ELS cmd is first 32bit word 4398 /* type of ELS cmd is first 32bit word
4261 * in packet 4399 * in packet
4262 */ 4400 */
4263 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4401 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4264 elsiocb->context2 = bdeBuf1; 4402 elsiocb->context2 = bdeBuf1;
4265 } else { 4403 } else {
@@ -4314,6 +4452,18 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
4314 } 4452 }
4315 lpfc_nlp_init(vport, ndlp, NameServer_DID); 4453 lpfc_nlp_init(vport, ndlp, NameServer_DID);
4316 ndlp->nlp_type |= NLP_FABRIC; 4454 ndlp->nlp_type |= NLP_FABRIC;
4455 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4456 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
4457 if (!ndlp) {
4458 if (phba->fc_topology == TOPOLOGY_LOOP) {
4459 lpfc_disc_start(vport);
4460 return;
4461 }
4462 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4463 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4464 "0348 NameServer login: node freed\n");
4465 return;
4466 }
4317 } 4467 }
4318 4468
4319 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4469 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
@@ -4360,6 +4510,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4360 switch (mb->mbxStatus) { 4510 switch (mb->mbxStatus) {
4361 case 0x11: /* unsupported feature */ 4511 case 0x11: /* unsupported feature */
4362 case 0x9603: /* max_vpi exceeded */ 4512 case 0x9603: /* max_vpi exceeded */
4513 case 0x9602: /* Link event since CLEAR_LA */
4363 /* giving up on vport registration */ 4514 /* giving up on vport registration */
4364 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4515 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4365 spin_lock_irq(shost->host_lock); 4516 spin_lock_irq(shost->host_lock);
@@ -4373,7 +4524,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4373 spin_lock_irq(shost->host_lock); 4524 spin_lock_irq(shost->host_lock);
4374 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 4525 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4375 spin_unlock_irq(shost->host_lock); 4526 spin_unlock_irq(shost->host_lock);
4376 lpfc_initial_fdisc(vport); 4527 if (vport->port_type == LPFC_PHYSICAL_PORT)
4528 lpfc_initial_flogi(vport);
4529 else
4530 lpfc_initial_fdisc(vport);
4377 break; 4531 break;
4378 } 4532 }
4379 4533
@@ -4471,7 +4625,6 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4471 irsp->ulpStatus, irsp->un.ulpWord[4]); 4625 irsp->ulpStatus, irsp->un.ulpWord[4]);
4472 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING) 4626 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
4473 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4627 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4474
4475 lpfc_nlp_put(ndlp); 4628 lpfc_nlp_put(ndlp);
4476 /* giving up on FDISC. Cancel discovery timer */ 4629 /* giving up on FDISC. Cancel discovery timer */
4477 lpfc_can_disctmo(vport); 4630 lpfc_can_disctmo(vport);
@@ -4492,8 +4645,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4492 */ 4645 */
4493 list_for_each_entry_safe(np, next_np, 4646 list_for_each_entry_safe(np, next_np,
4494 &vport->fc_nodes, nlp_listp) { 4647 &vport->fc_nodes, nlp_listp) {
4495 if (np->nlp_state != NLP_STE_NPR_NODE 4648 if (!NLP_CHK_NODE_ACT(ndlp) ||
4496 || !(np->nlp_flag & NLP_NPR_ADISC)) 4649 (np->nlp_state != NLP_STE_NPR_NODE) ||
4650 !(np->nlp_flag & NLP_NPR_ADISC))
4497 continue; 4651 continue;
4498 spin_lock_irq(shost->host_lock); 4652 spin_lock_irq(shost->host_lock);
4499 np->nlp_flag &= ~NLP_NPR_ADISC; 4653 np->nlp_flag &= ~NLP_NPR_ADISC;
@@ -4599,6 +4753,8 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4599{ 4753{
4600 struct lpfc_vport *vport = cmdiocb->vport; 4754 struct lpfc_vport *vport = cmdiocb->vport;
4601 IOCB_t *irsp; 4755 IOCB_t *irsp;
4756 struct lpfc_nodelist *ndlp;
4757 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
4602 4758
4603 irsp = &rspiocb->iocb; 4759 irsp = &rspiocb->iocb;
4604 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4760 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -4607,6 +4763,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4607 4763
4608 lpfc_els_free_iocb(phba, cmdiocb); 4764 lpfc_els_free_iocb(phba, cmdiocb);
4609 vport->unreg_vpi_cmpl = VPORT_ERROR; 4765 vport->unreg_vpi_cmpl = VPORT_ERROR;
4766
4767 /* Trigger the release of the ndlp after logo */
4768 lpfc_nlp_put(ndlp);
4610} 4769}
4611 4770
4612int 4771int
@@ -4686,11 +4845,12 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
4686repeat: 4845repeat:
4687 iocb = NULL; 4846 iocb = NULL;
4688 spin_lock_irqsave(&phba->hbalock, iflags); 4847 spin_lock_irqsave(&phba->hbalock, iflags);
4689 /* Post any pending iocb to the SLI layer */ 4848 /* Post any pending iocb to the SLI layer */
4690 if (atomic_read(&phba->fabric_iocb_count) == 0) { 4849 if (atomic_read(&phba->fabric_iocb_count) == 0) {
4691 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 4850 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
4692 list); 4851 list);
4693 if (iocb) 4852 if (iocb)
4853 /* Increment fabric iocb count to hold the position */
4694 atomic_inc(&phba->fabric_iocb_count); 4854 atomic_inc(&phba->fabric_iocb_count);
4695 } 4855 }
4696 spin_unlock_irqrestore(&phba->hbalock, iflags); 4856 spin_unlock_irqrestore(&phba->hbalock, iflags);
@@ -4737,9 +4897,7 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
4737 int blocked; 4897 int blocked;
4738 4898
4739 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 4899 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4740 /* Start a timer to unblock fabric 4900 /* Start a timer to unblock fabric iocbs after 100ms */
4741 * iocbs after 100ms
4742 */
4743 if (!blocked) 4901 if (!blocked)
4744 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); 4902 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
4745 4903
@@ -4787,8 +4945,8 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4787 4945
4788 atomic_dec(&phba->fabric_iocb_count); 4946 atomic_dec(&phba->fabric_iocb_count);
4789 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { 4947 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
4790 /* Post any pending iocbs to HBA */ 4948 /* Post any pending iocbs to HBA */
4791 lpfc_resume_fabric_iocbs(phba); 4949 lpfc_resume_fabric_iocbs(phba);
4792 } 4950 }
4793} 4951}
4794 4952
@@ -4807,6 +4965,9 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
4807 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 4965 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
4808 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 4966 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4809 4967
4968 if (ready)
4969 /* Increment fabric iocb count to hold the position */
4970 atomic_inc(&phba->fabric_iocb_count);
4810 spin_unlock_irqrestore(&phba->hbalock, iflags); 4971 spin_unlock_irqrestore(&phba->hbalock, iflags);
4811 if (ready) { 4972 if (ready) {
4812 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 4973 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
@@ -4817,7 +4978,6 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
4817 "Fabric sched2: ste:x%x", 4978 "Fabric sched2: ste:x%x",
4818 iocb->vport->port_state, 0, 0); 4979 iocb->vport->port_state, 0, 0);
4819 4980
4820 atomic_inc(&phba->fabric_iocb_count);
4821 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); 4981 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
4822 4982
4823 if (ret == IOCB_ERROR) { 4983 if (ret == IOCB_ERROR) {
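Both fabric-iocb hunks above move the atomic_inc of fabric_iocb_count inside the hbalock-protected region, so the slot is reserved at the same moment the count is observed to be zero. Below is a rough user-space sketch of that idea, assuming a pthread mutex and C11 atomics in place of hbalock and the kernel atomic type; the function names are invented for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int fabric_iocb_count;	/* at most one fabric command in flight */

/* Returns true when the caller may issue its fabric command now. */
static bool claim_fabric_slot(void)
{
	bool ready;

	pthread_mutex_lock(&hbalock);
	ready = (atomic_load(&fabric_iocb_count) == 0);
	if (ready)
		/* Increment while still holding the lock so a second caller
		 * cannot also observe count == 0 and issue concurrently. */
		atomic_fetch_add(&fabric_iocb_count, 1);
	pthread_mutex_unlock(&hbalock);
	return ready;
}

static void release_fabric_slot(void)
{
	atomic_fetch_sub(&fabric_iocb_count, 1);
}

int main(void)
{
	if (claim_fabric_slot()) {
		/* ... issue the command ... */
		release_fabric_slot();
	}
	printf("count=%d\n", atomic_load(&fabric_iocb_count));
	return 0;
}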
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index dc042bd97baa..976653440fba 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -272,9 +272,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
272 if (!(vport->load_flag & FC_UNLOADING) && 272 if (!(vport->load_flag & FC_UNLOADING) &&
273 !(ndlp->nlp_flag & NLP_DELAY_TMO) && 273 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
274 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 274 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
275 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) { 275 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
276 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 276 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
277 }
278} 277}
279 278
280 279
@@ -566,9 +565,10 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
566 int rc; 565 int rc;
567 566
568 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 567 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
568 if (!NLP_CHK_NODE_ACT(ndlp))
569 continue;
569 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 570 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
570 continue; 571 continue;
571
572 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || 572 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
573 ((vport->port_type == LPFC_NPIV_PORT) && 573 ((vport->port_type == LPFC_NPIV_PORT) &&
574 (ndlp->nlp_DID == NameServer_DID))) 574 (ndlp->nlp_DID == NameServer_DID)))
@@ -629,9 +629,8 @@ lpfc_linkdown(struct lpfc_hba *phba)
629 LPFC_MBOXQ_t *mb; 629 LPFC_MBOXQ_t *mb;
630 int i; 630 int i;
631 631
632 if (phba->link_state == LPFC_LINK_DOWN) { 632 if (phba->link_state == LPFC_LINK_DOWN)
633 return 0; 633 return 0;
634 }
635 spin_lock_irq(&phba->hbalock); 634 spin_lock_irq(&phba->hbalock);
636 if (phba->link_state > LPFC_LINK_DOWN) { 635 if (phba->link_state > LPFC_LINK_DOWN) {
637 phba->link_state = LPFC_LINK_DOWN; 636 phba->link_state = LPFC_LINK_DOWN;
@@ -684,20 +683,21 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
684 struct lpfc_nodelist *ndlp; 683 struct lpfc_nodelist *ndlp;
685 684
686 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 685 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
686 if (!NLP_CHK_NODE_ACT(ndlp))
687 continue;
687 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 688 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
688 continue; 689 continue;
689
690 if (ndlp->nlp_type & NLP_FABRIC) { 690 if (ndlp->nlp_type & NLP_FABRIC) {
691 /* On Linkup its safe to clean up the ndlp 691 /* On Linkup its safe to clean up the ndlp
692 * from Fabric connections. 692 * from Fabric connections.
693 */ 693 */
694 if (ndlp->nlp_DID != Fabric_DID) 694 if (ndlp->nlp_DID != Fabric_DID)
695 lpfc_unreg_rpi(vport, ndlp); 695 lpfc_unreg_rpi(vport, ndlp);
696 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 696 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
697 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 697 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
698 /* Fail outstanding IO now since device is 698 /* Fail outstanding IO now since device is
699 * marked for PLOGI. 699 * marked for PLOGI.
700 */ 700 */
701 lpfc_unreg_rpi(vport, ndlp); 701 lpfc_unreg_rpi(vport, ndlp);
702 } 702 }
703 } 703 }
@@ -799,21 +799,9 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
799 writel(control, phba->HCregaddr); 799 writel(control, phba->HCregaddr);
800 readl(phba->HCregaddr); /* flush */ 800 readl(phba->HCregaddr); /* flush */
801 spin_unlock_irq(&phba->hbalock); 801 spin_unlock_irq(&phba->hbalock);
802 mempool_free(pmb, phba->mbox_mem_pool);
802 return; 803 return;
803 804
804 vport->num_disc_nodes = 0;
805 /* go thru NPR nodes and issue ELS PLOGIs */
806 if (vport->fc_npr_cnt)
807 lpfc_els_disc_plogi(vport);
808
809 if (!vport->num_disc_nodes) {
810 spin_lock_irq(shost->host_lock);
811 vport->fc_flag &= ~FC_NDISC_ACTIVE;
812 spin_unlock_irq(shost->host_lock);
813 }
814
815 vport->port_state = LPFC_VPORT_READY;
816
817out: 805out:
818 /* Device Discovery completes */ 806 /* Device Discovery completes */
819 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 807 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
@@ -1133,7 +1121,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1133 if (la->attType == AT_LINK_UP) { 1121 if (la->attType == AT_LINK_UP) {
1134 phba->fc_stat.LinkUp++; 1122 phba->fc_stat.LinkUp++;
1135 if (phba->link_flag & LS_LOOPBACK_MODE) { 1123 if (phba->link_flag & LS_LOOPBACK_MODE) {
1136 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1124 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1137 "1306 Link Up Event in loop back mode " 1125 "1306 Link Up Event in loop back mode "
1138 "x%x received Data: x%x x%x x%x x%x\n", 1126 "x%x received Data: x%x x%x x%x x%x\n",
1139 la->eventTag, phba->fc_eventTag, 1127 la->eventTag, phba->fc_eventTag,
@@ -1150,11 +1138,21 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1150 lpfc_mbx_process_link_up(phba, la); 1138 lpfc_mbx_process_link_up(phba, la);
1151 } else { 1139 } else {
1152 phba->fc_stat.LinkDown++; 1140 phba->fc_stat.LinkDown++;
1153 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1141 if (phba->link_flag & LS_LOOPBACK_MODE) {
1142 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1143 "1308 Link Down Event in loop back mode "
1144 "x%x received "
1145 "Data: x%x x%x x%x\n",
1146 la->eventTag, phba->fc_eventTag,
1147 phba->pport->port_state, vport->fc_flag);
1148 }
1149 else {
1150 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1154 "1305 Link Down Event x%x received " 1151 "1305 Link Down Event x%x received "
1155 "Data: x%x x%x x%x\n", 1152 "Data: x%x x%x x%x\n",
1156 la->eventTag, phba->fc_eventTag, 1153 la->eventTag, phba->fc_eventTag,
1157 phba->pport->port_state, vport->fc_flag); 1154 phba->pport->port_state, vport->fc_flag);
1155 }
1158 lpfc_mbx_issue_link_down(phba); 1156 lpfc_mbx_issue_link_down(phba);
1159 } 1157 }
1160 1158
@@ -1305,7 +1303,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1305 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1303 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1306 kfree(mp); 1304 kfree(mp);
1307 mempool_free(pmb, phba->mbox_mem_pool); 1305 mempool_free(pmb, phba->mbox_mem_pool);
1308 lpfc_nlp_put(ndlp);
1309 1306
1310 if (phba->fc_topology == TOPOLOGY_LOOP) { 1307 if (phba->fc_topology == TOPOLOGY_LOOP) {
1311 /* FLOGI failed, use loop map to make discovery list */ 1308 /* FLOGI failed, use loop map to make discovery list */
@@ -1313,6 +1310,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1313 1310
1314 /* Start discovery */ 1311 /* Start discovery */
1315 lpfc_disc_start(vport); 1312 lpfc_disc_start(vport);
1313 /* Decrement the reference count to ndlp after the
 1314 * references to the ndlp are done.
1315 */
1316 lpfc_nlp_put(ndlp);
1316 return; 1317 return;
1317 } 1318 }
1318 1319
@@ -1320,6 +1321,10 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1320 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1321 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
1321 "0258 Register Fabric login error: 0x%x\n", 1322 "0258 Register Fabric login error: 0x%x\n",
1322 mb->mbxStatus); 1323 mb->mbxStatus);
 1324 /* Decrement the reference count to ndlp after the references
1325 * to the ndlp are done.
1326 */
1327 lpfc_nlp_put(ndlp);
1323 return; 1328 return;
1324 } 1329 }
1325 1330
@@ -1327,8 +1332,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1327 ndlp->nlp_type |= NLP_FABRIC; 1332 ndlp->nlp_type |= NLP_FABRIC;
1328 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 1333 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1329 1334
1330 lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */
1331
1332 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 1335 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
1333 vports = lpfc_create_vport_work_array(phba); 1336 vports = lpfc_create_vport_work_array(phba);
1334 if (vports != NULL) 1337 if (vports != NULL)
@@ -1356,6 +1359,11 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1356 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1359 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1357 kfree(mp); 1360 kfree(mp);
1358 mempool_free(pmb, phba->mbox_mem_pool); 1361 mempool_free(pmb, phba->mbox_mem_pool);
1362
1363 /* Drop the reference count from the mbox at the end after
 1364 * all the current references to the ndlp are done.
1365 */
1366 lpfc_nlp_put(ndlp);
1359 return; 1367 return;
1360} 1368}
1361 1369
@@ -1463,9 +1471,8 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1463 * registered the port. 1471 * registered the port.
1464 */ 1472 */
1465 if (ndlp->rport && ndlp->rport->dd_data && 1473 if (ndlp->rport && ndlp->rport->dd_data &&
1466 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) { 1474 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
1467 lpfc_nlp_put(ndlp); 1475 lpfc_nlp_put(ndlp);
1468 }
1469 1476
1470 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 1477 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
1471 "rport add: did:x%x flg:x%x type x%x", 1478 "rport add: did:x%x flg:x%x type x%x",
@@ -1660,6 +1667,18 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1660} 1667}
1661 1668
1662void 1669void
1670lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1671{
1672 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1673
1674 if (list_empty(&ndlp->nlp_listp)) {
1675 spin_lock_irq(shost->host_lock);
1676 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
1677 spin_unlock_irq(shost->host_lock);
1678 }
1679}
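lpfc_enqueue_node(), added above, only links the node when list_empty() reports it is not already on a list. The toy below reproduces that idempotent-enqueue guard with a self-contained doubly linked list; everything here is illustrative, the kernel uses its own list helpers and takes host_lock around the add.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

/* Idempotent enqueue: only link the node if it is not already on a list,
 * mirroring the list_empty() guard in the new lpfc_enqueue_node(). */
static void enqueue_once(struct list_head *node, struct list_head *list)
{
	if (list_empty(node))
		list_add_tail(node, list);
}

int main(void)
{
	struct list_head nodes, n;

	INIT_LIST_HEAD(&nodes);
	INIT_LIST_HEAD(&n);
	enqueue_once(&n, &nodes);
	enqueue_once(&n, &nodes);	/* second call is a no-op */
	printf("list populated: %d\n", !list_empty(&nodes));
	return 0;
}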
1680
1681void
1663lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 1682lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1664{ 1683{
1665 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1684 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
@@ -1672,7 +1691,80 @@ lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1672 list_del_init(&ndlp->nlp_listp); 1691 list_del_init(&ndlp->nlp_listp);
1673 spin_unlock_irq(shost->host_lock); 1692 spin_unlock_irq(shost->host_lock);
1674 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, 1693 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
1675 NLP_STE_UNUSED_NODE); 1694 NLP_STE_UNUSED_NODE);
1695}
1696
1697static void
1698lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1699{
1700 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1701 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1702 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1703 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1704 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
1705 NLP_STE_UNUSED_NODE);
1706}
1707
1708struct lpfc_nodelist *
1709lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1710 int state)
1711{
1712 struct lpfc_hba *phba = vport->phba;
1713 uint32_t did;
1714 unsigned long flags;
1715
1716 if (!ndlp)
1717 return NULL;
1718
1719 spin_lock_irqsave(&phba->ndlp_lock, flags);
1720 /* The ndlp should not be in memory free mode */
1721 if (NLP_CHK_FREE_REQ(ndlp)) {
1722 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
1723 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
1724 "0277 lpfc_enable_node: ndlp:x%p "
1725 "usgmap:x%x refcnt:%d\n",
1726 (void *)ndlp, ndlp->nlp_usg_map,
1727 atomic_read(&ndlp->kref.refcount));
1728 return NULL;
1729 }
1730 /* The ndlp should not already be in active mode */
1731 if (NLP_CHK_NODE_ACT(ndlp)) {
1732 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
1733 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
1734 "0278 lpfc_enable_node: ndlp:x%p "
1735 "usgmap:x%x refcnt:%d\n",
1736 (void *)ndlp, ndlp->nlp_usg_map,
1737 atomic_read(&ndlp->kref.refcount));
1738 return NULL;
1739 }
1740
1741 /* Keep the original DID */
1742 did = ndlp->nlp_DID;
1743
 1744 /* re-initialize ndlp except for the ndlp linked list pointer */
1745 memset((((char *)ndlp) + sizeof (struct list_head)), 0,
1746 sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
1747 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
1748 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
1749 init_timer(&ndlp->nlp_delayfunc);
1750 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
1751 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
1752 ndlp->nlp_DID = did;
1753 ndlp->vport = vport;
1754 ndlp->nlp_sid = NLP_NO_SID;
1755 /* ndlp management re-initialize */
1756 kref_init(&ndlp->kref);
1757 NLP_INT_NODE_ACT(ndlp);
1758
1759 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
1760
1761 if (state != NLP_STE_UNUSED_NODE)
1762 lpfc_nlp_set_state(vport, ndlp, state);
1763
1764 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
1765 "node enable: did:x%x",
1766 ndlp->nlp_DID, 0, 0);
1767 return ndlp;
1676} 1768}
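lpfc_enable_node() re-initializes a recycled node by zeroing everything after the embedded list_head, so the node keeps its place on vport->fc_nodes while all other state is reset. A minimal sketch of that layout-dependent trick is shown below with a hypothetical node type; the real struct lpfc_nodelist also restores the DID, vport and sid, re-arms its delay timer, and re-inits the kref under phba->ndlp_lock.

#include <stddef.h>
#include <string.h>

struct list_head { struct list_head *next, *prev; };

/* Illustrative node: the list linkage stays first, as in the driver,
 * so everything after it can be wiped in one memset. */
struct node_sketch {
	struct list_head link;	/* preserved across re-enable */
	unsigned int did;
	unsigned int state;
	int refcount;
};

static void reenable_node(struct node_sketch *n)
{
	unsigned int did = n->did;	/* keep the original DID */

	/* Zero every field except the embedded list_head linkage. */
	memset((char *)n + sizeof(struct list_head), 0,
	       sizeof(*n) - sizeof(struct list_head));

	n->did = did;
	n->refcount = 1;		/* fresh reference, as kref_init() would set */
}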
1677 1769
1678void 1770void
@@ -1972,7 +2064,21 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1972 "Data: x%x x%x x%x\n", 2064 "Data: x%x x%x x%x\n",
1973 ndlp->nlp_DID, ndlp->nlp_flag, 2065 ndlp->nlp_DID, ndlp->nlp_flag,
1974 ndlp->nlp_state, ndlp->nlp_rpi); 2066 ndlp->nlp_state, ndlp->nlp_rpi);
1975 lpfc_dequeue_node(vport, ndlp); 2067 if (NLP_CHK_FREE_REQ(ndlp)) {
2068 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
2069 "0280 lpfc_cleanup_node: ndlp:x%p "
2070 "usgmap:x%x refcnt:%d\n",
2071 (void *)ndlp, ndlp->nlp_usg_map,
2072 atomic_read(&ndlp->kref.refcount));
2073 lpfc_dequeue_node(vport, ndlp);
2074 } else {
2075 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
2076 "0281 lpfc_cleanup_node: ndlp:x%p "
2077 "usgmap:x%x refcnt:%d\n",
2078 (void *)ndlp, ndlp->nlp_usg_map,
2079 atomic_read(&ndlp->kref.refcount));
2080 lpfc_disable_node(vport, ndlp);
2081 }
1976 2082
1977 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 2083 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1978 if ((mb = phba->sli.mbox_active)) { 2084 if ((mb = phba->sli.mbox_active)) {
@@ -1994,12 +2100,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1994 } 2100 }
1995 list_del(&mb->list); 2101 list_del(&mb->list);
1996 mempool_free(mb, phba->mbox_mem_pool); 2102 mempool_free(mb, phba->mbox_mem_pool);
1997 lpfc_nlp_put(ndlp); 2103 /* We shall not invoke the lpfc_nlp_put to decrement
2104 * the ndlp reference count as we are in the process
2105 * of lpfc_nlp_release.
2106 */
1998 } 2107 }
1999 } 2108 }
2000 spin_unlock_irq(&phba->hbalock); 2109 spin_unlock_irq(&phba->hbalock);
2001 2110
2002 lpfc_els_abort(phba,ndlp); 2111 lpfc_els_abort(phba, ndlp);
2112
2003 spin_lock_irq(shost->host_lock); 2113 spin_lock_irq(shost->host_lock);
2004 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 2114 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2005 spin_unlock_irq(shost->host_lock); 2115 spin_unlock_irq(shost->host_lock);
@@ -2057,7 +2167,6 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2057 } 2167 }
2058 } 2168 }
2059 } 2169 }
2060
2061 lpfc_cleanup_node(vport, ndlp); 2170 lpfc_cleanup_node(vport, ndlp);
2062 2171
2063 /* 2172 /*
@@ -2182,7 +2291,16 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
2182 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2291 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2183 spin_unlock_irq(shost->host_lock); 2292 spin_unlock_irq(shost->host_lock);
2184 return ndlp; 2293 return ndlp;
2294 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
2295 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
2296 if (!ndlp)
2297 return NULL;
2298 spin_lock_irq(shost->host_lock);
2299 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2300 spin_unlock_irq(shost->host_lock);
2301 return ndlp;
2185 } 2302 }
2303
2186 if (vport->fc_flag & FC_RSCN_MODE) { 2304 if (vport->fc_flag & FC_RSCN_MODE) {
2187 if (lpfc_rscn_payload_check(vport, did)) { 2305 if (lpfc_rscn_payload_check(vport, did)) {
 2188 /* If we've already received a PLOGI from this NPort 2306
@@ -2363,6 +2481,7 @@ lpfc_disc_start(struct lpfc_vport *vport)
2363 * continue discovery. 2481 * continue discovery.
2364 */ 2482 */
2365 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2483 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2484 !(vport->fc_flag & FC_PT2PT) &&
2366 !(vport->fc_flag & FC_RSCN_MODE)) { 2485 !(vport->fc_flag & FC_RSCN_MODE)) {
2367 lpfc_issue_reg_vpi(phba, vport); 2486 lpfc_issue_reg_vpi(phba, vport);
2368 return; 2487 return;
@@ -2485,6 +2604,8 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
2485 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { 2604 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
2486 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 2605 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
2487 nlp_listp) { 2606 nlp_listp) {
2607 if (!NLP_CHK_NODE_ACT(ndlp))
2608 continue;
2488 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 2609 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
2489 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { 2610 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
2490 lpfc_free_tx(phba, ndlp); 2611 lpfc_free_tx(phba, ndlp);
@@ -2572,6 +2693,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2572 /* Start discovery by sending FLOGI, clean up old rpis */ 2693 /* Start discovery by sending FLOGI, clean up old rpis */
2573 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 2694 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
2574 nlp_listp) { 2695 nlp_listp) {
2696 if (!NLP_CHK_NODE_ACT(ndlp))
2697 continue;
2575 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 2698 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
2576 continue; 2699 continue;
2577 if (ndlp->nlp_type & NLP_FABRIC) { 2700 if (ndlp->nlp_type & NLP_FABRIC) {
@@ -2618,7 +2741,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2618 "NameServer login\n"); 2741 "NameServer login\n");
2619 /* Next look for NameServer ndlp */ 2742 /* Next look for NameServer ndlp */
2620 ndlp = lpfc_findnode_did(vport, NameServer_DID); 2743 ndlp = lpfc_findnode_did(vport, NameServer_DID);
2621 if (ndlp) 2744 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
2622 lpfc_els_abort(phba, ndlp); 2745 lpfc_els_abort(phba, ndlp);
2623 2746
2624 /* ReStart discovery */ 2747 /* ReStart discovery */
@@ -2897,6 +3020,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2897 ndlp->nlp_sid = NLP_NO_SID; 3020 ndlp->nlp_sid = NLP_NO_SID;
2898 INIT_LIST_HEAD(&ndlp->nlp_listp); 3021 INIT_LIST_HEAD(&ndlp->nlp_listp);
2899 kref_init(&ndlp->kref); 3022 kref_init(&ndlp->kref);
3023 NLP_INT_NODE_ACT(ndlp);
2900 3024
2901 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 3025 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2902 "node init: did:x%x", 3026 "node init: did:x%x",
@@ -2911,6 +3035,8 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2911static void 3035static void
2912lpfc_nlp_release(struct kref *kref) 3036lpfc_nlp_release(struct kref *kref)
2913{ 3037{
3038 struct lpfc_hba *phba;
3039 unsigned long flags;
2914 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, 3040 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
2915 kref); 3041 kref);
2916 3042
@@ -2918,8 +3044,24 @@ lpfc_nlp_release(struct kref *kref)
2918 "node release: did:x%x flg:x%x type:x%x", 3044 "node release: did:x%x flg:x%x type:x%x",
2919 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 3045 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
2920 3046
3047 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3048 "0279 lpfc_nlp_release: ndlp:x%p "
3049 "usgmap:x%x refcnt:%d\n",
3050 (void *)ndlp, ndlp->nlp_usg_map,
3051 atomic_read(&ndlp->kref.refcount));
3052
3053 /* remove ndlp from action. */
2921 lpfc_nlp_remove(ndlp->vport, ndlp); 3054 lpfc_nlp_remove(ndlp->vport, ndlp);
2922 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool); 3055
3056 /* clear the ndlp active flag for all release cases */
3057 phba = ndlp->vport->phba;
3058 spin_lock_irqsave(&phba->ndlp_lock, flags);
3059 NLP_CLR_NODE_ACT(ndlp);
3060 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
3061
3062 /* free ndlp memory for final ndlp release */
3063 if (NLP_CHK_FREE_REQ(ndlp))
3064 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
2923} 3065}
2924 3066
2925/* This routine bumps the reference count for a ndlp structure to ensure 3067/* This routine bumps the reference count for a ndlp structure to ensure
@@ -2929,37 +3071,108 @@ lpfc_nlp_release(struct kref *kref)
2929struct lpfc_nodelist * 3071struct lpfc_nodelist *
2930lpfc_nlp_get(struct lpfc_nodelist *ndlp) 3072lpfc_nlp_get(struct lpfc_nodelist *ndlp)
2931{ 3073{
3074 struct lpfc_hba *phba;
3075 unsigned long flags;
3076
2932 if (ndlp) { 3077 if (ndlp) {
2933 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 3078 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2934 "node get: did:x%x flg:x%x refcnt:x%x", 3079 "node get: did:x%x flg:x%x refcnt:x%x",
2935 ndlp->nlp_DID, ndlp->nlp_flag, 3080 ndlp->nlp_DID, ndlp->nlp_flag,
2936 atomic_read(&ndlp->kref.refcount)); 3081 atomic_read(&ndlp->kref.refcount));
 2937 kref_get(&ndlp->kref); 3082 /* Check ndlp usage to prevent incrementing the reference
 3083 * count of an ndlp that is in the process of being
 3084 * released.
3085 */
3086 phba = ndlp->vport->phba;
3087 spin_lock_irqsave(&phba->ndlp_lock, flags);
3088 if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
3089 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
3090 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
3091 "0276 lpfc_nlp_get: ndlp:x%p "
3092 "usgmap:x%x refcnt:%d\n",
3093 (void *)ndlp, ndlp->nlp_usg_map,
3094 atomic_read(&ndlp->kref.refcount));
3095 return NULL;
3096 } else
3097 kref_get(&ndlp->kref);
3098 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
2938 } 3099 }
2939 return ndlp; 3100 return ndlp;
2940} 3101}
2941 3102
2942
2943/* This routine decrements the reference count for a ndlp structure. If the 3103/* This routine decrements the reference count for a ndlp structure. If the
2944 * count goes to 0, this indicates the the associated nodelist should be freed. 3104 * count goes to 0, this indicates the the associated nodelist should be
3105 * freed. Returning 1 indicates the ndlp resource has been released; on the
3106 * other hand, returning 0 indicates the ndlp resource has not been released
3107 * yet.
2945 */ 3108 */
2946int 3109int
2947lpfc_nlp_put(struct lpfc_nodelist *ndlp) 3110lpfc_nlp_put(struct lpfc_nodelist *ndlp)
2948{ 3111{
2949 if (ndlp) { 3112 struct lpfc_hba *phba;
2950 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 3113 unsigned long flags;
2951 "node put: did:x%x flg:x%x refcnt:x%x", 3114
2952 ndlp->nlp_DID, ndlp->nlp_flag, 3115 if (!ndlp)
2953 atomic_read(&ndlp->kref.refcount)); 3116 return 1;
3117
3118 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
3119 "node put: did:x%x flg:x%x refcnt:x%x",
3120 ndlp->nlp_DID, ndlp->nlp_flag,
3121 atomic_read(&ndlp->kref.refcount));
3122 phba = ndlp->vport->phba;
3123 spin_lock_irqsave(&phba->ndlp_lock, flags);
3124 /* Check the ndlp memory free acknowledge flag to avoid the
 3125 * possible race condition of kref_put being invoked again
 3126 * after a previous one has already freed the ndlp memory.
3127 */
3128 if (NLP_CHK_FREE_ACK(ndlp)) {
3129 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
3130 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
3131 "0274 lpfc_nlp_put: ndlp:x%p "
3132 "usgmap:x%x refcnt:%d\n",
3133 (void *)ndlp, ndlp->nlp_usg_map,
3134 atomic_read(&ndlp->kref.refcount));
3135 return 1;
2954 } 3136 }
2955 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0; 3137 /* Check the ndlp inactivate log flag to avoid the possible
3138 * race condition that kref_put got invoked again after ndlp
3139 * is already in inactivating state.
3140 */
3141 if (NLP_CHK_IACT_REQ(ndlp)) {
3142 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
3143 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
3144 "0275 lpfc_nlp_put: ndlp:x%p "
3145 "usgmap:x%x refcnt:%d\n",
3146 (void *)ndlp, ndlp->nlp_usg_map,
3147 atomic_read(&ndlp->kref.refcount));
3148 return 1;
3149 }
3150 /* For last put, mark the ndlp usage flags to make sure no
 3151 * other kref_get or kref_put on the same ndlp can slip in
 3152 * once the final kref_put has been invoked on this
 3153 * ndlp.
3154 */
3155 if (atomic_read(&ndlp->kref.refcount) == 1) {
3156 /* Indicate ndlp is put to inactive state. */
3157 NLP_SET_IACT_REQ(ndlp);
3158 /* Acknowledge ndlp memory free has been seen. */
3159 if (NLP_CHK_FREE_REQ(ndlp))
3160 NLP_SET_FREE_ACK(ndlp);
3161 }
3162 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
 3163 /* Note: kref_put returns 1 when decrementing a reference
 3164 * count that was 1; it invokes the release callback function
 3165 * but leaves the reference count at 1 (the final decrement
 3166 * is not actually performed). Otherwise, it decrements the
 3167 * reference count and returns 0.
3168 */
3169 return kref_put(&ndlp->kref, lpfc_nlp_release);
2956} 3170}
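The reworked lpfc_nlp_get()/lpfc_nlp_put() wrap every kref operation in phba->ndlp_lock and consult usage flags, so a get cannot race a node that is being released and a final put marks the node inactive and acknowledges a pending free request before calling kref_put(). The sketch below captures the shape of that guard with plain counters and a pthread mutex; the flag names are borrowed from the patch, but the types and helpers are made up for illustration and the semantics are simplified.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NODE_ACTIVE	0x1	/* stands in for NLP_CHK_NODE_ACT() */
#define NODE_FREE_REQ	0x2	/* stands in for NLP_CHK_FREE_REQ() */
#define NODE_FREE_ACK	0x4	/* stands in for NLP_CHK_FREE_ACK() */
#define NODE_IACT_REQ	0x8	/* stands in for NLP_CHK_IACT_REQ() */

struct node_sketch {
	pthread_mutex_t lock;	/* stands in for phba->ndlp_lock */
	unsigned int usg_map;
	int refcount;
};

static struct node_sketch *node_get(struct node_sketch *n)
{
	pthread_mutex_lock(&n->lock);
	if (!(n->usg_map & NODE_ACTIVE) || (n->usg_map & NODE_FREE_ACK)) {
		pthread_mutex_unlock(&n->lock);
		return NULL;	/* node is being torn down: refuse the get */
	}
	n->refcount++;
	pthread_mutex_unlock(&n->lock);
	return n;
}

/* Returns true when this was the final reference. */
static bool node_put(struct node_sketch *n)
{
	bool last;

	pthread_mutex_lock(&n->lock);
	if (n->usg_map & (NODE_FREE_ACK | NODE_IACT_REQ)) {
		pthread_mutex_unlock(&n->lock);
		return true;	/* a final put already happened: do nothing */
	}
	if (n->refcount == 1) {
		n->usg_map |= NODE_IACT_REQ;		/* going inactive */
		if (n->usg_map & NODE_FREE_REQ)
			n->usg_map |= NODE_FREE_ACK;	/* free has been seen */
	}
	last = (--n->refcount == 0);
	if (last)
		n->usg_map &= ~NODE_ACTIVE;		/* release-callback step */
	pthread_mutex_unlock(&n->lock);
	return last;
}

int main(void)
{
	struct node_sketch n = { .usg_map = NODE_ACTIVE, .refcount = 1 };

	pthread_mutex_init(&n.lock, NULL);
	node_get(&n);				/* refcount 1 -> 2 */
	node_put(&n);				/* 2 -> 1 */
	printf("last put: %d\n", node_put(&n));	/* 1 -> 0, release path */
	return 0;
}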
2957 3171
 2958/* This routine frees the specified nodelist if it is not in use 3172/* This routine frees the specified nodelist if it is not in use
2959 * by any other discovery thread. This routine returns 1 if the ndlp 3173 * by any other discovery thread. This routine returns 1 if the
 2960 * is not being used by anyone and has been freed. A return value of 3174 * ndlp has been freed. A return value of 0 indicates the ndlp has
2961 * 0 indicates it is being used by another discovery thread and the 3175 * not yet been released.
2962 * refcount is left unchanged.
2963 */ 3176 */
2964int 3177int
2965lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) 3178lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
@@ -2968,11 +3181,8 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
2968 "node not used: did:x%x flg:x%x refcnt:x%x", 3181 "node not used: did:x%x flg:x%x refcnt:x%x",
2969 ndlp->nlp_DID, ndlp->nlp_flag, 3182 ndlp->nlp_DID, ndlp->nlp_flag,
2970 atomic_read(&ndlp->kref.refcount)); 3183 atomic_read(&ndlp->kref.refcount));
2971 3184 if (atomic_read(&ndlp->kref.refcount) == 1)
2972 if (atomic_read(&ndlp->kref.refcount) == 1) { 3185 if (lpfc_nlp_put(ndlp))
2973 lpfc_nlp_put(ndlp); 3186 return 1;
2974 return 1;
2975 }
2976 return 0; 3187 return 0;
2977} 3188}
2978
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 041f83e7634a..7773b949aa7c 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -581,6 +581,7 @@ struct ls_rjt { /* Structure is in Big Endian format */
581#define LSEXP_INVALID_O_SID 0x15 581#define LSEXP_INVALID_O_SID 0x15
582#define LSEXP_INVALID_OX_RX 0x17 582#define LSEXP_INVALID_OX_RX 0x17
583#define LSEXP_CMD_IN_PROGRESS 0x19 583#define LSEXP_CMD_IN_PROGRESS 0x19
584#define LSEXP_PORT_LOGIN_REQ 0x1E
584#define LSEXP_INVALID_NPORT_ID 0x1F 585#define LSEXP_INVALID_NPORT_ID 0x1F
585#define LSEXP_INVALID_SEQ_ID 0x21 586#define LSEXP_INVALID_SEQ_ID 0x21
586#define LSEXP_INVALID_XCHG 0x23 587#define LSEXP_INVALID_XCHG 0x23
@@ -1376,11 +1377,26 @@ typedef struct { /* FireFly BIU registers */
1376#define CMD_QUE_XRI64_CX 0xB3 1377#define CMD_QUE_XRI64_CX 0xB3
1377#define CMD_IOCB_RCV_SEQ64_CX 0xB5 1378#define CMD_IOCB_RCV_SEQ64_CX 0xB5
1378#define CMD_IOCB_RCV_ELS64_CX 0xB7 1379#define CMD_IOCB_RCV_ELS64_CX 0xB7
1380#define CMD_IOCB_RET_XRI64_CX 0xB9
1379#define CMD_IOCB_RCV_CONT64_CX 0xBB 1381#define CMD_IOCB_RCV_CONT64_CX 0xBB
1380 1382
1381#define CMD_GEN_REQUEST64_CR 0xC2 1383#define CMD_GEN_REQUEST64_CR 0xC2
1382#define CMD_GEN_REQUEST64_CX 0xC3 1384#define CMD_GEN_REQUEST64_CX 0xC3
1383 1385
1386/* Unhandled SLI-3 Commands */
1387#define CMD_IOCB_XMIT_MSEQ64_CR 0xB0
1388#define CMD_IOCB_XMIT_MSEQ64_CX 0xB1
1389#define CMD_IOCB_RCV_SEQ_LIST64_CX 0xC1
1390#define CMD_IOCB_RCV_ELS_LIST64_CX 0xCD
1391#define CMD_IOCB_CLOSE_EXTENDED_CN 0xB6
1392#define CMD_IOCB_ABORT_EXTENDED_CN 0xBA
1393#define CMD_IOCB_RET_HBQE64_CN 0xCA
1394#define CMD_IOCB_FCP_IBIDIR64_CR 0xAC
1395#define CMD_IOCB_FCP_IBIDIR64_CX 0xAD
1396#define CMD_IOCB_FCP_ITASKMGT64_CX 0xAF
1397#define CMD_IOCB_LOGENTRY_CN 0x94
1398#define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96
1399
1384#define CMD_MAX_IOCB_CMD 0xE6 1400#define CMD_MAX_IOCB_CMD 0xE6
1385#define CMD_IOCB_MASK 0xff 1401#define CMD_IOCB_MASK 0xff
1386 1402
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6cfeba7454d4..22843751c2ca 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -461,11 +461,21 @@ lpfc_config_port_post(struct lpfc_hba *phba)
461int 461int
462lpfc_hba_down_prep(struct lpfc_hba *phba) 462lpfc_hba_down_prep(struct lpfc_hba *phba)
463{ 463{
464 struct lpfc_vport **vports;
465 int i;
464 /* Disable interrupts */ 466 /* Disable interrupts */
465 writel(0, phba->HCregaddr); 467 writel(0, phba->HCregaddr);
466 readl(phba->HCregaddr); /* flush */ 468 readl(phba->HCregaddr); /* flush */
467 469
468 lpfc_cleanup_discovery_resources(phba->pport); 470 if (phba->pport->load_flag & FC_UNLOADING)
471 lpfc_cleanup_discovery_resources(phba->pport);
472 else {
473 vports = lpfc_create_vport_work_array(phba);
474 if (vports != NULL)
475 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
476 lpfc_cleanup_discovery_resources(vports[i]);
477 lpfc_destroy_vport_work_array(phba, vports);
478 }
469 return 0; 479 return 0;
470} 480}
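lpfc_hba_down_prep() now cleans up discovery resources for every vport in the work array rather than only the physical port, except when the driver is unloading. A small stand-alone sketch of that iterate-a-NULL-terminated-snapshot pattern follows; snapshot_vports() and cleanup_discovery() are hypothetical stand-ins for lpfc_create_vport_work_array() and lpfc_cleanup_discovery_resources().

#include <stdio.h>
#include <stdlib.h>

struct vport_sketch { int id; };

/* Stand-in for lpfc_create_vport_work_array(): a NULL-terminated snapshot. */
static struct vport_sketch **snapshot_vports(struct vport_sketch *vps, int n)
{
	struct vport_sketch **arr = calloc(n + 1, sizeof(*arr));

	for (int i = 0; arr && i < n; i++)
		arr[i] = &vps[i];
	return arr;
}

static void cleanup_discovery(struct vport_sketch *vp)
{
	printf("cleaning vport %d\n", vp->id);
}

int main(void)
{
	struct vport_sketch vps[] = { {0}, {1}, {2} };
	struct vport_sketch **work = snapshot_vports(vps, 3);

	/* Walk every vport, not just the physical port, as the patched
	 * lpfc_hba_down_prep() does when the driver is not unloading. */
	if (work)
		for (int i = 0; work[i] != NULL; i++)
			cleanup_discovery(work[i]);
	free(work);
	return 0;
}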
471 481
@@ -1422,9 +1432,32 @@ lpfc_cleanup(struct lpfc_vport *vport)
1422 lpfc_port_link_failure(vport); 1432 lpfc_port_link_failure(vport);
1423 1433
1424 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 1434 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1435 if (!NLP_CHK_NODE_ACT(ndlp)) {
1436 ndlp = lpfc_enable_node(vport, ndlp,
1437 NLP_STE_UNUSED_NODE);
1438 if (!ndlp)
1439 continue;
1440 spin_lock_irq(&phba->ndlp_lock);
1441 NLP_SET_FREE_REQ(ndlp);
1442 spin_unlock_irq(&phba->ndlp_lock);
1443 /* Trigger the release of the ndlp memory */
1444 lpfc_nlp_put(ndlp);
1445 continue;
1446 }
1447 spin_lock_irq(&phba->ndlp_lock);
1448 if (NLP_CHK_FREE_REQ(ndlp)) {
1449 /* The ndlp should not be in memory free mode already */
1450 spin_unlock_irq(&phba->ndlp_lock);
1451 continue;
1452 } else
1453 /* Indicate request for freeing ndlp memory */
1454 NLP_SET_FREE_REQ(ndlp);
1455 spin_unlock_irq(&phba->ndlp_lock);
1456
1425 if (ndlp->nlp_type & NLP_FABRIC) 1457 if (ndlp->nlp_type & NLP_FABRIC)
1426 lpfc_disc_state_machine(vport, ndlp, NULL, 1458 lpfc_disc_state_machine(vport, ndlp, NULL,
1427 NLP_EVT_DEVICE_RECOVERY); 1459 NLP_EVT_DEVICE_RECOVERY);
1460
1428 lpfc_disc_state_machine(vport, ndlp, NULL, 1461 lpfc_disc_state_machine(vport, ndlp, NULL,
1429 NLP_EVT_DEVICE_RM); 1462 NLP_EVT_DEVICE_RM);
1430 } 1463 }
@@ -1438,6 +1471,17 @@ lpfc_cleanup(struct lpfc_vport *vport)
1438 if (i++ > 3000) { 1471 if (i++ > 3000) {
1439 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 1472 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1440 "0233 Nodelist not empty\n"); 1473 "0233 Nodelist not empty\n");
1474 list_for_each_entry_safe(ndlp, next_ndlp,
1475 &vport->fc_nodes, nlp_listp) {
1476 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
1477 LOG_NODE,
1478 "0282: did:x%x ndlp:x%p "
1479 "usgmap:x%x refcnt:%d\n",
1480 ndlp->nlp_DID, (void *)ndlp,
1481 ndlp->nlp_usg_map,
1482 atomic_read(
1483 &ndlp->kref.refcount));
1484 }
1441 break; 1485 break;
1442 } 1486 }
1443 1487
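When lpfc_cleanup() times out waiting for the node list to drain, the patch now dumps each remaining ndlp with its usage map and refcount instead of only logging that the list is not empty. Below is a loose sketch of that bounded-poll-then-dump approach; nodes_remaining() is a made-up predicate, and the 3000-iteration limit mirrors the driver's loop counter.

#include <stdio.h>
#include <unistd.h>

/* Hypothetical predicate: returns the number of nodes still on the list. */
static int nodes_remaining(void)
{
	static int n = 5;
	return n > 0 ? n-- : 0;
}

static void wait_for_node_list_drain(void)
{
	int i = 0;

	while (nodes_remaining()) {
		if (i++ > 3000) {
			/* Give up and dump what is left, as lpfc_cleanup()
			 * now logs each stuck ndlp with its usage map and
			 * refcount before breaking out. */
			fprintf(stderr, "nodelist not empty after timeout\n");
			break;
		}
		usleep(10000);	/* ~10ms between polls */
	}
}

int main(void)
{
	wait_for_node_list_drain();
	return 0;
}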
@@ -1586,6 +1630,8 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1586 list_for_each_entry_safe(ndlp, next_ndlp, 1630 list_for_each_entry_safe(ndlp, next_ndlp,
1587 &vports[i]->fc_nodes, 1631 &vports[i]->fc_nodes,
1588 nlp_listp) { 1632 nlp_listp) {
1633 if (!NLP_CHK_NODE_ACT(ndlp))
1634 continue;
1589 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 1635 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
1590 continue; 1636 continue;
1591 if (ndlp->nlp_type & NLP_FABRIC) { 1637 if (ndlp->nlp_type & NLP_FABRIC) {
@@ -1695,9 +1741,9 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
1695 1741
1696 vport = (struct lpfc_vport *) shost->hostdata; 1742 vport = (struct lpfc_vport *) shost->hostdata;
1697 vport->phba = phba; 1743 vport->phba = phba;
1698
1699 vport->load_flag |= FC_LOADING; 1744 vport->load_flag |= FC_LOADING;
1700 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 1745 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
1746 vport->fc_rscn_flush = 0;
1701 1747
1702 lpfc_get_vport_cfgparam(vport); 1748 lpfc_get_vport_cfgparam(vport);
1703 shost->unique_id = instance; 1749 shost->unique_id = instance;
@@ -1879,6 +1925,42 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
1879 spin_unlock_irq(shost->host_lock); 1925 spin_unlock_irq(shost->host_lock);
1880} 1926}
1881 1927
1928static int
1929lpfc_enable_msix(struct lpfc_hba *phba)
1930{
1931 int error;
1932
1933 phba->msix_entries[0].entry = 0;
1934 phba->msix_entries[0].vector = 0;
1935
1936 error = pci_enable_msix(phba->pcidev, phba->msix_entries,
1937 ARRAY_SIZE(phba->msix_entries));
1938 if (error) {
1939 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1940 "0420 Enable MSI-X failed (%d), continuing "
1941 "with MSI\n", error);
1942 pci_disable_msix(phba->pcidev);
1943 return error;
1944 }
1945
1946 error = request_irq(phba->msix_entries[0].vector, lpfc_intr_handler, 0,
1947 LPFC_DRIVER_NAME, phba);
1948 if (error) {
1949 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1950 "0421 MSI-X request_irq failed (%d), "
1951 "continuing with MSI\n", error);
1952 pci_disable_msix(phba->pcidev);
1953 }
1954 return error;
1955}
1956
1957static void
1958lpfc_disable_msix(struct lpfc_hba *phba)
1959{
1960 free_irq(phba->msix_entries[0].vector, phba);
1961 pci_disable_msix(phba->pcidev);
1962}
1963
1882static int __devinit 1964static int __devinit
1883lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 1965lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1884{ 1966{
@@ -1905,6 +1987,9 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1905 1987
1906 spin_lock_init(&phba->hbalock); 1988 spin_lock_init(&phba->hbalock);
1907 1989
1990 /* Initialize ndlp management spinlock */
1991 spin_lock_init(&phba->ndlp_lock);
1992
1908 phba->pcidev = pdev; 1993 phba->pcidev = pdev;
1909 1994
1910 /* Assign an unused board number */ 1995 /* Assign an unused board number */
@@ -2002,6 +2087,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2002 2087
2003 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); 2088 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
2004 2089
2090 INIT_LIST_HEAD(&phba->hbqbuf_in_list);
2091
2005 /* Initialize the SLI Layer to run with lpfc HBAs. */ 2092 /* Initialize the SLI Layer to run with lpfc HBAs. */
2006 lpfc_sli_setup(phba); 2093 lpfc_sli_setup(phba);
2007 lpfc_sli_queue_setup(phba); 2094 lpfc_sli_queue_setup(phba);
@@ -2077,24 +2164,36 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2077 lpfc_debugfs_initialize(vport); 2164 lpfc_debugfs_initialize(vport);
2078 2165
2079 pci_set_drvdata(pdev, shost); 2166 pci_set_drvdata(pdev, shost);
2167 phba->intr_type = NONE;
2080 2168
2081 if (phba->cfg_use_msi) { 2169 if (phba->cfg_use_msi == 2) {
2170 error = lpfc_enable_msix(phba);
2171 if (!error)
2172 phba->intr_type = MSIX;
2173 }
2174
2175 /* Fallback to MSI if MSI-X initialization failed */
2176 if (phba->cfg_use_msi >= 1 && phba->intr_type == NONE) {
2082 retval = pci_enable_msi(phba->pcidev); 2177 retval = pci_enable_msi(phba->pcidev);
2083 if (!retval) 2178 if (!retval)
2084 phba->using_msi = 1; 2179 phba->intr_type = MSI;
2085 else 2180 else
2086 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2181 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2087 "0452 Enable MSI failed, continuing " 2182 "0452 Enable MSI failed, continuing "
2088 "with IRQ\n"); 2183 "with IRQ\n");
2089 } 2184 }
2090 2185
 2091 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, 2186 /* MSI-X is the only case that doesn't need to call request_irq */
2092 LPFC_DRIVER_NAME, phba); 2187 if (phba->intr_type != MSIX) {
2093 if (retval) { 2188 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
2094 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2189 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2095 "0451 Enable interrupt handler failed\n"); 2190 if (retval) {
2096 error = retval; 2191 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0451 Enable "
2097 goto out_disable_msi; 2192 "interrupt handler failed\n");
2193 error = retval;
2194 goto out_disable_msi;
2195 } else if (phba->intr_type != MSI)
2196 phba->intr_type = INTx;
2098 } 2197 }
2099 2198
2100 phba->MBslimaddr = phba->slim_memmap_p; 2199 phba->MBslimaddr = phba->slim_memmap_p;
@@ -2139,9 +2238,14 @@ out_remove_device:
2139out_free_irq: 2238out_free_irq:
2140 lpfc_stop_phba_timers(phba); 2239 lpfc_stop_phba_timers(phba);
2141 phba->pport->work_port_events = 0; 2240 phba->pport->work_port_events = 0;
2142 free_irq(phba->pcidev->irq, phba); 2241
2242 if (phba->intr_type == MSIX)
2243 lpfc_disable_msix(phba);
2244 else
2245 free_irq(phba->pcidev->irq, phba);
2246
2143out_disable_msi: 2247out_disable_msi:
2144 if (phba->using_msi) 2248 if (phba->intr_type == MSI)
2145 pci_disable_msi(phba->pcidev); 2249 pci_disable_msi(phba->pcidev);
2146 destroy_port(vport); 2250 destroy_port(vport);
2147out_kthread_stop: 2251out_kthread_stop:
@@ -2214,10 +2318,13 @@ lpfc_pci_remove_one(struct pci_dev *pdev)
2214 2318
2215 lpfc_debugfs_terminate(vport); 2319 lpfc_debugfs_terminate(vport);
2216 2320
2217 /* Release the irq reservation */ 2321 if (phba->intr_type == MSIX)
2218 free_irq(phba->pcidev->irq, phba); 2322 lpfc_disable_msix(phba);
2219 if (phba->using_msi) 2323 else {
2220 pci_disable_msi(phba->pcidev); 2324 free_irq(phba->pcidev->irq, phba);
2325 if (phba->intr_type == MSI)
2326 pci_disable_msi(phba->pcidev);
2327 }
2221 2328
2222 pci_set_drvdata(pdev, NULL); 2329 pci_set_drvdata(pdev, NULL);
2223 scsi_host_put(shost); 2330 scsi_host_put(shost);
@@ -2276,10 +2383,13 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
2276 pring = &psli->ring[psli->fcp_ring]; 2383 pring = &psli->ring[psli->fcp_ring];
2277 lpfc_sli_abort_iocb_ring(phba, pring); 2384 lpfc_sli_abort_iocb_ring(phba, pring);
2278 2385
2279 /* Release the irq reservation */ 2386 if (phba->intr_type == MSIX)
2280 free_irq(phba->pcidev->irq, phba); 2387 lpfc_disable_msix(phba);
2281 if (phba->using_msi) 2388 else {
2282 pci_disable_msi(phba->pcidev); 2389 free_irq(phba->pcidev->irq, phba);
2390 if (phba->intr_type == MSI)
2391 pci_disable_msi(phba->pcidev);
2392 }
2283 2393
2284 /* Request a slot reset. */ 2394 /* Request a slot reset. */
2285 return PCI_ERS_RESULT_NEED_RESET; 2395 return PCI_ERS_RESULT_NEED_RESET;
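The probe and teardown paths above pick one interrupt mode and undo it symmetrically: try MSI-X first (when cfg_use_msi == 2), fall back to MSI, and finally to a shared legacy INTx line, recording the result in phba->intr_type. A minimal sketch of that fallback, assuming the pci_enable_msix()/pci_enable_msi() interface of this kernel generation; the enum values mirror the lpfc code above, but the helper name, "demo" label and structure are illustrative only:

    /* Hedged sketch: MSI-X -> MSI -> INTx fallback at probe time. */
    #include <linux/pci.h>
    #include <linux/interrupt.h>

    enum intr_type { NONE, INTx, MSI, MSIX };

    static enum intr_type setup_irq(struct pci_dev *pdev, struct msix_entry *ent,
                                    irq_handler_t handler, void *data)
    {
            ent->entry = 0;
            if (!pci_enable_msix(pdev, ent, 1)) {
                    if (!request_irq(ent->vector, handler, 0, "demo", data))
                            return MSIX;            /* dedicated vector, no sharing */
                    pci_disable_msix(pdev);         /* undo before falling back */
            }

            if (!pci_enable_msi(pdev)) {
                    if (!request_irq(pdev->irq, handler, IRQF_SHARED, "demo", data))
                            return MSI;
                    pci_disable_msi(pdev);
            }

            if (!request_irq(pdev->irq, handler, IRQF_SHARED, "demo", data))
                    return INTx;                    /* legacy shared line */

            return NONE;                            /* caller must fail the probe */
    }

Teardown mirrors the setup: free_irq() on whichever vector was requested, then pci_disable_msix() or pci_disable_msi() for the mode that was enabled, exactly as lpfc_disable_msix() and the remove and error paths do above.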
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index c5841d7565f7..39fd2b843bec 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2005 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -35,11 +35,15 @@
35#define LOG_ALL_MSG 0xffff /* LOG all messages */ 35#define LOG_ALL_MSG 0xffff /* LOG all messages */
36 36
37#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 37#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
38 do { \
38 { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ 39 { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \
39 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ 40 dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
40 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } 41 fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
42 } while (0)
41 43
42#define lpfc_printf_log(phba, level, mask, fmt, arg...) \ 44#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
45 do { \
43 { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ 46 { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \
44 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ 47 dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
45 fmt, phba->brd_no, ##arg); } 48 fmt, phba->brd_no, ##arg); } \
49 } while (0)
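Wrapping both logging macros in do { ... } while (0) makes each invocation expand to exactly one statement, so they remain safe in unbraced if/else bodies. A minimal illustration with a hypothetical LOG() macro (not the driver's):

    #include <linux/kernel.h>

    #define LOG_BAD(msg)  { printk(KERN_INFO "%s\n", msg); }
    #define LOG_GOOD(msg) do { printk(KERN_INFO "%s\n", msg); } while (0)

    /*
     * if (err)
     *         LOG_BAD("failed");    <- expands to "{ ... };"; the extra
     * else                             semicolon terminates the if, so the
     *         recover();               else no longer pairs with it and the
     *                                  code fails to compile.
     *
     * if (err)
     *         LOG_GOOD("failed");   <- do { ... } while (0) absorbs the
     * else                             trailing semicolon and behaves as a
     *         recover();               single statement, so if/else works.
     */

The semicolons added after LOG_MBOX_CANNOT_ISSUE_DATA() in lpfc_sli.c further down likely follow the same statement-like convention for macro invocations.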
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 6dc5ab8d6716..3c0cebc71800 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -264,19 +264,30 @@ void
264lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) 264lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
265{ 265{
266 struct hbq_dmabuf *hbq_entry; 266 struct hbq_dmabuf *hbq_entry;
267 unsigned long flags;
268
269 if (!mp)
270 return;
267 271
268 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 272 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
273 /* Check whether HBQ is still in use */
274 spin_lock_irqsave(&phba->hbalock, flags);
275 if (!phba->hbq_in_use) {
276 spin_unlock_irqrestore(&phba->hbalock, flags);
277 return;
278 }
269 hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf); 279 hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
280 list_del(&hbq_entry->dbuf.list);
270 if (hbq_entry->tag == -1) { 281 if (hbq_entry->tag == -1) {
271 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 282 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
272 (phba, hbq_entry); 283 (phba, hbq_entry);
273 } else { 284 } else {
274 lpfc_sli_free_hbq(phba, hbq_entry); 285 lpfc_sli_free_hbq(phba, hbq_entry);
275 } 286 }
287 spin_unlock_irqrestore(&phba->hbalock, flags);
276 } else { 288 } else {
277 lpfc_mbuf_free(phba, mp->virt, mp->phys); 289 lpfc_mbuf_free(phba, mp->virt, mp->phys);
278 kfree(mp); 290 kfree(mp);
279 } 291 }
280 return; 292 return;
281} 293}
282
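lpfc_in_buf_free() now re-checks phba->hbq_in_use under hbalock before touching HBQ buffers, the same flag that lpfc_sli_hbqbuf_free_all() clears (also under hbalock) when it tears the queues down. A minimal sketch of that check-flag-under-lock pattern, with illustrative names rather than the driver's:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct pool {
            spinlock_t lock;
            int in_use;                     /* cleared once, under lock, at teardown */
            struct list_head bufs;
    };

    struct buf { struct list_head list; };

    static void pool_put_buf(struct pool *p, struct buf *b)
    {
            unsigned long flags;

            spin_lock_irqsave(&p->lock, flags);
            if (!p->in_use) {               /* teardown already freed every buffer */
                    spin_unlock_irqrestore(&p->lock, flags);
                    return;
            }
            list_del(&b->list);             /* lists are still valid while locked */
            spin_unlock_irqrestore(&p->lock, flags);
    }

Because the late release and the teardown path take the same lock and agree on the in_use flag, a buffer returned after teardown is simply ignored instead of corrupting lists that have already been freed.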
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 4a0e3406e37a..d513813f6697 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
1 /******************************************************************* 1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -249,6 +249,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
249 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 249 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
250 struct lpfc_hba *phba = vport->phba; 250 struct lpfc_hba *phba = vport->phba;
251 struct lpfc_dmabuf *pcmd; 251 struct lpfc_dmabuf *pcmd;
252 struct lpfc_work_evt *evtp;
252 uint32_t *lp; 253 uint32_t *lp;
253 IOCB_t *icmd; 254 IOCB_t *icmd;
254 struct serv_parm *sp; 255 struct serv_parm *sp;
@@ -435,8 +436,14 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
435 del_timer_sync(&ndlp->nlp_delayfunc); 436 del_timer_sync(&ndlp->nlp_delayfunc);
436 ndlp->nlp_last_elscmd = 0; 437 ndlp->nlp_last_elscmd = 0;
437 438
438 if (!list_empty(&ndlp->els_retry_evt.evt_listp)) 439 if (!list_empty(&ndlp->els_retry_evt.evt_listp)) {
439 list_del_init(&ndlp->els_retry_evt.evt_listp); 440 list_del_init(&ndlp->els_retry_evt.evt_listp);
441 /* Decrement ndlp reference count held for the
442 * delayed retry
443 */
444 evtp = &ndlp->els_retry_evt;
445 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
446 }
440 447
441 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 448 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
442 spin_lock_irq(shost->host_lock); 449 spin_lock_irq(shost->host_lock);
@@ -638,13 +645,15 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
638 return 0; 645 return 0;
639 } 646 }
640 647
641 /* Check config parameter use-adisc or FCP-2 */ 648 if (!(vport->fc_flag & FC_PT2PT)) {
642 if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || 649 /* Check config parameter use-adisc or FCP-2 */
643 ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { 650 if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
644 spin_lock_irq(shost->host_lock); 651 ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
645 ndlp->nlp_flag |= NLP_NPR_ADISC; 652 spin_lock_irq(shost->host_lock);
646 spin_unlock_irq(shost->host_lock); 653 ndlp->nlp_flag |= NLP_NPR_ADISC;
647 return 1; 654 spin_unlock_irq(shost->host_lock);
655 return 1;
656 }
648 } 657 }
649 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 658 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
650 lpfc_unreg_rpi(vport, ndlp); 659 lpfc_unreg_rpi(vport, ndlp);
@@ -656,7 +665,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
656 void *arg, uint32_t evt) 665 void *arg, uint32_t evt)
657{ 666{
658 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 667 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
659 "0253 Illegal State Transition: node x%x " 668 "0271 Illegal State Transition: node x%x "
660 "event x%x, state x%x Data: x%x x%x\n", 669 "event x%x, state x%x Data: x%x x%x\n",
661 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, 670 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
662 ndlp->nlp_flag); 671 ndlp->nlp_flag);
@@ -674,7 +683,7 @@ lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
674 */ 683 */
675 if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { 684 if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
676 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 685 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
677 "0253 Illegal State Transition: node x%x " 686 "0272 Illegal State Transition: node x%x "
678 "event x%x, state x%x Data: x%x x%x\n", 687 "event x%x, state x%x Data: x%x x%x\n",
679 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, 688 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
680 ndlp->nlp_flag); 689 ndlp->nlp_flag);
@@ -2144,8 +2153,11 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2144 uint32_t cur_state, rc; 2153 uint32_t cur_state, rc;
2145 uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *, 2154 uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
2146 uint32_t); 2155 uint32_t);
2156 uint32_t got_ndlp = 0;
2157
2158 if (lpfc_nlp_get(ndlp))
2159 got_ndlp = 1;
2147 2160
2148 lpfc_nlp_get(ndlp);
2149 cur_state = ndlp->nlp_state; 2161 cur_state = ndlp->nlp_state;
2150 2162
2151 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ 2163 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
@@ -2162,15 +2174,24 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2162 rc = (func) (vport, ndlp, arg, evt); 2174 rc = (func) (vport, ndlp, arg, evt);
2163 2175
2164 /* DSM out state <rc> on NPort <nlp_DID> */ 2176 /* DSM out state <rc> on NPort <nlp_DID> */
2165 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2177 if (got_ndlp) {
2178 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2166 "0212 DSM out state %d on NPort x%x Data: x%x\n", 2179 "0212 DSM out state %d on NPort x%x Data: x%x\n",
2167 rc, ndlp->nlp_DID, ndlp->nlp_flag); 2180 rc, ndlp->nlp_DID, ndlp->nlp_flag);
2168 2181
2169 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, 2182 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2170 "DSM out: ste:%d did:x%x flg:x%x", 2183 "DSM out: ste:%d did:x%x flg:x%x",
2171 rc, ndlp->nlp_DID, ndlp->nlp_flag); 2184 rc, ndlp->nlp_DID, ndlp->nlp_flag);
2185 /* Decrement the ndlp reference count held for this function */
2186 lpfc_nlp_put(ndlp);
2187 } else {
2188 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2189 "0212 DSM out state %d on NPort free\n", rc);
2172 2190
2173 lpfc_nlp_put(ndlp); 2191 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2192 "DSM out: ste:%d did:x%x flg:x%x",
2193 rc, 0, 0);
2194 }
2174 2195
2175 return rc; 2196 return rc;
2176} 2197}
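lpfc_disc_state_machine() now remembers whether lpfc_nlp_get() actually took a reference (got_ndlp) and only logs through, and puts, the node when it did; a NULL return means the node is inactive and may be freed underneath the caller. A small sketch of that guarded get/put pattern, using hypothetical node_get()/node_put()/dispatch() helpers:

    struct node { u32 id; };
    struct node *node_get(struct node *n);      /* returns NULL if n is inactive */
    void node_put(struct node *n);
    int dispatch(struct node *n, int evt);

    static int run_event(struct node *n, int evt)
    {
            int got = (node_get(n) != NULL);    /* reference may not be granted */
            int rc;

            rc = dispatch(n, evt);              /* run the state machine */

            if (got) {
                    pr_info("out state %d on node x%x\n", rc, n->id);
                    node_put(n);                /* drop the reference taken above */
            } else {
                    pr_info("out state %d on freed node\n", rc);
            }
            return rc;
    }

The key point is symmetry: the put is issued only on the path where the get is known to have succeeded, so the reference count can never be driven negative.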
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index fc5c3a42b05a..70255c11d3ad 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -1283,6 +1283,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1283 match = 0; 1283 match = 0;
1284 spin_lock_irq(shost->host_lock); 1284 spin_lock_irq(shost->host_lock);
1285 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 1285 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1286 if (!NLP_CHK_NODE_ACT(ndlp))
1287 continue;
1286 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 1288 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
1287 i == ndlp->nlp_sid && 1289 i == ndlp->nlp_sid &&
1288 ndlp->rport) { 1290 ndlp->rport) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index fdd01e384e36..fc0d9501aba6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -203,8 +203,25 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
203 case CMD_IOCB_RCV_SEQ64_CX: 203 case CMD_IOCB_RCV_SEQ64_CX:
204 case CMD_IOCB_RCV_ELS64_CX: 204 case CMD_IOCB_RCV_ELS64_CX:
205 case CMD_IOCB_RCV_CONT64_CX: 205 case CMD_IOCB_RCV_CONT64_CX:
206 case CMD_IOCB_RET_XRI64_CX:
206 type = LPFC_UNSOL_IOCB; 207 type = LPFC_UNSOL_IOCB;
207 break; 208 break;
209 case CMD_IOCB_XMIT_MSEQ64_CR:
210 case CMD_IOCB_XMIT_MSEQ64_CX:
211 case CMD_IOCB_RCV_SEQ_LIST64_CX:
212 case CMD_IOCB_RCV_ELS_LIST64_CX:
213 case CMD_IOCB_CLOSE_EXTENDED_CN:
214 case CMD_IOCB_ABORT_EXTENDED_CN:
215 case CMD_IOCB_RET_HBQE64_CN:
216 case CMD_IOCB_FCP_IBIDIR64_CR:
217 case CMD_IOCB_FCP_IBIDIR64_CX:
218 case CMD_IOCB_FCP_ITASKMGT64_CX:
219 case CMD_IOCB_LOGENTRY_CN:
220 case CMD_IOCB_LOGENTRY_ASYNC_CN:
221 printk("%s - Unhandled SLI-3 Command x%x\n",
222 __FUNCTION__, iocb_cmnd);
223 type = LPFC_UNKNOWN_IOCB;
224 break;
208 default: 225 default:
209 type = LPFC_UNKNOWN_IOCB; 226 type = LPFC_UNKNOWN_IOCB;
210 break; 227 break;
@@ -529,10 +546,13 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
529{ 546{
530 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 547 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
531 struct hbq_dmabuf *hbq_buf; 548 struct hbq_dmabuf *hbq_buf;
549 unsigned long flags;
532 int i, hbq_count; 550 int i, hbq_count;
551 uint32_t hbqno;
533 552
534 hbq_count = lpfc_sli_hbq_count(); 553 hbq_count = lpfc_sli_hbq_count();
535 /* Return all memory used by all HBQs */ 554 /* Return all memory used by all HBQs */
555 spin_lock_irqsave(&phba->hbalock, flags);
536 for (i = 0; i < hbq_count; ++i) { 556 for (i = 0; i < hbq_count; ++i) {
537 list_for_each_entry_safe(dmabuf, next_dmabuf, 557 list_for_each_entry_safe(dmabuf, next_dmabuf,
538 &phba->hbqs[i].hbq_buffer_list, list) { 558 &phba->hbqs[i].hbq_buffer_list, list) {
@@ -542,6 +562,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
542 } 562 }
543 phba->hbqs[i].buffer_count = 0; 563 phba->hbqs[i].buffer_count = 0;
544 } 564 }
565 /* Return all HBQ buffers that are in flight */
566 list_for_each_entry_safe(dmabuf, next_dmabuf,
567 &phba->hbqbuf_in_list, list) {
568 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
569 list_del(&hbq_buf->dbuf.list);
570 if (hbq_buf->tag == -1) {
571 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
572 (phba, hbq_buf);
573 } else {
574 hbqno = hbq_buf->tag >> 16;
575 if (hbqno >= LPFC_MAX_HBQS)
576 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
577 (phba, hbq_buf);
578 else
579 (phba->hbqs[hbqno].hbq_free_buffer)(phba,
580 hbq_buf);
581 }
582 }
583
584 /* Mark the HBQs not in use */
585 phba->hbq_in_use = 0;
586 spin_unlock_irqrestore(&phba->hbalock, flags);
545} 587}
546 588
547static struct lpfc_hbq_entry * 589static struct lpfc_hbq_entry *
@@ -603,30 +645,40 @@ static int
603lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 645lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
604{ 646{
605 uint32_t i, start, end; 647 uint32_t i, start, end;
648 unsigned long flags;
606 struct hbq_dmabuf *hbq_buffer; 649 struct hbq_dmabuf *hbq_buffer;
607 650
608 if (!phba->hbqs[hbqno].hbq_alloc_buffer) { 651 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
609 return 0; 652 return 0;
610 }
611 653
612 start = phba->hbqs[hbqno].buffer_count; 654 start = phba->hbqs[hbqno].buffer_count;
613 end = count + start; 655 end = count + start;
614 if (end > lpfc_hbq_defs[hbqno]->entry_count) { 656 if (end > lpfc_hbq_defs[hbqno]->entry_count)
615 end = lpfc_hbq_defs[hbqno]->entry_count; 657 end = lpfc_hbq_defs[hbqno]->entry_count;
616 } 658
659 /* Check whether HBQ is still in use */
660 spin_lock_irqsave(&phba->hbalock, flags);
661 if (!phba->hbq_in_use)
662 goto out;
617 663
618 /* Populate HBQ entries */ 664 /* Populate HBQ entries */
619 for (i = start; i < end; i++) { 665 for (i = start; i < end; i++) {
620 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 666 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
621 if (!hbq_buffer) 667 if (!hbq_buffer)
622 return 1; 668 goto err;
623 hbq_buffer->tag = (i | (hbqno << 16)); 669 hbq_buffer->tag = (i | (hbqno << 16));
624 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 670 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
625 phba->hbqs[hbqno].buffer_count++; 671 phba->hbqs[hbqno].buffer_count++;
626 else 672 else
627 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 673 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
628 } 674 }
675
676 out:
677 spin_unlock_irqrestore(&phba->hbalock, flags);
629 return 0; 678 return 0;
679 err:
680 spin_unlock_irqrestore(&phba->hbalock, flags);
681 return 1;
630} 682}
631 683
632int 684int
@@ -910,16 +962,29 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
910 uint32_t hbqno; 962 uint32_t hbqno;
911 void *virt; /* virtual address ptr */ 963 void *virt; /* virtual address ptr */
912 dma_addr_t phys; /* mapped address */ 964 dma_addr_t phys; /* mapped address */
965 unsigned long flags;
966
967 /* Check whether HBQ is still in use */
968 spin_lock_irqsave(&phba->hbalock, flags);
969 if (!phba->hbq_in_use) {
970 spin_unlock_irqrestore(&phba->hbalock, flags);
971 return NULL;
972 }
913 973
914 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 974 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
915 if (hbq_entry == NULL) 975 if (hbq_entry == NULL) {
976 spin_unlock_irqrestore(&phba->hbalock, flags);
916 return NULL; 977 return NULL;
978 }
917 list_del(&hbq_entry->dbuf.list); 979 list_del(&hbq_entry->dbuf.list);
918 980
919 hbqno = tag >> 16; 981 hbqno = tag >> 16;
920 new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 982 new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
921 if (new_hbq_entry == NULL) 983 if (new_hbq_entry == NULL) {
984 list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
985 spin_unlock_irqrestore(&phba->hbalock, flags);
922 return &hbq_entry->dbuf; 986 return &hbq_entry->dbuf;
987 }
923 new_hbq_entry->tag = -1; 988 new_hbq_entry->tag = -1;
924 phys = new_hbq_entry->dbuf.phys; 989 phys = new_hbq_entry->dbuf.phys;
925 virt = new_hbq_entry->dbuf.virt; 990 virt = new_hbq_entry->dbuf.virt;
@@ -928,6 +993,9 @@ lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
928 hbq_entry->dbuf.phys = phys; 993 hbq_entry->dbuf.phys = phys;
929 hbq_entry->dbuf.virt = virt; 994 hbq_entry->dbuf.virt = virt;
930 lpfc_sli_free_hbq(phba, hbq_entry); 995 lpfc_sli_free_hbq(phba, hbq_entry);
996 list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
997 spin_unlock_irqrestore(&phba->hbalock, flags);
998
931 return &new_hbq_entry->dbuf; 999 return &new_hbq_entry->dbuf;
932} 1000}
933 1001
@@ -951,6 +1019,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
951 uint32_t Rctl, Type; 1019 uint32_t Rctl, Type;
952 uint32_t match, i; 1020 uint32_t match, i;
953 struct lpfc_iocbq *iocbq; 1021 struct lpfc_iocbq *iocbq;
1022 struct lpfc_dmabuf *dmzbuf;
954 1023
955 match = 0; 1024 match = 0;
956 irsp = &(saveq->iocb); 1025 irsp = &(saveq->iocb);
@@ -972,6 +1041,29 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
972 return 1; 1041 return 1;
973 } 1042 }
974 1043
1044 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
1045 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
1046 if (irsp->ulpBdeCount > 0) {
1047 dmzbuf = lpfc_sli_get_buff(phba, pring,
1048 irsp->un.ulpWord[3]);
1049 lpfc_in_buf_free(phba, dmzbuf);
1050 }
1051
1052 if (irsp->ulpBdeCount > 1) {
1053 dmzbuf = lpfc_sli_get_buff(phba, pring,
1054 irsp->unsli3.sli3Words[3]);
1055 lpfc_in_buf_free(phba, dmzbuf);
1056 }
1057
1058 if (irsp->ulpBdeCount > 2) {
1059 dmzbuf = lpfc_sli_get_buff(phba, pring,
1060 irsp->unsli3.sli3Words[7]);
1061 lpfc_in_buf_free(phba, dmzbuf);
1062 }
1063
1064 return 1;
1065 }
1066
975 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 1067 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
976 if (irsp->ulpBdeCount != 0) { 1068 if (irsp->ulpBdeCount != 0) {
977 saveq->context2 = lpfc_sli_get_buff(phba, pring, 1069 saveq->context2 = lpfc_sli_get_buff(phba, pring,
@@ -2293,6 +2385,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2293 2385
2294 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 2386 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2295 phba->link_state = LPFC_INIT_MBX_CMDS; 2387 phba->link_state = LPFC_INIT_MBX_CMDS;
2388 phba->hbq_in_use = 1;
2296 2389
2297 hbq_entry_index = 0; 2390 hbq_entry_index = 0;
2298 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 2391 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
@@ -2404,9 +2497,7 @@ lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2404 if ((pmb->mb.un.varCfgPort.sli_mode == 3) && 2497 if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
2405 (!pmb->mb.un.varCfgPort.cMA)) { 2498 (!pmb->mb.un.varCfgPort.cMA)) {
2406 rc = -ENXIO; 2499 rc = -ENXIO;
2407 goto do_prep_failed;
2408 } 2500 }
2409 return rc;
2410 2501
2411do_prep_failed: 2502do_prep_failed:
2412 mempool_free(pmb, phba->mbox_mem_pool); 2503 mempool_free(pmb, phba->mbox_mem_pool);
@@ -2625,14 +2716,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2625 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2716 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2626 2717
2627 /* Mbox command <mbxCommand> cannot issue */ 2718 /* Mbox command <mbxCommand> cannot issue */
2628 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) 2719 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2629 return MBX_NOT_FINISHED; 2720 return MBX_NOT_FINISHED;
2630 } 2721 }
2631 2722
2632 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 2723 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
2633 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 2724 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2634 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2725 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2635 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) 2726 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2636 return MBX_NOT_FINISHED; 2727 return MBX_NOT_FINISHED;
2637 } 2728 }
2638 2729
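lpfc_sli_hbqbuf_fill_hbqs() above packs the HBQ number into the upper 16 bits of each buffer tag (hbq_buffer->tag = i | (hbqno << 16)), the teardown and replace paths recover it with tag >> 16, and a tag of -1 marks a replacement buffer that never went to firmware. A tiny sketch of that encode/decode, with illustrative helper names:

    #include <linux/types.h>

    /* Pack a queue number and buffer index into one 32-bit tag. */
    static inline u32 hbq_tag(u32 hbqno, u32 index)
    {
            return index | (hbqno << 16);   /* index in the low 16 bits */
    }

    static inline u32 hbq_tag_to_qno(u32 tag)
    {
            return tag >> 16;               /* queue number in the high 16 bits */
    }

    static inline u32 hbq_tag_to_index(u32 tag)
    {
            return tag & 0xffff;
    }

Keeping both pieces of information in one word lets a completion carrying only the tag be routed back to the queue that owns the buffer.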
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 4b633d39a82a..ca540d1d041e 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.2.4" 21#define LPFC_DRIVER_VERSION "8.2.5"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 9fad7663c117..86d05beb00b8 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -327,7 +327,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
327 * up and ready to FDISC. 327 * up and ready to FDISC.
328 */ 328 */
329 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 329 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
330 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 330 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
331 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
331 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { 332 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
332 lpfc_set_disctmo(vport); 333 lpfc_set_disctmo(vport);
333 lpfc_initial_fdisc(vport); 334 lpfc_initial_fdisc(vport);
@@ -358,7 +359,8 @@ disable_vport(struct fc_vport *fc_vport)
358 long timeout; 359 long timeout;
359 360
360 ndlp = lpfc_findnode_did(vport, Fabric_DID); 361 ndlp = lpfc_findnode_did(vport, Fabric_DID);
361 if (ndlp && phba->link_state >= LPFC_LINK_UP) { 362 if (ndlp && NLP_CHK_NODE_ACT(ndlp)
363 && phba->link_state >= LPFC_LINK_UP) {
362 vport->unreg_vpi_cmpl = VPORT_INVAL; 364 vport->unreg_vpi_cmpl = VPORT_INVAL;
363 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 365 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
364 if (!lpfc_issue_els_npiv_logo(vport, ndlp)) 366 if (!lpfc_issue_els_npiv_logo(vport, ndlp))
@@ -372,6 +374,8 @@ disable_vport(struct fc_vport *fc_vport)
372 * calling lpfc_cleanup_rpis(vport, 1) 374 * calling lpfc_cleanup_rpis(vport, 1)
373 */ 375 */
374 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 376 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
377 if (!NLP_CHK_NODE_ACT(ndlp))
378 continue;
375 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 379 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
376 continue; 380 continue;
377 lpfc_disc_state_machine(vport, ndlp, NULL, 381 lpfc_disc_state_machine(vport, ndlp, NULL,
@@ -414,7 +418,8 @@ enable_vport(struct fc_vport *fc_vport)
414 * up and ready to FDISC. 418 * up and ready to FDISC.
415 */ 419 */
416 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 420 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
417 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 421 if (ndlp && NLP_CHK_NODE_ACT(ndlp)
422 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
418 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) { 423 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
419 lpfc_set_disctmo(vport); 424 lpfc_set_disctmo(vport);
420 lpfc_initial_fdisc(vport); 425 lpfc_initial_fdisc(vport);
@@ -498,7 +503,41 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
498 scsi_remove_host(lpfc_shost_from_vport(vport)); 503 scsi_remove_host(lpfc_shost_from_vport(vport));
499 504
500 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 505 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
501 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && 506
507 /* In case of driver unload, we shall not perform fabric logo as the
508 * worker thread already stopped at this stage and, in this case, we
509 * can safely skip the fabric logo.
510 */
511 if (phba->pport->load_flag & FC_UNLOADING) {
512 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
513 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
514 phba->link_state >= LPFC_LINK_UP) {
515 /* First look for the Fabric ndlp */
516 ndlp = lpfc_findnode_did(vport, Fabric_DID);
517 if (!ndlp)
518 goto skip_logo;
519 else if (!NLP_CHK_NODE_ACT(ndlp)) {
520 ndlp = lpfc_enable_node(vport, ndlp,
521 NLP_STE_UNUSED_NODE);
522 if (!ndlp)
523 goto skip_logo;
524 }
525 /* Remove ndlp from vport node list */
526 lpfc_dequeue_node(vport, ndlp);
527
528 /* Indicate free memory when release */
529 spin_lock_irq(&phba->ndlp_lock);
530 NLP_SET_FREE_REQ(ndlp);
531 spin_unlock_irq(&phba->ndlp_lock);
532 /* Kick off release ndlp when it can be safely done */
533 lpfc_nlp_put(ndlp);
534 }
535 goto skip_logo;
536 }
537
538 /* Otherwise, we will perform fabric logo as needed */
539 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
540 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
502 phba->link_state >= LPFC_LINK_UP) { 541 phba->link_state >= LPFC_LINK_UP) {
503 if (vport->cfg_enable_da_id) { 542 if (vport->cfg_enable_da_id) {
504 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 543 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
@@ -519,8 +558,27 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
519 if (!ndlp) 558 if (!ndlp)
520 goto skip_logo; 559 goto skip_logo;
521 lpfc_nlp_init(vport, ndlp, Fabric_DID); 560 lpfc_nlp_init(vport, ndlp, Fabric_DID);
561 /* Indicate free memory when release */
562 NLP_SET_FREE_REQ(ndlp);
522 } else { 563 } else {
564 if (!NLP_CHK_NODE_ACT(ndlp))
565 ndlp = lpfc_enable_node(vport, ndlp,
566 NLP_STE_UNUSED_NODE);
567 if (!ndlp)
568 goto skip_logo;
569
570 /* Remove ndlp from vport node list */
523 lpfc_dequeue_node(vport, ndlp); 571 lpfc_dequeue_node(vport, ndlp);
572 spin_lock_irq(&phba->ndlp_lock);
573 if (!NLP_CHK_FREE_REQ(ndlp))
574 /* Indicate free memory when release */
575 NLP_SET_FREE_REQ(ndlp);
576 else {
577 /* Skip this if ndlp is already in free mode */
578 spin_unlock_irq(&phba->ndlp_lock);
579 goto skip_logo;
580 }
581 spin_unlock_irq(&phba->ndlp_lock);
524 } 582 }
525 vport->unreg_vpi_cmpl = VPORT_INVAL; 583 vport->unreg_vpi_cmpl = VPORT_INVAL;
526 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 584 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
@@ -534,9 +592,9 @@ skip_logo:
534 lpfc_sli_host_down(vport); 592 lpfc_sli_host_down(vport);
535 593
536 lpfc_stop_vport_timers(vport); 594 lpfc_stop_vport_timers(vport);
537 lpfc_unreg_all_rpis(vport);
538 595
539 if (!(phba->pport->load_flag & FC_UNLOADING)) { 596 if (!(phba->pport->load_flag & FC_UNLOADING)) {
597 lpfc_unreg_all_rpis(vport);
540 lpfc_unreg_default_rpis(vport); 598 lpfc_unreg_default_rpis(vport);
541 /* 599 /*
542 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) 600 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 4d59ae8491a4..b135a1ed4b2c 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -151,19 +151,19 @@ mega_setup_mailbox(adapter_t *adapter)
151 */ 151 */
152 if( adapter->flag & BOARD_IOMAP ) { 152 if( adapter->flag & BOARD_IOMAP ) {
153 153
154 outb_p(adapter->mbox_dma & 0xFF, 154 outb(adapter->mbox_dma & 0xFF,
155 adapter->host->io_port + MBOX_PORT0); 155 adapter->host->io_port + MBOX_PORT0);
156 156
157 outb_p((adapter->mbox_dma >> 8) & 0xFF, 157 outb((adapter->mbox_dma >> 8) & 0xFF,
158 adapter->host->io_port + MBOX_PORT1); 158 adapter->host->io_port + MBOX_PORT1);
159 159
160 outb_p((adapter->mbox_dma >> 16) & 0xFF, 160 outb((adapter->mbox_dma >> 16) & 0xFF,
161 adapter->host->io_port + MBOX_PORT2); 161 adapter->host->io_port + MBOX_PORT2);
162 162
163 outb_p((adapter->mbox_dma >> 24) & 0xFF, 163 outb((adapter->mbox_dma >> 24) & 0xFF,
164 adapter->host->io_port + MBOX_PORT3); 164 adapter->host->io_port + MBOX_PORT3);
165 165
166 outb_p(ENABLE_MBOX_BYTE, 166 outb(ENABLE_MBOX_BYTE,
167 adapter->host->io_port + ENABLE_MBOX_REGION); 167 adapter->host->io_port + ENABLE_MBOX_REGION);
168 168
169 irq_ack(adapter); 169 irq_ack(adapter);
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index b6587a6d8486..0ad215e27b83 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -59,7 +59,6 @@ EXPORT_SYMBOL(mraid_mm_register_adp);
59EXPORT_SYMBOL(mraid_mm_unregister_adp); 59EXPORT_SYMBOL(mraid_mm_unregister_adp);
60EXPORT_SYMBOL(mraid_mm_adapter_app_handle); 60EXPORT_SYMBOL(mraid_mm_adapter_app_handle);
61 61
62static int majorno;
63static uint32_t drvr_ver = 0x02200207; 62static uint32_t drvr_ver = 0x02200207;
64 63
65static int adapters_count_g; 64static int adapters_count_g;
@@ -76,6 +75,12 @@ static const struct file_operations lsi_fops = {
76 .owner = THIS_MODULE, 75 .owner = THIS_MODULE,
77}; 76};
78 77
78static struct miscdevice megaraid_mm_dev = {
79 .minor = MISC_DYNAMIC_MINOR,
80 .name = "megadev0",
81 .fops = &lsi_fops,
82};
83
79/** 84/**
80 * mraid_mm_open - open routine for char node interface 85 * mraid_mm_open - open routine for char node interface
81 * @inode : unused 86 * @inode : unused
@@ -1184,15 +1189,16 @@ mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
1184static int __init 1189static int __init
1185mraid_mm_init(void) 1190mraid_mm_init(void)
1186{ 1191{
1192 int err;
1193
1187 // Announce the driver version 1194 // Announce the driver version
1188 con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n", 1195 con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
1189 LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION)); 1196 LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));
1190 1197
1191 majorno = register_chrdev(0, "megadev", &lsi_fops); 1198 err = misc_register(&megaraid_mm_dev);
1192 1199 if (err < 0) {
1193 if (majorno < 0) { 1200 con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
1194 con_log(CL_ANN, ("megaraid cmm: cannot get major\n")); 1201 return err;
1195 return majorno;
1196 } 1202 }
1197 1203
1198 init_waitqueue_head(&wait_q); 1204 init_waitqueue_head(&wait_q);
@@ -1230,7 +1236,7 @@ mraid_mm_exit(void)
1230{ 1236{
1231 con_log(CL_DLEVEL1 , ("exiting common mod\n")); 1237 con_log(CL_DLEVEL1 , ("exiting common mod\n"));
1232 1238
1233 unregister_chrdev(majorno, "megadev"); 1239 misc_deregister(&megaraid_mm_dev);
1234} 1240}
1235 1241
1236module_init(mraid_mm_init); 1242module_init(mraid_mm_init);
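The common module now registers a misc device ("megadev0", dynamic minor) instead of claiming an entire character-device major with register_chrdev(). A self-contained sketch of that pattern, assuming the misc_register()/misc_deregister() interface; the device name and fops are illustrative:

    #include <linux/module.h>
    #include <linux/fs.h>
    #include <linux/miscdevice.h>

    static const struct file_operations demo_fops = {
            .owner = THIS_MODULE,
    };

    static struct miscdevice demo_dev = {
            .minor = MISC_DYNAMIC_MINOR,    /* let the misc core pick a minor */
            .name  = "demodev0",            /* udev creates /dev/demodev0 */
            .fops  = &demo_fops,
    };

    static int __init demo_init(void)
    {
            return misc_register(&demo_dev);        /* 0 on success, -errno on failure */
    }

    static void __exit demo_exit(void)
    {
            misc_deregister(&demo_dev);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

All misc devices share major 10, so the driver no longer consumes a scarce major number and no longer needs to track one in a module-wide global.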
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h
index c8762b2b8ed1..55b425c0a654 100644
--- a/drivers/scsi/megaraid/megaraid_mm.h
+++ b/drivers/scsi/megaraid/megaraid_mm.h
@@ -22,6 +22,7 @@
22#include <linux/moduleparam.h> 22#include <linux/moduleparam.h>
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/miscdevice.h>
25 26
26#include "mbox_defs.h" 27#include "mbox_defs.h"
27#include "megaraid_ioctl.h" 28#include "megaraid_ioctl.h"
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 651d09b08f2a..fd63b06d9ef1 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1759,6 +1759,7 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
1759 1759
1760 switch (mesg.event) { 1760 switch (mesg.event) {
1761 case PM_EVENT_SUSPEND: 1761 case PM_EVENT_SUSPEND:
1762 case PM_EVENT_HIBERNATE:
1762 case PM_EVENT_FREEZE: 1763 case PM_EVENT_FREEZE:
1763 break; 1764 break;
1764 default: 1765 default:
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
new file mode 100644
index 000000000000..d4a6ac3c9c47
--- /dev/null
+++ b/drivers/scsi/mvsas.c
@@ -0,0 +1,2970 @@
1/*
2 mvsas.c - Marvell 88SE6440 SAS/SATA support
3
4 Copyright 2007 Red Hat, Inc.
5 Copyright 2008 Marvell. <kewei@marvell.com>
6
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2,
10 or (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty
14 of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 See the GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public
18 License along with this program; see the file COPYING. If not,
19 write to the Free Software Foundation, 675 Mass Ave, Cambridge,
20 MA 02139, USA.
21
22 ---------------------------------------------------------------
23
24 Random notes:
25 * hardware supports controlling the endian-ness of data
26 structures. This permits elimination of all the le32_to_cpu()
27 and cpu_to_le32() conversions.
28
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/interrupt.h>
35#include <linux/spinlock.h>
36#include <linux/delay.h>
37#include <linux/dma-mapping.h>
38#include <linux/ctype.h>
39#include <scsi/libsas.h>
40#include <asm/io.h>
41
42#define DRV_NAME "mvsas"
43#define DRV_VERSION "0.5"
44#define _MV_DUMP 0
45#define MVS_DISABLE_NVRAM
46#define MVS_DISABLE_MSI
47
48#define mr32(reg) readl(regs + MVS_##reg)
49#define mw32(reg,val) writel((val), regs + MVS_##reg)
50#define mw32_f(reg,val) do { \
51 writel((val), regs + MVS_##reg); \
52 readl(regs + MVS_##reg); \
53 } while (0)
54
55#define MVS_ID_NOT_MAPPED 0xff
56#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
57
58/* offset for D2H FIS in the Received FIS List Structure */
59#define SATA_RECEIVED_D2H_FIS(reg_set) \
60 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
61#define SATA_RECEIVED_PIO_FIS(reg_set) \
62 ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
63#define UNASSOC_D2H_FIS(id) \
64 ((void *) mvi->rx_fis + 0x100 * id)
65
66#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \
67 for ((__mc) = (__lseq_mask), (__lseq) = 0; \
68 (__mc) != 0 && __rest; \
69 (++__lseq), (__mc) >>= 1)
70
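    /*
     * Illustrative usage sketch (not part of the driver source):
     * for_each_phy() walks the set bits of a phy bitmask, shifting the
     * mask right while counting the phy index, and stops when the mask
     * is empty or the extra __rest condition fails.  handle_phy(),
     * phy_mask and max_phys below are hypothetical.
     *
     *      u32 mc, phy_no;
     *
     *      for_each_phy(phy_mask, mc, phy_no, phy_no < max_phys) {
     *              if (mc & 1)             // lowest remaining bit == this phy
     *                      handle_phy(phy_no);
     *      }
     */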
71/* driver compile-time configuration */
72enum driver_configuration {
73 MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
74 MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
75 /* software requires power-of-2
76 ring size */
77
78 MVS_SLOTS = 512, /* command slots */
79 MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
80 MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
81 MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
82 MVS_OAF_SZ = 64, /* Open address frame buffer size */
83
84 MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */
85
86 MVS_QUEUE_SIZE = 30, /* Support Queue depth */
87};
88
89/* unchangeable hardware details */
90enum hardware_details {
91 MVS_MAX_PHYS = 8, /* max. possible phys */
92 MVS_MAX_PORTS = 8, /* max. possible ports */
93 MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100),
94};
95
96/* peripheral registers (BAR2) */
97enum peripheral_registers {
98 SPI_CTL = 0x10, /* EEPROM control */
99 SPI_CMD = 0x14, /* EEPROM command */
100 SPI_DATA = 0x18, /* EEPROM data */
101};
102
103enum peripheral_register_bits {
104 TWSI_RDY = (1U << 7), /* EEPROM interface ready */
105 TWSI_RD = (1U << 4), /* EEPROM read access */
106
107 SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
108};
109
110/* enhanced mode registers (BAR4) */
111enum hw_registers {
112 MVS_GBL_CTL = 0x04, /* global control */
113 MVS_GBL_INT_STAT = 0x08, /* global irq status */
114 MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
115 MVS_GBL_PORT_TYPE = 0xa0, /* port type */
116
117 MVS_CTL = 0x100, /* SAS/SATA port configuration */
118 MVS_PCS = 0x104, /* SAS/SATA port control/status */
119 MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
120 MVS_CMD_LIST_HI = 0x10C,
121 MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
122 MVS_RX_FIS_HI = 0x114,
123
124 MVS_TX_CFG = 0x120, /* TX configuration */
125 MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
126 MVS_TX_HI = 0x128,
127
128 MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
129 MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
130 MVS_RX_CFG = 0x134, /* RX configuration */
131 MVS_RX_LO = 0x138, /* RX (completion) ring addr */
132 MVS_RX_HI = 0x13C,
133 MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
134
135 MVS_INT_COAL = 0x148, /* Int coalescing config */
136 MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
137 MVS_INT_STAT = 0x150, /* Central int status */
138 MVS_INT_MASK = 0x154, /* Central int enable */
139 MVS_INT_STAT_SRS = 0x158, /* SATA register set status */
140 MVS_INT_MASK_SRS = 0x15C,
141
142 /* ports 1-3 follow after this */
143 MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
144 MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
145 MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */
146 MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */
147
148 /* ports 1-3 follow after this */
149 MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
150 MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
151
152 MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
153 MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
154
155 /* ports 1-3 follow after this */
156 MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
157 MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
158 MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */
159 MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */
160
161 /* ports 1-3 follow after this */
162 MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
163 MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
164 MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */
165 MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */
166};
167
168enum hw_register_bits {
169 /* MVS_GBL_CTL */
170 INT_EN = (1U << 1), /* Global int enable */
171 HBA_RST = (1U << 0), /* HBA reset */
172
173 /* MVS_GBL_INT_STAT */
174 INT_XOR = (1U << 4), /* XOR engine event */
175 INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
176
177 /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
178 SATA_TARGET = (1U << 16), /* port0 SATA target enable */
179 MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
180 MODE_AUTO_DET_PORT6 = (1U << 14),
181 MODE_AUTO_DET_PORT5 = (1U << 13),
182 MODE_AUTO_DET_PORT4 = (1U << 12),
183 MODE_AUTO_DET_PORT3 = (1U << 11),
184 MODE_AUTO_DET_PORT2 = (1U << 10),
185 MODE_AUTO_DET_PORT1 = (1U << 9),
186 MODE_AUTO_DET_PORT0 = (1U << 8),
187 MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
188 MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
189 MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
190 MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
191 MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
192 MODE_SAS_PORT6_MASK = (1U << 6),
193 MODE_SAS_PORT5_MASK = (1U << 5),
194 MODE_SAS_PORT4_MASK = (1U << 4),
195 MODE_SAS_PORT3_MASK = (1U << 3),
196 MODE_SAS_PORT2_MASK = (1U << 2),
197 MODE_SAS_PORT1_MASK = (1U << 1),
198 MODE_SAS_PORT0_MASK = (1U << 0),
199 MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
200 MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
201 MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
202 MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
203
204 /* SAS_MODE value may be
205 * dictated (in hw) by values
206 * of SATA_TARGET & AUTO_DET
207 */
208
209 /* MVS_TX_CFG */
210 TX_EN = (1U << 16), /* Enable TX */
211 TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
212
213 /* MVS_RX_CFG */
214 RX_EN = (1U << 16), /* Enable RX */
215 RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
216
217 /* MVS_INT_COAL */
218 COAL_EN = (1U << 16), /* Enable int coalescing */
219
220 /* MVS_INT_STAT, MVS_INT_MASK */
221 CINT_I2C = (1U << 31), /* I2C event */
222 CINT_SW0 = (1U << 30), /* software event 0 */
223 CINT_SW1 = (1U << 29), /* software event 1 */
224 CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
225 CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
226 CINT_MEM = (1U << 26), /* int mem parity err */
227 CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
228 CINT_SRS = (1U << 3), /* SRS event */
229 CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
230 CINT_DONE = (1U << 0), /* cmd completion */
231
232 /* shl for ports 1-3 */
233 CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
234 CINT_PORT = (1U << 8), /* port0 event */
235 CINT_PORT_MASK_OFFSET = 8,
236 CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
237
238 /* TX (delivery) ring bits */
239 TXQ_CMD_SHIFT = 29,
240 TXQ_CMD_SSP = 1, /* SSP protocol */
241 TXQ_CMD_SMP = 2, /* SMP protocol */
242 TXQ_CMD_STP = 3, /* STP/SATA protocol */
243 TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
244 TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
245 TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
246 TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
247 TXQ_SRS_SHIFT = 20, /* SATA register set */
248 TXQ_SRS_MASK = 0x7f,
249 TXQ_PHY_SHIFT = 12, /* PHY bitmap */
250 TXQ_PHY_MASK = 0xff,
251 TXQ_SLOT_MASK = 0xfff, /* slot number */
252
253 /* RX (completion) ring bits */
254 RXQ_GOOD = (1U << 23), /* Response good */
255 RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
256 RXQ_CMD_RX = (1U << 20), /* target cmd received */
257 RXQ_ATTN = (1U << 19), /* attention */
258 RXQ_RSP = (1U << 18), /* response frame xfer'd */
259 RXQ_ERR = (1U << 17), /* err info rec xfer'd */
260 RXQ_DONE = (1U << 16), /* cmd complete */
261 RXQ_SLOT_MASK = 0xfff, /* slot number */
262
263 /* mvs_cmd_hdr bits */
264 MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
265 MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
266
267 /* SSP initiator only */
268 MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
269
270 /* SSP initiator or target */
271 MCH_SSP_FR_TASK = 0x1, /* TASK frame */
272
273 /* SSP target only */
274 MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
275 MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
276 MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
277 MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
278
279 MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
280 MCH_FBURST = (1U << 11), /* first burst (SSP) */
281 MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
282 MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
283 MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
284 MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
285 MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
286 MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
287 MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
288 MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
289
290 CCTL_RST = (1U << 5), /* port logic reset */
291
292 /* 0(LSB first), 1(MSB first) */
293 CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
294 CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
295 CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
296 CCTL_ENDIAN_CMD = (1U << 0), /* command table */
297
298 /* MVS_Px_SER_CTLSTAT (per-phy control) */
299 PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
300 PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
301 PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
302 PHY_RST = (1U << 0), /* phy reset */
303 PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
304 PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
305 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
306 PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
307 (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
308 PHY_READY_MASK = (1U << 20),
309
310 /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
311 PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
312 PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
313 PHYEV_AN = (1U << 18), /* SATA async notification */
314 PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
315 PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
316 PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
317 PHYEV_IU_BIG = (1U << 11), /* IU too long err */
318 PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
319 PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
320 PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
321 PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
322 PHYEV_PORT_SEL = (1U << 6), /* port selector present */
323 PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
324 PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
325 PHYEV_ID_FAIL = (1U << 3), /* identify failed */
326 PHYEV_ID_DONE = (1U << 2), /* identify done */
327 PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
328 PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
329
330 /* MVS_PCS */
331 PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
332 PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
333 PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */
334 PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
335 PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
336 PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
337 PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
338 PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
339 PCS_CMD_RST = (1U << 1), /* reset cmd issue */
340 PCS_CMD_EN = (1U << 0), /* enable cmd issue */
341
342 /* Port n Attached Device Info */
343 PORT_DEV_SSP_TRGT = (1U << 19),
344 PORT_DEV_SMP_TRGT = (1U << 18),
345 PORT_DEV_STP_TRGT = (1U << 17),
346 PORT_DEV_SSP_INIT = (1U << 11),
347 PORT_DEV_SMP_INIT = (1U << 10),
348 PORT_DEV_STP_INIT = (1U << 9),
349 PORT_PHY_ID_MASK = (0xFFU << 24),
350 PORT_DEV_TRGT_MASK = (0x7U << 17),
351 PORT_DEV_INIT_MASK = (0x7U << 9),
352 PORT_DEV_TYPE_MASK = (0x7U << 0),
353
354 /* Port n PHY Status */
355 PHY_RDY = (1U << 2),
356 PHY_DW_SYNC = (1U << 1),
357 PHY_OOB_DTCTD = (1U << 0),
358
359 /* VSR */
360 /* PHYMODE 6 (CDB) */
361 PHY_MODE6_DTL_SPEED = (1U << 27),
362};
363
364enum mvs_info_flags {
365 MVF_MSI = (1U << 0), /* MSI is enabled */
366 MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
367};
368
369enum sas_cmd_port_registers {
370 CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
371 CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
372 CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
373 CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
374 CMD_OOB_SPACE = 0x110, /* OOB space control register */
375 CMD_OOB_BURST = 0x114, /* OOB burst control register */
376 CMD_PHY_TIMER = 0x118, /* PHY timer control register */
377 CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
378 CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
379 CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
380 CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
381 CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
382 CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
383 CMD_ID_TEST = 0x134, /* ID test register */
384 CMD_PL_TIMER = 0x138, /* PL timer register */
385 CMD_WD_TIMER = 0x13c, /* WD timer register */
386 CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
387 CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
388 CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
389 CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
390 CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
391 CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
392 CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
393 CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
394 CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
395 CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memory BIST Status */
396 CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
397 CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
398 CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
399 CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
400 CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
401 CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
402 CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
403 CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
404 CMD_RESET_COUNT = 0x188, /* Reset Count */
405 CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
406 CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
407 CMD_PHY_CTL = 0x194, /* PHY Control and Status */
408 CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
409 CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
410 CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
411 CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
412 CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
413 CMD_HOST_CTL = 0x1AC, /* Host Control Status */
414 CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
415 CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
416 CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
417 CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
418 CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
419 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
420};
421
422/* SAS/SATA configuration port registers, aka phy registers */
423enum sas_sata_config_port_regs {
424 PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
425 PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
426 PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
427 PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
428 PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
429 PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
430 PHYR_SATA_CTL = 0x18, /* SATA control */
431 PHYR_PHY_STAT = 0x1C, /* PHY status */
432 PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */
433 PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */
434 PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */
435 PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */
436 PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
437 PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
438 PHYR_WIDE_PORT = 0x38, /* wide port participating */
439 PHYR_CURRENT0 = 0x80, /* current connection info 0 */
440 PHYR_CURRENT1 = 0x84, /* current connection info 1 */
441 PHYR_CURRENT2 = 0x88, /* current connection info 2 */
442};
443
444/* SAS/SATA Vendor Specific Port Registers */
445enum sas_sata_vsp_regs {
446 VSR_PHY_STAT = 0x00, /* Phy Status */
447 VSR_PHY_MODE1 = 0x01, /* phy tx */
448 VSR_PHY_MODE2 = 0x02, /* tx scc */
449 VSR_PHY_MODE3 = 0x03, /* pll */
450 VSR_PHY_MODE4 = 0x04, /* VCO */
451 VSR_PHY_MODE5 = 0x05, /* Rx */
452 VSR_PHY_MODE6 = 0x06, /* CDR */
453 VSR_PHY_MODE7 = 0x07, /* Impedance */
454 VSR_PHY_MODE8 = 0x08, /* Voltage */
455 VSR_PHY_MODE9 = 0x09, /* Test */
456 VSR_PHY_MODE10 = 0x0A, /* Power */
457 VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
458 VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */
459 VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */
460};
461
462enum pci_cfg_registers {
463 PCR_PHY_CTL = 0x40,
464 PCR_PHY_CTL2 = 0x90,
465 PCR_DEV_CTRL = 0xE8,
466};
467
468enum pci_cfg_register_bits {
469 PCTL_PWR_ON = (0xFU << 24),
470 PCTL_OFF = (0xFU << 12),
471 PRD_REQ_SIZE = (0x4000),
472 PRD_REQ_MASK = (0x00007000),
473};
474
475enum nvram_layout_offsets {
476 NVR_SIG = 0x00, /* 0xAA, 0x55 */
477 NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */
478};
479
480enum chip_flavors {
481 chip_6320,
482 chip_6440,
483 chip_6480,
484};
485
486enum port_type {
487 PORT_TYPE_SAS = (1L << 1),
488 PORT_TYPE_SATA = (1L << 0),
489};
490
491/* Command Table Format */
492enum ct_format {
493 /* SSP */
494 SSP_F_H = 0x00,
495 SSP_F_IU = 0x18,
496 SSP_F_MAX = 0x4D,
497 /* STP */
498 STP_CMD_FIS = 0x00,
499 STP_ATAPI_CMD = 0x40,
500 STP_F_MAX = 0x10,
501 /* SMP */
502 SMP_F_T = 0x00,
503 SMP_F_DEP = 0x01,
504 SMP_F_MAX = 0x101,
505};
506
507enum status_buffer {
508 SB_EIR_OFF = 0x00, /* Error Information Record */
509 SB_RFB_OFF = 0x08, /* Response Frame Buffer */
510	SB_RFB_MAX		= 0x400, /* RFB size */
511};
512
513enum error_info_rec {
514 CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
515};
516
517struct mvs_chip_info {
518 u32 n_phy;
519 u32 srs_sz;
520 u32 slot_width;
521};
522
523struct mvs_err_info {
524 __le32 flags;
525 __le32 flags2;
526};
527
528struct mvs_prd {
529 __le64 addr; /* 64-bit buffer address */
530 __le32 reserved;
531 __le32 len; /* 16-bit length */
532};
533
534struct mvs_cmd_hdr {
535 __le32 flags; /* PRD tbl len; SAS, SATA ctl */
536 __le32 lens; /* cmd, max resp frame len */
537 __le32 tags; /* targ port xfer tag; tag */
538 __le32 data_len; /* data xfer len */
539 __le64 cmd_tbl; /* command table address */
540 __le64 open_frame; /* open addr frame address */
541 __le64 status_buf; /* status buffer address */
542 __le64 prd_tbl; /* PRD tbl address */
543 __le32 reserved[4];
544};
545
546struct mvs_slot_info {
547 struct sas_task *task;
548 u32 n_elem;
549 u32 tx;
550
551 /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
552 * and PRD table
553 */
554 void *buf;
555 dma_addr_t buf_dma;
556#if _MV_DUMP
557 u32 cmd_size;
558#endif
559
560 void *response;
561};
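/*
 * The buf/buf_dma pair above is one MVS_SLOT_BUF_SZ-byte DMA area that the
 * mvs_task_prep_*() routines below carve into four back-to-back regions:
 * command table, open address frame, PRD table and status buffer.  A minimal
 * sketch of the arithmetic for an SSP command (region sizes such as
 * MVS_SSP_CMD_SZ and MVS_OAF_SZ come from the driver header and are only
 * assumed here):
 *
 *	dma_addr_t d = slot->buf_dma;
 *
 *	hdr->cmd_tbl    = cpu_to_le64(d);  d += MVS_SSP_CMD_SZ;
 *	hdr->open_frame = cpu_to_le64(d);  d += MVS_OAF_SZ;
 *	hdr->prd_tbl    = cpu_to_le64(d);  d += n_elem * sizeof(struct mvs_prd);
 *	hdr->status_buf = cpu_to_le64(d);  // remainder of MVS_SLOT_BUF_SZ
 *
 * The larger the PRD table, the less room is left for the status buffer,
 * which is why the prep routines clamp the response length to SB_RFB_MAX.
 */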
562
563struct mvs_port {
564 struct asd_sas_port sas_port;
565 u8 port_attached;
566 u8 taskfileset;
567 u8 wide_port_phymap;
568};
569
570struct mvs_phy {
571 struct mvs_port *port;
572 struct asd_sas_phy sas_phy;
573 struct sas_identify identify;
574 struct scsi_device *sdev;
575 u64 dev_sas_addr;
576 u64 att_dev_sas_addr;
577 u32 att_dev_info;
578 u32 dev_info;
579 u32 phy_type;
580 u32 phy_status;
581 u32 irq_status;
582 u32 frame_rcvd_size;
583 u8 frame_rcvd[32];
584 u8 phy_attached;
585};
586
587struct mvs_info {
588 unsigned long flags;
589
590 spinlock_t lock; /* host-wide lock */
591 struct pci_dev *pdev; /* our device */
592 void __iomem *regs; /* enhanced mode registers */
593 void __iomem *peri_regs; /* peripheral registers */
594
595 u8 sas_addr[SAS_ADDR_SIZE];
596 struct sas_ha_struct sas; /* SCSI/SAS glue */
597 struct Scsi_Host *shost;
598
599 __le32 *tx; /* TX (delivery) DMA ring */
600 dma_addr_t tx_dma;
601 u32 tx_prod; /* cached next-producer idx */
602
603 __le32 *rx; /* RX (completion) DMA ring */
604 dma_addr_t rx_dma;
605 u32 rx_cons; /* RX consumer idx */
606
607 __le32 *rx_fis; /* RX'd FIS area */
608 dma_addr_t rx_fis_dma;
609
610 struct mvs_cmd_hdr *slot; /* DMA command header slots */
611 dma_addr_t slot_dma;
612
613 const struct mvs_chip_info *chip;
614
615 unsigned long tags[MVS_SLOTS];
616 struct mvs_slot_info slot_info[MVS_SLOTS];
617 /* further per-slot information */
618 struct mvs_phy phy[MVS_MAX_PHYS];
619 struct mvs_port port[MVS_MAX_PHYS];
620
621 u32 can_queue; /* per adapter */
622 u32 tag_out; /*Get*/
623 u32 tag_in; /*Give*/
624};
625
626struct mvs_queue_task {
627 struct list_head list;
628
629 void *uldd_task;
630};
631
632static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
633 void *funcdata);
634static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
635static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
636static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
637static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
638static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
639static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);
640
641static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
642static void mvs_detect_porttype(struct mvs_info *mvi, int i);
643static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
644
645static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
646static void mvs_scan_start(struct Scsi_Host *);
647static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev);
648
649static struct scsi_transport_template *mvs_stt;
650
651static const struct mvs_chip_info mvs_chips[] = {
652 [chip_6320] = { 2, 16, 9 },
653 [chip_6440] = { 4, 16, 9 },
654 [chip_6480] = { 8, 32, 10 },
655};
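/*
 * The initializers above fill struct mvs_chip_info in declaration order:
 * { n_phy, srs_sz, slot_width }.  The 6480 entry, for example, reads as
 * 8 phys, 32 SATA register sets and a slot_width of 10.  slot_width appears
 * to be a log2 queue depth (tx_prod is wrapped with MVS_CHIP_SLOT_SZ - 1
 * below), which for the 6480 would mean 1 << 10 = 1024 delivery-queue
 * slots; that reading is an inference from this file, not from a datasheet.
 */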
656
657static struct scsi_host_template mvs_sht = {
658 .module = THIS_MODULE,
659 .name = DRV_NAME,
660 .queuecommand = sas_queuecommand,
661 .target_alloc = sas_target_alloc,
662 .slave_configure = sas_slave_configure,
663 .slave_destroy = sas_slave_destroy,
664 .scan_finished = mvs_scan_finished,
665 .scan_start = mvs_scan_start,
666 .change_queue_depth = sas_change_queue_depth,
667 .change_queue_type = sas_change_queue_type,
668 .bios_param = sas_bios_param,
669 .can_queue = 1,
670 .cmd_per_lun = 1,
671 .this_id = -1,
672 .sg_tablesize = SG_ALL,
673 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
674 .use_clustering = ENABLE_CLUSTERING,
675 .eh_device_reset_handler = sas_eh_device_reset_handler,
676 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
677 .slave_alloc = mvs_sas_slave_alloc,
678 .target_destroy = sas_target_destroy,
679 .ioctl = sas_ioctl,
680};
681
682static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
683{
684 u32 i;
685 u32 run;
686 u32 offset;
687
688 offset = 0;
689 while (size) {
690 printk("%08X : ", baseaddr + offset);
691 if (size >= 16)
692 run = 16;
693 else
694 run = size;
695 size -= run;
696 for (i = 0; i < 16; i++) {
697 if (i < run)
698 printk("%02X ", (u32)data[i]);
699 else
700 printk(" ");
701 }
702 printk(": ");
703 for (i = 0; i < run; i++)
704 printk("%c", isalnum(data[i]) ? data[i] : '.');
705 printk("\n");
706 data = &data[16];
707 offset += run;
708 }
709 printk("\n");
710}
711
712static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
713 enum sas_protocol proto)
714{
715#if _MV_DUMP
716 u32 offset;
717 struct pci_dev *pdev = mvi->pdev;
718 struct mvs_slot_info *slot = &mvi->slot_info[tag];
719
720 offset = slot->cmd_size + MVS_OAF_SZ +
721 sizeof(struct mvs_prd) * slot->n_elem;
722 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
723 tag);
724 mvs_hexdump(32, (u8 *) slot->response,
725 (u32) slot->buf_dma + offset);
726#endif
727}
728
729static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
730 enum sas_protocol proto)
731{
732#if _MV_DUMP
733 u32 sz, w_ptr, r_ptr;
734 u64 addr;
735 void __iomem *regs = mvi->regs;
736 struct pci_dev *pdev = mvi->pdev;
737 struct mvs_slot_info *slot = &mvi->slot_info[tag];
738
739	/* Delivery Queue */
740 sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
741 w_ptr = mr32(TX_PROD_IDX) & TX_RING_SZ_MASK;
742 r_ptr = mr32(TX_CONS_IDX) & TX_RING_SZ_MASK;
743 addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
744 dev_printk(KERN_DEBUG, &pdev->dev,
745 "Delivery Queue Size=%04d , WRT_PTR=%04X , RD_PTR=%04X\n",
746 sz, w_ptr, r_ptr);
747 dev_printk(KERN_DEBUG, &pdev->dev,
748 "Delivery Queue Base Address=0x%llX (PA)"
749 "(tx_dma=0x%llX), Entry=%04d\n",
750 addr, mvi->tx_dma, w_ptr);
751 mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
752 (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
753	/* Command List */
754 addr = mr32(CMD_LIST_HI) << 16 << 16 | mr32(CMD_LIST_LO);
755 dev_printk(KERN_DEBUG, &pdev->dev,
756 "Command List Base Address=0x%llX (PA)"
757 "(slot_dma=0x%llX), Header=%03d\n",
758 addr, mvi->slot_dma, tag);
759 dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
760	/* mvs_cmd_hdr */
761 mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
762 (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
763	/* 1. command table area */
764 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
765 mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
766	/* 2. open address frame area */
767 dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
768 mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
769 (u32) slot->buf_dma + slot->cmd_size);
770	/* 3. status buffer */
771 mvs_hba_sb_dump(mvi, tag, proto);
772	/* 4. PRD table */
773 dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
774 mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
775 (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
776 (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
777#endif
778}
779
780static void mvs_hba_cq_dump(struct mvs_info *mvi)
781{
782#if _MV_DUMP
783 u64 addr;
784 void __iomem *regs = mvi->regs;
785 struct pci_dev *pdev = mvi->pdev;
786 u32 entry = mvi->rx_cons + 1;
787 u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
788
789 /*Completion Queue */
790 addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
791 dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%08X\n",
792 (u32) mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
793 dev_printk(KERN_DEBUG, &pdev->dev,
794 "Completion List Base Address=0x%llX (PA), "
795 "CQ_Entry=%04d, CQ_WP=0x%08X\n",
796 addr, entry - 1, mvi->rx[0]);
797 mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
798 mvi->rx_dma + sizeof(u32) * entry);
799#endif
800}
801
802static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
803{
804 void __iomem *regs = mvi->regs;
805 u32 tmp;
806
807 tmp = mr32(GBL_CTL);
808
809 mw32(GBL_CTL, tmp | INT_EN);
810}
811
812static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
813{
814 void __iomem *regs = mvi->regs;
815 u32 tmp;
816
817 tmp = mr32(GBL_CTL);
818
819 mw32(GBL_CTL, tmp & ~INT_EN);
820}
821
822static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
823
824/* move to PCI layer or libata core? */
825static int pci_go_64(struct pci_dev *pdev)
826{
827 int rc;
828
829 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
830 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
831 if (rc) {
832 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
833 if (rc) {
834 dev_printk(KERN_ERR, &pdev->dev,
835 "64-bit DMA enable failed\n");
836 return rc;
837 }
838 }
839 } else {
840 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
841 if (rc) {
842 dev_printk(KERN_ERR, &pdev->dev,
843 "32-bit DMA enable failed\n");
844 return rc;
845 }
846 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
847 if (rc) {
848 dev_printk(KERN_ERR, &pdev->dev,
849 "32-bit consistent DMA enable failed\n");
850 return rc;
851 }
852 }
853
854 return rc;
855}
856
857static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
858{
859 mvi->tag_in = (mvi->tag_in + 1) & (MVS_SLOTS - 1);
860 mvi->tags[mvi->tag_in] = tag;
861}
862
863static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
864{
865 mvi->tag_out = (mvi->tag_out - 1) & (MVS_SLOTS - 1);
866}
867
868static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
869{
870 if (mvi->tag_out != mvi->tag_in) {
871 *tag_out = mvi->tags[mvi->tag_out];
872 mvi->tag_out = (mvi->tag_out + 1) & (MVS_SLOTS - 1);
873 return 0;
874 }
875 return -EBUSY;
876}
877
878static void mvs_tag_init(struct mvs_info *mvi)
879{
880 int i;
881 for (i = 0; i < MVS_SLOTS; ++i)
882 mvi->tags[i] = i;
883 mvi->tag_out = 0;
884 mvi->tag_in = MVS_SLOTS - 1;
885}
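/*
 * The four helpers above implement a small ring-buffer FIFO of free slot
 * tags: tag_out is the read index, tag_in the write index, and both wrap at
 * MVS_SLOTS (a power of two).  A short walk-through, assuming MVS_SLOTS == 4
 * purely for illustration:
 *
 *	u32 t;
 *	mvs_tag_init(mvi);	// tags = {0,1,2,3}, tag_out = 0, tag_in = 3
 *	mvs_tag_alloc(mvi, &t);	// t = 0, tag_out -> 1
 *	mvs_tag_alloc(mvi, &t);	// t = 1, tag_out -> 2
 *	mvs_tag_clear(mvi, 0);	// tag_in wraps to 0, tag 0 is reusable
 *	mvs_tag_alloc(mvi, &t);	// t = 2, tag_out -> 3
 *
 * The ring counts as empty when tag_out == tag_in, so at most MVS_SLOTS - 1
 * tags can be outstanding at once.
 */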
886
887#ifndef MVS_DISABLE_NVRAM
888static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
889{
890 int timeout = 1000;
891
892 if (addr & ~SPI_ADDR_MASK)
893 return -EINVAL;
894
895 writel(addr, regs + SPI_CMD);
896 writel(TWSI_RD, regs + SPI_CTL);
897
898 while (timeout-- > 0) {
899 if (readl(regs + SPI_CTL) & TWSI_RDY) {
900 *data = readl(regs + SPI_DATA);
901 return 0;
902 }
903
904 udelay(10);
905 }
906
907 return -EBUSY;
908}
909
910static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
911 void *buf, u32 buflen)
912{
913 u32 addr_end, tmp_addr, i, j;
914 u32 tmp = 0;
915 int rc;
916 u8 *tmp8, *buf8 = buf;
917
918 addr_end = addr + buflen;
919 tmp_addr = ALIGN(addr, 4);
920 if (addr > 0xff)
921 return -EINVAL;
922
923 j = addr & 0x3;
924 if (j) {
925 rc = mvs_eep_read(regs, tmp_addr, &tmp);
926 if (rc)
927 return rc;
928
929 tmp8 = (u8 *)&tmp;
930 for (i = j; i < 4; i++)
931 *buf8++ = tmp8[i];
932
933 tmp_addr += 4;
934 }
935
936 for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
937 rc = mvs_eep_read(regs, tmp_addr, &tmp);
938 if (rc)
939 return rc;
940
941 memcpy(buf8, &tmp, 4);
942 buf8 += 4;
943 }
944
945 if (tmp_addr < addr_end) {
946 rc = mvs_eep_read(regs, tmp_addr, &tmp);
947 if (rc)
948 return rc;
949
950 tmp8 = (u8 *)&tmp;
951 j = addr_end - tmp_addr;
952 for (i = 0; i < j; i++)
953 *buf8++ = tmp8[i];
954
955 tmp_addr += 4;
956 }
957
958 return 0;
959}
960#endif
961
962static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
963 void *buf, u32 buflen)
964{
965#ifndef MVS_DISABLE_NVRAM
966 void __iomem *regs = mvi->regs;
967 int rc, i;
968 u32 sum;
969 u8 hdr[2], *tmp;
970 const char *msg;
971
972 rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
973 if (rc) {
974 msg = "nvram hdr read failed";
975 goto err_out;
976 }
977 rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
978 if (rc) {
979 msg = "nvram read failed";
980 goto err_out;
981 }
982
983 if (hdr[0] != 0x5A) {
984 /* entry id */
985 msg = "invalid nvram entry id";
986 rc = -ENOENT;
987 goto err_out;
988 }
989
990 tmp = buf;
991 sum = ((u32)hdr[0]) + ((u32)hdr[1]);
992 for (i = 0; i < buflen; i++)
993 sum += ((u32)tmp[i]);
994
995 if (sum) {
996 msg = "nvram checksum failure";
997 rc = -EILSEQ;
998 goto err_out;
999 }
1000
1001 return 0;
1002
1003err_out:
1004	dev_printk(KERN_ERR, &mvi->pdev->dev, "%s\n", msg);
1005 return rc;
1006#else
1007	/* FIXME: for SAS target mode */
1008 memcpy(buf, "\x00\x00\xab\x11\x30\x04\x05\x50", 8);
1009 return 0;
1010#endif
1011}
1012
1013static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
1014{
1015 struct mvs_phy *phy = &mvi->phy[i];
1016
1017 if (!phy->phy_attached)
1018 return;
1019
1020 if (phy->phy_type & PORT_TYPE_SAS) {
1021 struct sas_identify_frame *id;
1022
1023 id = (struct sas_identify_frame *)phy->frame_rcvd;
1024 id->dev_type = phy->identify.device_type;
1025 id->initiator_bits = SAS_PROTOCOL_ALL;
1026 id->target_bits = phy->identify.target_port_protocols;
1027 } else if (phy->phy_type & PORT_TYPE_SATA) {
1028 /* TODO */
1029 }
1030 mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
1031 mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
1032 PORTE_BYTES_DMAED);
1033}
1034
1035static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
1036{
1037 /* give the phy enabling interrupt event time to come in (1s
1038 * is empirically about all it takes) */
1039 if (time < HZ)
1040 return 0;
1041 /* Wait for discovery to finish */
1042 scsi_flush_work(shost);
1043 return 1;
1044}
1045
1046static void mvs_scan_start(struct Scsi_Host *shost)
1047{
1048 int i;
1049 struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
1050
1051 for (i = 0; i < mvi->chip->n_phy; ++i) {
1052 mvs_bytes_dmaed(mvi, i);
1053 }
1054}
1055
1056static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev)
1057{
1058 int rc;
1059
1060 rc = sas_slave_alloc(scsi_dev);
1061
1062 return rc;
1063}
1064
1065static void mvs_int_port(struct mvs_info *mvi, int port_no, u32 events)
1066{
1067 struct pci_dev *pdev = mvi->pdev;
1068 struct sas_ha_struct *sas_ha = &mvi->sas;
1069 struct mvs_phy *phy = &mvi->phy[port_no];
1070 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1071
1072 phy->irq_status = mvs_read_port_irq_stat(mvi, port_no);
1073	/*
1074	 * events is the port event just delivered; we still need to read the
1075	 * per-port interrupt status to see what actually happened.
1076	 */
1077 dev_printk(KERN_DEBUG, &pdev->dev,
1078 "Port %d Event = %X\n",
1079 port_no, phy->irq_status);
1080
1081 if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
1082 if (!mvs_is_phy_ready(mvi, port_no)) {
1083 sas_phy_disconnected(sas_phy);
1084 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
1085 } else
1086 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
1087 }
1088 if (!(phy->irq_status & PHYEV_DEC_ERR)) {
1089 if (phy->irq_status & PHYEV_COMWAKE) {
1090 u32 tmp = mvs_read_port_irq_mask(mvi, port_no);
1091 mvs_write_port_irq_mask(mvi, port_no,
1092 tmp | PHYEV_SIG_FIS);
1093 }
1094 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
1095 phy->phy_status = mvs_is_phy_ready(mvi, port_no);
1096 if (phy->phy_status) {
1097 mvs_detect_porttype(mvi, port_no);
1098
1099 if (phy->phy_type & PORT_TYPE_SATA) {
1100 u32 tmp = mvs_read_port_irq_mask(mvi,
1101 port_no);
1102 tmp &= ~PHYEV_SIG_FIS;
1103 mvs_write_port_irq_mask(mvi,
1104 port_no, tmp);
1105 }
1106
1107 mvs_update_phyinfo(mvi, port_no, 0);
1108 sas_ha->notify_phy_event(sas_phy,
1109 PHYE_OOB_DONE);
1110 mvs_bytes_dmaed(mvi, port_no);
1111 } else {
1112 dev_printk(KERN_DEBUG, &pdev->dev,
1113 "plugin interrupt but phy is gone\n");
1114 mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
1115 NULL);
1116 }
1117 } else if (phy->irq_status & PHYEV_BROAD_CH)
1118 sas_ha->notify_port_event(sas_phy,
1119 PORTE_BROADCAST_RCVD);
1120 }
1121 mvs_write_port_irq_stat(mvi, port_no, phy->irq_status);
1122}
1123
1124static void mvs_int_sata(struct mvs_info *mvi)
1125{
1126 /* FIXME */
1127}
1128
1129static void mvs_slot_free(struct mvs_info *mvi, struct sas_task *task,
1130 struct mvs_slot_info *slot, u32 slot_idx)
1131{
1132 if (!sas_protocol_ata(task->task_proto))
1133 if (slot->n_elem)
1134 pci_unmap_sg(mvi->pdev, task->scatter,
1135 slot->n_elem, task->data_dir);
1136
1137 switch (task->task_proto) {
1138 case SAS_PROTOCOL_SMP:
1139 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
1140 PCI_DMA_FROMDEVICE);
1141 pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
1142 PCI_DMA_TODEVICE);
1143 break;
1144
1145 case SAS_PROTOCOL_SATA:
1146 case SAS_PROTOCOL_STP:
1147 case SAS_PROTOCOL_SSP:
1148 default:
1149 /* do nothing */
1150 break;
1151 }
1152
1153 slot->task = NULL;
1154 mvs_tag_clear(mvi, slot_idx);
1155}
1156
1157static void mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1158 u32 slot_idx)
1159{
1160 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1161 u64 err_dw0 = *(u32 *) slot->response;
1162 void __iomem *regs = mvi->regs;
1163 u32 tmp;
1164
1165 if (err_dw0 & CMD_ISS_STPD)
1166 if (sas_protocol_ata(task->task_proto)) {
1167 tmp = mr32(INT_STAT_SRS);
1168 mw32(INT_STAT_SRS, tmp & 0xFFFF);
1169 }
1170
1171 mvs_hba_sb_dump(mvi, slot_idx, task->task_proto);
1172}
1173
1174static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
1175{
1176 u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
1177 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1178 struct sas_task *task = slot->task;
1179 struct task_status_struct *tstat = &task->task_status;
1180 struct mvs_port *port = &mvi->port[task->dev->port->id];
1181 bool aborted;
1182 void *to;
1183
1184 spin_lock(&task->task_state_lock);
1185 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
1186 if (!aborted) {
1187 task->task_state_flags &=
1188 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
1189 task->task_state_flags |= SAS_TASK_STATE_DONE;
1190 }
1191 spin_unlock(&task->task_state_lock);
1192
1193 if (aborted)
1194 return -1;
1195
1196 memset(tstat, 0, sizeof(*tstat));
1197 tstat->resp = SAS_TASK_COMPLETE;
1198
1199
1200 if (unlikely(!port->port_attached)) {
1201 tstat->stat = SAS_PHY_DOWN;
1202 goto out;
1203 }
1204
1205 /* error info record present */
1206 if ((rx_desc & RXQ_ERR) && (*(u64 *) slot->response)) {
1207 tstat->stat = SAM_CHECK_COND;
1208 mvs_slot_err(mvi, task, slot_idx);
1209 goto out;
1210 }
1211
1212 switch (task->task_proto) {
1213 case SAS_PROTOCOL_SSP:
1214 /* hw says status == 0, datapres == 0 */
1215 if (rx_desc & RXQ_GOOD) {
1216 tstat->stat = SAM_GOOD;
1217 tstat->resp = SAS_TASK_COMPLETE;
1218 }
1219 /* response frame present */
1220 else if (rx_desc & RXQ_RSP) {
1221 struct ssp_response_iu *iu =
1222 slot->response + sizeof(struct mvs_err_info);
1223 sas_ssp_task_response(&mvi->pdev->dev, task, iu);
1224 }
1225
1226 /* should never happen? */
1227 else
1228 tstat->stat = SAM_CHECK_COND;
1229 break;
1230
1231 case SAS_PROTOCOL_SMP: {
1232 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1233 tstat->stat = SAM_GOOD;
1234 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
1235 memcpy(to + sg_resp->offset,
1236 slot->response + sizeof(struct mvs_err_info),
1237 sg_dma_len(sg_resp));
1238 kunmap_atomic(to, KM_IRQ0);
1239 break;
1240 }
1241
1242 case SAS_PROTOCOL_SATA:
1243 case SAS_PROTOCOL_STP:
1244 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
1245 struct ata_task_resp *resp =
1246 (struct ata_task_resp *)tstat->buf;
1247
1248 if ((rx_desc & (RXQ_DONE | RXQ_ERR | RXQ_ATTN)) ==
1249 RXQ_DONE)
1250 tstat->stat = SAM_GOOD;
1251 else
1252 tstat->stat = SAM_CHECK_COND;
1253
1254 resp->frame_len = sizeof(struct dev_to_host_fis);
1255 memcpy(&resp->ending_fis[0],
1256 SATA_RECEIVED_D2H_FIS(port->taskfileset),
1257 sizeof(struct dev_to_host_fis));
1258 if (resp->ending_fis[2] & ATA_ERR)
1259 mvs_hexdump(16, resp->ending_fis, 0);
1260 break;
1261 }
1262
1263 default:
1264 tstat->stat = SAM_CHECK_COND;
1265 break;
1266 }
1267
1268out:
1269 mvs_slot_free(mvi, task, slot, slot_idx);
1270 task->task_done(task);
1271 return tstat->stat;
1272}
1273
1274static void mvs_int_full(struct mvs_info *mvi)
1275{
1276 void __iomem *regs = mvi->regs;
1277 u32 tmp, stat;
1278 int i;
1279
1280 stat = mr32(INT_STAT);
1281
1282 mvs_int_rx(mvi, false);
1283
1284 for (i = 0; i < MVS_MAX_PORTS; i++) {
1285 tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
1286 if (tmp)
1287 mvs_int_port(mvi, i, tmp);
1288 }
1289
1290 if (stat & CINT_SRS)
1291 mvs_int_sata(mvi);
1292
1293 mw32(INT_STAT, stat);
1294}
1295
1296static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
1297{
1298 void __iomem *regs = mvi->regs;
1299 u32 rx_prod_idx, rx_desc;
1300 bool attn = false;
1301 struct pci_dev *pdev = mvi->pdev;
1302
1303 /* the first dword in the RX ring is special: it contains
1304 * a mirror of the hardware's RX producer index, so that
1305 * we don't have to stall the CPU reading that register.
1306 * The actual RX ring is offset by one dword, due to this.
1307 */
1308 rx_prod_idx = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
1309 if (rx_prod_idx == 0xfff) { /* h/w hasn't touched RX ring yet */
1310 mvi->rx_cons = 0xfff;
1311 return 0;
1312 }
1313
1314	/* The completion queue update may arrive late; re-read the producer
1315	 * index from the register and try again.
1316	 * Note: with interrupt coalescing enabled it must be re-read every time.
1317	 */
1318 if (mvi->rx_cons == rx_prod_idx)
1319 return 0;
1320
1321 if (mvi->rx_cons == 0xfff)
1322 mvi->rx_cons = MVS_RX_RING_SZ - 1;
1323
1324 while (mvi->rx_cons != rx_prod_idx) {
1325
1326 /* increment our internal RX consumer pointer */
1327 mvi->rx_cons = (mvi->rx_cons + 1) & (MVS_RX_RING_SZ - 1);
1328
1329 rx_desc = le32_to_cpu(mvi->rx[mvi->rx_cons + 1]);
1330
1331 mvs_hba_cq_dump(mvi);
1332
1333 if (unlikely(rx_desc & RXQ_DONE))
1334 mvs_slot_complete(mvi, rx_desc);
1335 if (rx_desc & RXQ_ATTN) {
1336 attn = true;
1337 dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
1338 rx_desc);
1339 } else if (rx_desc & RXQ_ERR) {
1340 dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
1341 rx_desc);
1342 }
1343 }
1344
1345 if (attn && self_clear)
1346 mvs_int_full(mvi);
1347
1348 return 0;
1349}
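/*
 * Index bookkeeping for the completion ring handled above, reduced to a
 * sketch of what the loop does (no extra logic, just the essentials):
 *
 *	prod = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;	// h/w producer mirror
 *	while (mvi->rx_cons != prod) {
 *		mvi->rx_cons = (mvi->rx_cons + 1) & (MVS_RX_RING_SZ - 1);
 *		desc = le32_to_cpu(mvi->rx[mvi->rx_cons + 1]);	// +1 skips rx[0]
 *		...
 *	}
 *
 * rx[0] is the DMA'd copy of the producer index, so descriptor i lives at
 * rx[i + 1]; the 0xfff sentinel simply means the hardware has not produced
 * anything yet.
 */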
1350
1351static irqreturn_t mvs_interrupt(int irq, void *opaque)
1352{
1353 struct mvs_info *mvi = opaque;
1354 void __iomem *regs = mvi->regs;
1355 u32 stat;
1356
1357 stat = mr32(GBL_INT_STAT);
1358
1359 /* clear CMD_CMPLT ASAP */
1360 mw32_f(INT_STAT, CINT_DONE);
1361
1362 if (stat == 0 || stat == 0xffffffff)
1363 return IRQ_NONE;
1364
1365 spin_lock(&mvi->lock);
1366
1367 mvs_int_full(mvi);
1368
1369 spin_unlock(&mvi->lock);
1370
1371 return IRQ_HANDLED;
1372}
1373
1374#ifndef MVS_DISABLE_MSI
1375static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
1376{
1377 struct mvs_info *mvi = opaque;
1378
1379 spin_lock(&mvi->lock);
1380
1381 mvs_int_rx(mvi, true);
1382
1383 spin_unlock(&mvi->lock);
1384
1385 return IRQ_HANDLED;
1386}
1387#endif
1388
1389struct mvs_task_exec_info {
1390 struct sas_task *task;
1391 struct mvs_cmd_hdr *hdr;
1392 struct mvs_port *port;
1393 u32 tag;
1394 int n_elem;
1395};
1396
1397static int mvs_task_prep_smp(struct mvs_info *mvi,
1398 struct mvs_task_exec_info *tei)
1399{
1400 int elem, rc, i;
1401 struct sas_task *task = tei->task;
1402 struct mvs_cmd_hdr *hdr = tei->hdr;
1403 struct scatterlist *sg_req, *sg_resp;
1404 u32 req_len, resp_len, tag = tei->tag;
1405 void *buf_tmp;
1406 u8 *buf_oaf;
1407 dma_addr_t buf_tmp_dma;
1408 struct mvs_prd *buf_prd;
1409 struct scatterlist *sg;
1410 struct mvs_slot_info *slot = &mvi->slot_info[tag];
1411 struct asd_sas_port *sas_port = task->dev->port;
1412 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
1413#if _MV_DUMP
1414 u8 *buf_cmd;
1415 void *from;
1416#endif
1417 /*
1418 * DMA-map SMP request, response buffers
1419 */
1420 sg_req = &task->smp_task.smp_req;
1421 elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
1422 if (!elem)
1423 return -ENOMEM;
1424 req_len = sg_dma_len(sg_req);
1425
1426 sg_resp = &task->smp_task.smp_resp;
1427 elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
1428 if (!elem) {
1429 rc = -ENOMEM;
1430 goto err_out;
1431 }
1432 resp_len = sg_dma_len(sg_resp);
1433
1434 /* must be in dwords */
1435 if ((req_len & 0x3) || (resp_len & 0x3)) {
1436 rc = -EINVAL;
1437 goto err_out_2;
1438 }
1439
1440 /*
1441 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1442 */
1443
1444 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
1445 buf_tmp = slot->buf;
1446 buf_tmp_dma = slot->buf_dma;
1447
1448#if _MV_DUMP
1449 buf_cmd = buf_tmp;
1450 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1451 buf_tmp += req_len;
1452 buf_tmp_dma += req_len;
1453 slot->cmd_size = req_len;
1454#else
1455 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
1456#endif
1457
1458 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1459 buf_oaf = buf_tmp;
1460 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1461
1462 buf_tmp += MVS_OAF_SZ;
1463 buf_tmp_dma += MVS_OAF_SZ;
1464
1465 /* region 3: PRD table ********************************************* */
1466 buf_prd = buf_tmp;
1467 if (tei->n_elem)
1468 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1469 else
1470 hdr->prd_tbl = 0;
1471
1472 i = sizeof(struct mvs_prd) * tei->n_elem;
1473 buf_tmp += i;
1474 buf_tmp_dma += i;
1475
1476	/* region 4: status buffer (the bigger the PRD table, the smaller this buffer) */
1477 slot->response = buf_tmp;
1478 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1479
1480 /*
1481 * Fill in TX ring and command slot header
1482 */
1483 slot->tx = mvi->tx_prod;
1484 mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
1485 TXQ_MODE_I | tag |
1486 (sas_port->phy_mask << TXQ_PHY_SHIFT));
1487
1488 hdr->flags |= flags;
1489 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
1490 hdr->tags = cpu_to_le32(tag);
1491 hdr->data_len = 0;
1492
1493 /* generate open address frame hdr (first 12 bytes) */
1494 buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
1495 buf_oaf[1] = task->dev->linkrate & 0xf;
1496 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
1497 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1498
1499 /* fill in PRD (scatter/gather) table, if any */
1500 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1501 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1502 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1503 buf_prd++;
1504 }
1505
1506#if _MV_DUMP
1507 /* copy cmd table */
1508 from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
1509 memcpy(buf_cmd, from + sg_req->offset, req_len);
1510 kunmap_atomic(from, KM_IRQ0);
1511#endif
1512 return 0;
1513
1514err_out_2:
1515 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
1516 PCI_DMA_FROMDEVICE);
1517err_out:
1518 pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
1519 PCI_DMA_TODEVICE);
1520 return rc;
1521}
1522
1523static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
1524{
1525 void __iomem *regs = mvi->regs;
1526 u32 tmp, offs;
1527 u8 *tfs = &port->taskfileset;
1528
1529 if (*tfs == MVS_ID_NOT_MAPPED)
1530 return;
1531
1532 offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
1533 if (*tfs < 16) {
1534 tmp = mr32(PCS);
1535 mw32(PCS, tmp & ~offs);
1536 } else {
1537 tmp = mr32(CTL);
1538 mw32(CTL, tmp & ~offs);
1539 }
1540
1541 tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
1542 if (tmp)
1543 mw32(INT_STAT_SRS, tmp);
1544
1545 *tfs = MVS_ID_NOT_MAPPED;
1546}
1547
1548static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
1549{
1550 int i;
1551 u32 tmp, offs;
1552 void __iomem *regs = mvi->regs;
1553
1554 if (port->taskfileset != MVS_ID_NOT_MAPPED)
1555 return 0;
1556
1557 tmp = mr32(PCS);
1558
1559 for (i = 0; i < mvi->chip->srs_sz; i++) {
1560 if (i == 16)
1561 tmp = mr32(CTL);
1562 offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
1563 if (!(tmp & offs)) {
1564 port->taskfileset = i;
1565
1566 if (i < 16)
1567 mw32(PCS, tmp | offs);
1568 else
1569 mw32(CTL, tmp | offs);
1570 tmp = mr32(INT_STAT_SRS) & (1U << i);
1571 if (tmp)
1572 mw32(INT_STAT_SRS, tmp);
1573 return 0;
1574 }
1575 }
1576 return MVS_ID_NOT_MAPPED;
1577}
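/*
 * A SATA register set (taskfile set) is tracked as one bit per set: sets
 * 0-15 live in the PCS register and sets 16 and up in the CTL register, in
 * both cases at bit position (set & 0x0f) + PCS_EN_SATA_REG_SHIFT.  For
 * example, with srs_sz == 32:
 *
 *	set 5   ->  PCS bit (5 + PCS_EN_SATA_REG_SHIFT)
 *	set 18  ->  CTL bit (2 + PCS_EN_SATA_REG_SHIFT)
 *
 * mvs_assign_reg_set() claims the first clear bit; mvs_free_reg_set()
 * clears it again and acks any pending SRS interrupt for that set.
 */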
1578
1579static u32 mvs_get_ncq_tag(struct sas_task *task)
1580{
1581 u32 tag = 0;
1582 struct ata_queued_cmd *qc = task->uldd_task;
1583
1584 if (qc)
1585 tag = qc->tag;
1586
1587 return tag;
1588}
1589
1590static int mvs_task_prep_ata(struct mvs_info *mvi,
1591 struct mvs_task_exec_info *tei)
1592{
1593 struct sas_task *task = tei->task;
1594 struct domain_device *dev = task->dev;
1595 struct mvs_cmd_hdr *hdr = tei->hdr;
1596 struct asd_sas_port *sas_port = dev->port;
1597 struct mvs_slot_info *slot;
1598 struct scatterlist *sg;
1599 struct mvs_prd *buf_prd;
1600 struct mvs_port *port = tei->port;
1601 u32 tag = tei->tag;
1602 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
1603 void *buf_tmp;
1604 u8 *buf_cmd, *buf_oaf;
1605 dma_addr_t buf_tmp_dma;
1606 u32 i, req_len, resp_len;
1607 const u32 max_resp_len = SB_RFB_MAX;
1608
1609 if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
1610 return -EBUSY;
1611
1612 slot = &mvi->slot_info[tag];
1613 slot->tx = mvi->tx_prod;
1614 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
1615 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
1616 (sas_port->phy_mask << TXQ_PHY_SHIFT) |
1617 (port->taskfileset << TXQ_SRS_SHIFT));
1618
1619 if (task->ata_task.use_ncq)
1620 flags |= MCH_FPDMA;
1621 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
1622 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
1623 flags |= MCH_ATAPI;
1624 }
1625
1626 /* FIXME: fill in port multiplier number */
1627
1628 hdr->flags = cpu_to_le32(flags);
1629
1630	/* FIXME: the low-order 5 bits carry the TAG when NCQ is enabled */
1631 if (task->ata_task.use_ncq) {
1632 hdr->tags = cpu_to_le32(mvs_get_ncq_tag(task));
1633		/* fill in the taskfile */
1634 task->ata_task.fis.sector_count = hdr->tags << 3;
1635 } else
1636 hdr->tags = cpu_to_le32(tag);
1637 hdr->data_len = cpu_to_le32(task->total_xfer_len);
1638
1639 /*
1640 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1641 */
1642
1643 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
1644 buf_cmd = buf_tmp = slot->buf;
1645 buf_tmp_dma = slot->buf_dma;
1646
1647 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1648
1649 buf_tmp += MVS_ATA_CMD_SZ;
1650 buf_tmp_dma += MVS_ATA_CMD_SZ;
1651#if _MV_DUMP
1652 slot->cmd_size = MVS_ATA_CMD_SZ;
1653#endif
1654
1655 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1656 /* used for STP. unused for SATA? */
1657 buf_oaf = buf_tmp;
1658 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1659
1660 buf_tmp += MVS_OAF_SZ;
1661 buf_tmp_dma += MVS_OAF_SZ;
1662
1663 /* region 3: PRD table ********************************************* */
1664 buf_prd = buf_tmp;
1665 if (tei->n_elem)
1666 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1667 else
1668 hdr->prd_tbl = 0;
1669
1670 i = sizeof(struct mvs_prd) * tei->n_elem;
1671 buf_tmp += i;
1672 buf_tmp_dma += i;
1673
1674	/* region 4: status buffer (the bigger the PRD table, the smaller this buffer) */
1675	/* FIXME: probably unused for SATA; kept in case we receive a
1676	 * STP/SATA error information record
1677	 */
1678 slot->response = buf_tmp;
1679 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1680
1681 req_len = sizeof(struct host_to_dev_fis);
1682 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
1683 sizeof(struct mvs_err_info) - i;
1684
1685 /* request, response lengths */
1686 resp_len = min(resp_len, max_resp_len);
1687 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
1688
1689 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
1690 /* fill in command FIS and ATAPI CDB */
1691 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
1692 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
1693 memcpy(buf_cmd + STP_ATAPI_CMD,
1694 task->ata_task.atapi_packet, 16);
1695
1696 /* generate open address frame hdr (first 12 bytes) */
1697 buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */
1698 buf_oaf[1] = task->dev->linkrate & 0xf;
1699 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
1700 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1701
1702 /* fill in PRD (scatter/gather) table, if any */
1703 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1704 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1705 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1706 buf_prd++;
1707 }
1708
1709 return 0;
1710}
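/*
 * For NCQ (FPDMA) commands the queue tag travels in bits 7:3 of the FIS
 * sector count field, which is what the "hdr->tags << 3" assignment above
 * produces on a little-endian host.  A worked example, assuming the queue
 * tag returned by mvs_get_ncq_tag() is 5:
 *
 *	hdr->tags        = cpu_to_le32(5);
 *	fis.sector_count = 5 << 3;	// 0x28, tag in bits 7:3
 *
 * The shift-by-3 layout follows the SATA NCQ definition of the sector count
 * register; shifting the little-endian hdr->tags value directly is simply
 * how this driver happens to do it.
 */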
1711
1712static int mvs_task_prep_ssp(struct mvs_info *mvi,
1713 struct mvs_task_exec_info *tei)
1714{
1715 struct sas_task *task = tei->task;
1716 struct mvs_cmd_hdr *hdr = tei->hdr;
1717 struct mvs_port *port = tei->port;
1718 struct mvs_slot_info *slot;
1719 struct scatterlist *sg;
1720 struct mvs_prd *buf_prd;
1721 struct ssp_frame_hdr *ssp_hdr;
1722 void *buf_tmp;
1723 u8 *buf_cmd, *buf_oaf, fburst = 0;
1724 dma_addr_t buf_tmp_dma;
1725 u32 flags;
1726 u32 resp_len, req_len, i, tag = tei->tag;
1727 const u32 max_resp_len = SB_RFB_MAX;
1728
1729 slot = &mvi->slot_info[tag];
1730
1731 slot->tx = mvi->tx_prod;
1732 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
1733 (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
1734 (port->wide_port_phymap << TXQ_PHY_SHIFT));
1735
1736 flags = MCH_RETRY;
1737 if (task->ssp_task.enable_first_burst) {
1738 flags |= MCH_FBURST;
1739 fburst = (1 << 7);
1740 }
1741 hdr->flags = cpu_to_le32(flags |
1742 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
1743 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));
1744
1745 hdr->tags = cpu_to_le32(tag);
1746 hdr->data_len = cpu_to_le32(task->total_xfer_len);
1747
1748 /*
1749 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
1750 */
1751
1752 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
1753 buf_cmd = buf_tmp = slot->buf;
1754 buf_tmp_dma = slot->buf_dma;
1755
1756 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
1757
1758 buf_tmp += MVS_SSP_CMD_SZ;
1759 buf_tmp_dma += MVS_SSP_CMD_SZ;
1760#if _MV_DUMP
1761 slot->cmd_size = MVS_SSP_CMD_SZ;
1762#endif
1763
1764 /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
1765 buf_oaf = buf_tmp;
1766 hdr->open_frame = cpu_to_le64(buf_tmp_dma);
1767
1768 buf_tmp += MVS_OAF_SZ;
1769 buf_tmp_dma += MVS_OAF_SZ;
1770
1771 /* region 3: PRD table ********************************************* */
1772 buf_prd = buf_tmp;
1773 if (tei->n_elem)
1774 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
1775 else
1776 hdr->prd_tbl = 0;
1777
1778 i = sizeof(struct mvs_prd) * tei->n_elem;
1779 buf_tmp += i;
1780 buf_tmp_dma += i;
1781
1782	/* region 4: status buffer (the bigger the PRD table, the smaller this buffer) */
1783 slot->response = buf_tmp;
1784 hdr->status_buf = cpu_to_le64(buf_tmp_dma);
1785
1786 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
1787 sizeof(struct mvs_err_info) - i;
1788 resp_len = min(resp_len, max_resp_len);
1789
1790 req_len = sizeof(struct ssp_frame_hdr) + 28;
1791
1792 /* request, response lengths */
1793 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
1794
1795 /* generate open address frame hdr (first 12 bytes) */
1796 buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */
1797 buf_oaf[1] = task->dev->linkrate & 0xf;
1798 *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
1799 memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
1800
1801 /* fill in SSP frame header (Command Table.SSP frame header) */
1802 ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
1803 ssp_hdr->frame_type = SSP_COMMAND;
1804 memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
1805 HASHED_SAS_ADDR_SIZE);
1806 memcpy(ssp_hdr->hashed_src_addr,
1807 task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
1808 ssp_hdr->tag = cpu_to_be16(tag);
1809
1810 /* fill in command frame IU */
1811 buf_cmd += sizeof(*ssp_hdr);
1812 memcpy(buf_cmd, &task->ssp_task.LUN, 8);
1813 buf_cmd[9] = fburst | task->ssp_task.task_attr |
1814 (task->ssp_task.task_prio << 3);
1815 memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
1816
1817 /* fill in PRD (scatter/gather) table, if any */
1818 for_each_sg(task->scatter, sg, tei->n_elem, i) {
1819 buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
1820 buf_prd->len = cpu_to_le32(sg_dma_len(sg));
1821 buf_prd++;
1822 }
1823
1824 return 0;
1825}
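/*
 * hdr->lens packs the response and request sizes, in dwords, into one word:
 * response length in the upper 16 bits, request length in the lower 16.
 * A worked example for the SSP command frame built above, assuming the
 * usual 24-byte struct ssp_frame_hdr from <scsi/sas.h>:
 *
 *	req_len = 24 + 28 = 52 bytes  ->  52 / 4 = 13 dwords
 *	lens    = (resp_len / 4) << 16 | 13
 *
 * resp_len itself is whatever is left of the slot buffer, clamped to
 * SB_RFB_MAX (0x400 bytes).
 */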
1826
1827static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
1828{
1829 struct domain_device *dev = task->dev;
1830 struct mvs_info *mvi = dev->port->ha->lldd_ha;
1831 struct pci_dev *pdev = mvi->pdev;
1832 void __iomem *regs = mvi->regs;
1833 struct mvs_task_exec_info tei;
1834 struct sas_task *t = task;
1835 u32 tag = 0xdeadbeef, rc, n_elem = 0;
1836 unsigned long flags;
1837 u32 n = num, pass = 0;
1838
1839 spin_lock_irqsave(&mvi->lock, flags);
1840
1841 do {
1842 tei.port = &mvi->port[dev->port->id];
1843
1844 if (!tei.port->port_attached) {
1845 struct task_status_struct *ts = &t->task_status;
1846 ts->stat = SAS_PHY_DOWN;
1847 t->task_done(t);
1848 rc = 0;
1849 goto exec_exit;
1850 }
1851 if (!sas_protocol_ata(t->task_proto)) {
1852 if (t->num_scatter) {
1853 n_elem = pci_map_sg(mvi->pdev, t->scatter,
1854 t->num_scatter,
1855 t->data_dir);
1856 if (!n_elem) {
1857 rc = -ENOMEM;
1858 goto err_out;
1859 }
1860 }
1861 } else {
1862 n_elem = t->num_scatter;
1863 }
1864
1865 rc = mvs_tag_alloc(mvi, &tag);
1866 if (rc)
1867 goto err_out;
1868
1869 mvi->slot_info[tag].task = t;
1870 mvi->slot_info[tag].n_elem = n_elem;
1871 memset(mvi->slot_info[tag].buf, 0, MVS_SLOT_BUF_SZ);
1872 tei.task = t;
1873 tei.hdr = &mvi->slot[tag];
1874 tei.tag = tag;
1875 tei.n_elem = n_elem;
1876
1877 switch (t->task_proto) {
1878 case SAS_PROTOCOL_SMP:
1879 rc = mvs_task_prep_smp(mvi, &tei);
1880 break;
1881 case SAS_PROTOCOL_SSP:
1882 rc = mvs_task_prep_ssp(mvi, &tei);
1883 break;
1884 case SAS_PROTOCOL_SATA:
1885 case SAS_PROTOCOL_STP:
1886 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1887 rc = mvs_task_prep_ata(mvi, &tei);
1888 break;
1889 default:
1890 dev_printk(KERN_ERR, &pdev->dev,
1891 "unknown sas_task proto: 0x%x\n",
1892 t->task_proto);
1893 rc = -EINVAL;
1894 break;
1895 }
1896
1897 if (rc)
1898 goto err_out_tag;
1899
1900 /* TODO: select normal or high priority */
1901
1902 spin_lock(&t->task_state_lock);
1903 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
1904 spin_unlock(&t->task_state_lock);
1905
1906 if (n == 1) {
1907 spin_unlock_irqrestore(&mvi->lock, flags);
1908 mw32(TX_PROD_IDX, mvi->tx_prod);
1909 }
1910 mvs_hba_memory_dump(mvi, tag, t->task_proto);
1911
1912 ++pass;
1913 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
1914
1915 if (n == 1)
1916 break;
1917
1918 t = list_entry(t->list.next, struct sas_task, list);
1919 } while (--n);
1920
1921 return 0;
1922
1923err_out_tag:
1924 mvs_tag_free(mvi, tag);
1925err_out:
1926 dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
1927 if (!sas_protocol_ata(t->task_proto))
1928 if (n_elem)
1929 pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
1930 t->data_dir);
1931exec_exit:
1932 if (pass)
1933 mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
1934 spin_unlock_irqrestore(&mvi->lock, flags);
1935 return rc;
1936}
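/*
 * Delivery-queue batching in mvs_task_exec(), traced for num == 3 with the
 * producer starting at P (a sketch of the loop above, not extra logic):
 *
 *	pass 1: fill tx[P],     tx_prod -> P + 1
 *	pass 2: fill tx[P + 1], tx_prod -> P + 2
 *	pass 3: fill tx[P + 2], write TX_PROD_IDX = P + 2, tx_prod -> P + 3
 *
 * So the hardware doorbell is rung once per batch, with the index of the
 * last filled slot; on a mid-batch failure the error path writes
 * (tx_prod - 1) instead, publishing only the slots that were successfully
 * prepared.
 */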
1937
1938static int mvs_task_abort(struct sas_task *task)
1939{
1940 int rc = 1;
1941 unsigned long flags;
1942 struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
1943 struct pci_dev *pdev = mvi->pdev;
1944
1945	spin_lock_irqsave(&task->task_state_lock, flags);
1946	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1947		rc = TMF_RESP_FUNC_COMPLETE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
1948		goto out_done;
1949	}
1950	spin_unlock_irqrestore(&task->task_state_lock, flags);
1951
1952 /*FIXME*/
1953 rc = TMF_RESP_FUNC_COMPLETE;
1954
1955 switch (task->task_proto) {
1956 case SAS_PROTOCOL_SMP:
1957 dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! ");
1958 break;
1959 case SAS_PROTOCOL_SSP:
1960 dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! ");
1961 break;
1962 case SAS_PROTOCOL_SATA:
1963 case SAS_PROTOCOL_STP:
1964 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
1965		dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! "
1966			   "Dump command (H2D) FIS:\n");
1967		mvs_hexdump(sizeof(struct host_to_dev_fis),
1968			    (void *)&task->ata_task.fis, 0);
1969		dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI CDB:\n");
1970 mvs_hexdump(16, task->ata_task.atapi_packet, 0);
1971 break;
1972 }
1973 default:
1974 break;
1975 }
1976out_done:
1977 return rc;
1978}
1979
1980static void mvs_free(struct mvs_info *mvi)
1981{
1982 int i;
1983
1984 if (!mvi)
1985 return;
1986
1987 for (i = 0; i < MVS_SLOTS; i++) {
1988 struct mvs_slot_info *slot = &mvi->slot_info[i];
1989
1990 if (slot->buf)
1991 dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
1992 slot->buf, slot->buf_dma);
1993 }
1994
1995 if (mvi->tx)
1996 dma_free_coherent(&mvi->pdev->dev,
1997 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
1998 mvi->tx, mvi->tx_dma);
1999 if (mvi->rx_fis)
2000 dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
2001 mvi->rx_fis, mvi->rx_fis_dma);
2002 if (mvi->rx)
2003 dma_free_coherent(&mvi->pdev->dev,
2004 sizeof(*mvi->rx) * MVS_RX_RING_SZ,
2005 mvi->rx, mvi->rx_dma);
2006 if (mvi->slot)
2007 dma_free_coherent(&mvi->pdev->dev,
2008 sizeof(*mvi->slot) * MVS_SLOTS,
2009 mvi->slot, mvi->slot_dma);
2010#ifdef MVS_ENABLE_PERI
2011 if (mvi->peri_regs)
2012 iounmap(mvi->peri_regs);
2013#endif
2014 if (mvi->regs)
2015 iounmap(mvi->regs);
2016 if (mvi->shost)
2017 scsi_host_put(mvi->shost);
2018 kfree(mvi->sas.sas_port);
2019 kfree(mvi->sas.sas_phy);
2020 kfree(mvi);
2021}
2022
2023/* FIXME: locking? */
2024static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
2025 void *funcdata)
2026{
2027 struct mvs_info *mvi = sas_phy->ha->lldd_ha;
2028 int rc = 0, phy_id = sas_phy->id;
2029 u32 tmp;
2030
2031 tmp = mvs_read_phy_ctl(mvi, phy_id);
2032
2033 switch (func) {
2034 case PHY_FUNC_SET_LINK_RATE:{
2035 struct sas_phy_linkrates *rates = funcdata;
2036 u32 lrmin = 0, lrmax = 0;
2037
2038 lrmin = (rates->minimum_linkrate << 8);
2039 lrmax = (rates->maximum_linkrate << 12);
2040
2041 if (lrmin) {
2042 tmp &= ~(0xf << 8);
2043 tmp |= lrmin;
2044 }
2045 if (lrmax) {
2046 tmp &= ~(0xf << 12);
2047 tmp |= lrmax;
2048 }
2049 mvs_write_phy_ctl(mvi, phy_id, tmp);
2050 break;
2051 }
2052
2053 case PHY_FUNC_HARD_RESET:
2054 if (tmp & PHY_RST_HARD)
2055 break;
2056 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
2057 break;
2058
2059 case PHY_FUNC_LINK_RESET:
2060 mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
2061 break;
2062
2063 case PHY_FUNC_DISABLE:
2064 case PHY_FUNC_RELEASE_SPINUP_HOLD:
2065 default:
2066 rc = -EOPNOTSUPP;
2067 }
2068
2069 return rc;
2070}
2071
2072static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
2073{
2074 struct mvs_phy *phy = &mvi->phy[phy_id];
2075 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2076
2077 sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
2078 sas_phy->class = SAS;
2079 sas_phy->iproto = SAS_PROTOCOL_ALL;
2080 sas_phy->tproto = 0;
2081 sas_phy->type = PHY_TYPE_PHYSICAL;
2082 sas_phy->role = PHY_ROLE_INITIATOR;
2083 sas_phy->oob_mode = OOB_NOT_CONNECTED;
2084 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
2085
2086 sas_phy->id = phy_id;
2087 sas_phy->sas_addr = &mvi->sas_addr[0];
2088 sas_phy->frame_rcvd = &phy->frame_rcvd[0];
2089 sas_phy->ha = &mvi->sas;
2090 sas_phy->lldd_phy = phy;
2091}
2092
2093static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
2094 const struct pci_device_id *ent)
2095{
2096 struct mvs_info *mvi;
2097 unsigned long res_start, res_len, res_flag;
2098 struct asd_sas_phy **arr_phy;
2099 struct asd_sas_port **arr_port;
2100 const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
2101 int i;
2102
2103 /*
2104 * alloc and init our per-HBA mvs_info struct
2105 */
2106
2107 mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
2108 if (!mvi)
2109 return NULL;
2110
2111 spin_lock_init(&mvi->lock);
2112 mvi->pdev = pdev;
2113 mvi->chip = chip;
2114
2115 if (pdev->device == 0x6440 && pdev->revision == 0)
2116 mvi->flags |= MVF_PHY_PWR_FIX;
2117
2118 /*
2119 * alloc and init SCSI, SAS glue
2120 */
2121
2122 mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
2123 if (!mvi->shost)
2124 goto err_out;
2125
2126 arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2127 arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
2128 if (!arr_phy || !arr_port)
2129 goto err_out;
2130
2131 for (i = 0; i < MVS_MAX_PHYS; i++) {
2132 mvs_phy_init(mvi, i);
2133 arr_phy[i] = &mvi->phy[i].sas_phy;
2134 arr_port[i] = &mvi->port[i].sas_port;
2135 }
2136
2137 SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
2138 mvi->shost->transportt = mvs_stt;
2139 mvi->shost->max_id = 21;
2140 mvi->shost->max_lun = ~0;
2141 mvi->shost->max_channel = 0;
2142 mvi->shost->max_cmd_len = 16;
2143
2144 mvi->sas.sas_ha_name = DRV_NAME;
2145 mvi->sas.dev = &pdev->dev;
2146 mvi->sas.lldd_module = THIS_MODULE;
2147 mvi->sas.sas_addr = &mvi->sas_addr[0];
2148 mvi->sas.sas_phy = arr_phy;
2149 mvi->sas.sas_port = arr_port;
2150 mvi->sas.num_phys = chip->n_phy;
2151 mvi->sas.lldd_max_execute_num = MVS_CHIP_SLOT_SZ - 1;
2152 mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
2153 mvi->can_queue = (MVS_CHIP_SLOT_SZ >> 1) - 1;
2154 mvi->sas.lldd_ha = mvi;
2155 mvi->sas.core.shost = mvi->shost;
2156
2157 mvs_tag_init(mvi);
2158
2159 /*
2160 * ioremap main and peripheral registers
2161 */
2162
2163#ifdef MVS_ENABLE_PERI
2164 res_start = pci_resource_start(pdev, 2);
2165 res_len = pci_resource_len(pdev, 2);
2166 if (!res_start || !res_len)
2167 goto err_out;
2168
2169 mvi->peri_regs = ioremap_nocache(res_start, res_len);
2170 if (!mvi->peri_regs)
2171 goto err_out;
2172#endif
2173
2174 res_start = pci_resource_start(pdev, 4);
2175 res_len = pci_resource_len(pdev, 4);
2176 if (!res_start || !res_len)
2177 goto err_out;
2178
2179 res_flag = pci_resource_flags(pdev, 4);
2180 if (res_flag & IORESOURCE_CACHEABLE)
2181 mvi->regs = ioremap(res_start, res_len);
2182 else
2183 mvi->regs = ioremap_nocache(res_start, res_len);
2184
2185 if (!mvi->regs)
2186 goto err_out;
2187
2188 /*
2189 * alloc and init our DMA areas
2190 */
2191
2192 mvi->tx = dma_alloc_coherent(&pdev->dev,
2193 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
2194 &mvi->tx_dma, GFP_KERNEL);
2195 if (!mvi->tx)
2196 goto err_out;
2197 memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
2198
2199 mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
2200 &mvi->rx_fis_dma, GFP_KERNEL);
2201 if (!mvi->rx_fis)
2202 goto err_out;
2203 memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
2204
2205 mvi->rx = dma_alloc_coherent(&pdev->dev,
2206 sizeof(*mvi->rx) * MVS_RX_RING_SZ,
2207 &mvi->rx_dma, GFP_KERNEL);
2208 if (!mvi->rx)
2209 goto err_out;
2210 memset(mvi->rx, 0, sizeof(*mvi->rx) * MVS_RX_RING_SZ);
2211
2212 mvi->rx[0] = cpu_to_le32(0xfff);
2213 mvi->rx_cons = 0xfff;
2214
2215 mvi->slot = dma_alloc_coherent(&pdev->dev,
2216 sizeof(*mvi->slot) * MVS_SLOTS,
2217 &mvi->slot_dma, GFP_KERNEL);
2218 if (!mvi->slot)
2219 goto err_out;
2220 memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
2221
2222 for (i = 0; i < MVS_SLOTS; i++) {
2223 struct mvs_slot_info *slot = &mvi->slot_info[i];
2224
2225 slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
2226 &slot->buf_dma, GFP_KERNEL);
2227 if (!slot->buf)
2228 goto err_out;
2229 memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
2230 }
2231
2232 /* finally, read NVRAM to get our SAS address */
2233 if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
2234 goto err_out;
2235 return mvi;
2236
2237err_out:
2238 mvs_free(mvi);
2239 return NULL;
2240}
2241
2242static u32 mvs_cr32(void __iomem *regs, u32 addr)
2243{
2244 mw32(CMD_ADDR, addr);
2245 return mr32(CMD_DATA);
2246}
2247
2248static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
2249{
2250 mw32(CMD_ADDR, addr);
2251 mw32(CMD_DATA, val);
2252}
2253
2254static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
2255{
2256 void __iomem *regs = mvi->regs;
2257	return (port < 4) ? mr32(P0_SER_CTLSTAT + port * 4) :
2258			    mr32(P4_SER_CTLSTAT + (port - 4) * 4);
2259}
2260
2261static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
2262{
2263 void __iomem *regs = mvi->regs;
2264 if (port < 4)
2265 mw32(P0_SER_CTLSTAT + port * 4, val);
2266 else
2267 mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
2268}
2269
2270static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
2271{
2272 void __iomem *regs = mvi->regs + off;
2273 void __iomem *regs2 = mvi->regs + off2;
2274	return (port < 4) ? readl(regs + port * 8) :
2275			    readl(regs2 + (port - 4) * 8);
2276}
2277
2278static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
2279 u32 port, u32 val)
2280{
2281 void __iomem *regs = mvi->regs + off;
2282 void __iomem *regs2 = mvi->regs + off2;
2283 if (port < 4)
2284 writel(val, regs + port * 8);
2285 else
2286 writel(val, regs2 + (port - 4) * 8);
2287}
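/*
 * The per-phy registers are banked: phys 0-3 are reachable from one base
 * offset and phys 4-7 from a second one, each with an 8-byte stride.  For
 * example, mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, 6, val)
 * resolves to
 *
 *	writel(val, mvi->regs + MVS_P4_INT_MASK + (6 - 4) * 8);
 *
 * i.e. the third entry of the upper bank.  The wrappers below just plug in
 * the right offset pair for each register class.
 */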
2288
2289static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
2290{
2291 return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
2292}
2293
2294static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
2295{
2296 mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
2297}
2298
2299static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
2300{
2301 mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
2302}
2303
2304static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
2305{
2306 return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
2307}
2308
2309static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
2310{
2311 mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
2312}
2313
2314static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
2315{
2316 mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
2317}
2318
2319static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
2320{
2321 return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
2322}
2323
2324static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
2325{
2326 mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
2327}
2328
2329static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
2330{
2331 return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
2332}
2333
2334static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
2335{
2336 mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
2337}
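/*
 * The *_cfg_* and *_vsr_* accessors implement the usual address/data
 * indirection: write the register index into the ADDR window, then read or
 * write the DATA window.  Typical use, as seen in mvs_update_phyinfo()
 * below:
 *
 *	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
 *	phy->dev_info = mvs_read_port_cfg_data(mvi, i);
 *
 * The VSR pair works the same way but selects the vendor-specific PHY
 * registers (enum sas_sata_vsp_regs) instead of the config-port ones.
 */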
2338
2339static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
2340{
2341 void __iomem *regs = mvi->regs;
2342 u32 tmp;
2343
2344 /* workaround for SATA R-ERR, to ignore phy glitch */
2345 tmp = mvs_cr32(regs, CMD_PHY_TIMER);
2346 tmp &= ~(1 << 9);
2347 tmp |= (1 << 10);
2348 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
2349
2350 /* enable retry 127 times */
2351 mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);
2352
2353 /* extend open frame timeout to max */
2354 tmp = mvs_cr32(regs, CMD_SAS_CTL0);
2355 tmp &= ~0xffff;
2356 tmp |= 0x3fff;
2357 mvs_cw32(regs, CMD_SAS_CTL0, tmp);
2358
2359 /* workaround for WDTIMEOUT , set to 550 ms */
2360 mvs_cw32(regs, CMD_WD_TIMER, 0xffffff);
2361
2362	/* do not halt another port's operation during a wide-port link change */
2363 mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);
2364
2365	/* workaround for Seagate disks not found during OOB:
2366	 * receive COMINIT before sending out COMWAKE */
2367 tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
2368 tmp &= 0x0000ffff;
2369 tmp |= 0x00fa0000;
2370 mvs_cw32(regs, CMD_PHY_MODE_21, tmp);
2371
2372 tmp = mvs_cr32(regs, CMD_PHY_TIMER);
2373 tmp &= 0x1fffffff;
2374 tmp |= (2U << 29); /* 8 ms retry */
2375 mvs_cw32(regs, CMD_PHY_TIMER, tmp);
2376
2377 /* TEST - for phy decoding error, adjust voltage levels */
2378 mw32(P0_VSR_ADDR + 0, 0x8);
2379 mw32(P0_VSR_DATA + 0, 0x2F0);
2380
2381 mw32(P0_VSR_ADDR + 8, 0x8);
2382 mw32(P0_VSR_DATA + 8, 0x2F0);
2383
2384 mw32(P0_VSR_ADDR + 16, 0x8);
2385 mw32(P0_VSR_DATA + 16, 0x2F0);
2386
2387 mw32(P0_VSR_ADDR + 24, 0x8);
2388 mw32(P0_VSR_DATA + 24, 0x2F0);
2389
2390}
2391
2392static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
2393{
2394 void __iomem *regs = mvi->regs;
2395 u32 tmp;
2396
2397 tmp = mr32(PCS);
2398 if (mvi->chip->n_phy <= 4)
2399 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
2400 else
2401 tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
2402 mw32(PCS, tmp);
2403}
2404
2405static void mvs_detect_porttype(struct mvs_info *mvi, int i)
2406{
2407 void __iomem *regs = mvi->regs;
2408 u32 reg;
2409 struct mvs_phy *phy = &mvi->phy[i];
2410
2411 /* TODO check & save device type */
2412 reg = mr32(GBL_PORT_TYPE);
2413
2414 if (reg & MODE_SAS_SATA & (1 << i))
2415 phy->phy_type |= PORT_TYPE_SAS;
2416 else
2417 phy->phy_type |= PORT_TYPE_SATA;
2418}
2419
2420static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
2421{
2422 u32 *s = (u32 *) buf;
2423
2424 if (!s)
2425 return NULL;
2426
2427 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
2428 s[3] = mvs_read_port_cfg_data(mvi, i);
2429
2430 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
2431 s[2] = mvs_read_port_cfg_data(mvi, i);
2432
2433 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
2434 s[1] = mvs_read_port_cfg_data(mvi, i);
2435
2436 mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
2437 s[0] = mvs_read_port_cfg_data(mvi, i);
2438
2439 return (void *)s;
2440}
2441
2442static u32 mvs_is_sig_fis_received(u32 irq_status)
2443{
2444 return irq_status & PHYEV_SIG_FIS;
2445}
2446
2447static void mvs_update_wideport(struct mvs_info *mvi, int i)
2448{
2449 struct mvs_phy *phy = &mvi->phy[i];
2450 struct mvs_port *port = phy->port;
2451 int j, no;
2452
2453 for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
2454 if (no & 1) {
2455 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
2456 mvs_write_port_cfg_data(mvi, no,
2457 port->wide_port_phymap);
2458 } else {
2459 mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
2460 mvs_write_port_cfg_data(mvi, no, 0);
2461 }
2462}
2463
2464static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
2465{
2466 u32 tmp;
2467 struct mvs_phy *phy = &mvi->phy[i];
2468 struct mvs_port *port;
2469
2470 tmp = mvs_read_phy_ctl(mvi, i);
2471
2472 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
2473 if (!phy->port)
2474 phy->phy_attached = 1;
2475 return tmp;
2476 }
2477
2478 port = phy->port;
2479 if (port) {
2480 if (phy->phy_type & PORT_TYPE_SAS) {
2481 port->wide_port_phymap &= ~(1U << i);
2482 if (!port->wide_port_phymap)
2483 port->port_attached = 0;
2484 mvs_update_wideport(mvi, i);
2485 } else if (phy->phy_type & PORT_TYPE_SATA)
2486 port->port_attached = 0;
2487 mvs_free_reg_set(mvi, phy->port);
2488 phy->port = NULL;
2489 phy->phy_attached = 0;
2490 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
2491 }
2492 return 0;
2493}
2494
2495static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
2496 int get_st)
2497{
2498 struct mvs_phy *phy = &mvi->phy[i];
2499 struct pci_dev *pdev = mvi->pdev;
2500 u32 tmp, j;
2501 u64 tmp64;
2502
2503 mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
2504 phy->dev_info = mvs_read_port_cfg_data(mvi, i);
2505
2506 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
2507 phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
2508
2509 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
2510 phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
2511
2512 if (get_st) {
2513 phy->irq_status = mvs_read_port_irq_stat(mvi, i);
2514 phy->phy_status = mvs_is_phy_ready(mvi, i);
2515 }
2516
2517 if (phy->phy_status) {
2518 u32 phy_st;
2519 struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
2520
2521 mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
2522 phy_st = mvs_read_port_cfg_data(mvi, i);
2523
2524 sas_phy->linkrate =
2525 (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
2526 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
2527
2528		/* update attached_sas_addr */
2529 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
2530 phy->att_dev_sas_addr =
2531 (u64) mvs_read_port_cfg_data(mvi, i) << 32;
2532
2533 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
2534 phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
2535
2536 dev_printk(KERN_DEBUG, &pdev->dev,
2537 "phy[%d] Get Attached Address 0x%llX ,"
2538 " SAS Address 0x%llX\n",
2539 i, phy->att_dev_sas_addr, phy->dev_sas_addr);
2540 dev_printk(KERN_DEBUG, &pdev->dev,
2541 "Rate = %x , type = %d\n",
2542 sas_phy->linkrate, phy->phy_type);
2543
2544#if 1
2545 /*
2546 * If the device is capable of supporting a wide port
2547 * on its phys, it may configure the phys as a wide port.
2548 */
2549 if (phy->phy_type & PORT_TYPE_SAS)
2550 for (j = 0; j < mvi->chip->n_phy && j != i; ++j) {
2551 if ((mvi->phy[j].phy_attached) &&
2552 (mvi->phy[j].phy_type & PORT_TYPE_SAS))
2553 if (phy->att_dev_sas_addr ==
2554 mvi->phy[j].att_dev_sas_addr - 1) {
2555 phy->att_dev_sas_addr =
2556 mvi->phy[j].att_dev_sas_addr;
2557 break;
2558 }
2559 }
2560
2561#endif
2562
2563 tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
2564 memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);
2565
2566 if (phy->phy_type & PORT_TYPE_SAS) {
2567 mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
2568 phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
2569 phy->identify.device_type =
2570 phy->att_dev_info & PORT_DEV_TYPE_MASK;
2571
2572 if (phy->identify.device_type == SAS_END_DEV)
2573 phy->identify.target_port_protocols =
2574 SAS_PROTOCOL_SSP;
2575 else if (phy->identify.device_type != NO_DEVICE)
2576 phy->identify.target_port_protocols =
2577 SAS_PROTOCOL_SMP;
2578 if (phy_st & PHY_OOB_DTCTD)
2579 sas_phy->oob_mode = SAS_OOB_MODE;
2580 phy->frame_rcvd_size =
2581 sizeof(struct sas_identify_frame);
2582 } else if (phy->phy_type & PORT_TYPE_SATA) {
2583 phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
2584 if (mvs_is_sig_fis_received(phy->irq_status)) {
2585 if (phy_st & PHY_OOB_DTCTD)
2586 sas_phy->oob_mode = SATA_OOB_MODE;
2587 phy->frame_rcvd_size =
2588 sizeof(struct dev_to_host_fis);
2589 mvs_get_d2h_reg(mvi, i,
2590 (void *)sas_phy->frame_rcvd);
2591 } else {
2592 dev_printk(KERN_DEBUG, &pdev->dev,
2593 "No sig fis\n");
2594 }
2595 }
2596 /* workaround for HW phy decoding error on 1.5G disk drives */
2597 mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
2598 tmp = mvs_read_port_vsr_data(mvi, i);
2599 if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
2600 PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
2601 SAS_LINK_RATE_1_5_GBPS)
2602 tmp &= ~PHY_MODE6_DTL_SPEED;
2603 else
2604 tmp |= PHY_MODE6_DTL_SPEED;
2605 mvs_write_port_vsr_data(mvi, i, tmp);
2606
2607 }
2608 if (get_st)
2609 mvs_write_port_irq_stat(mvi, i, phy->irq_status);
2610}
2611
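/* libsas callback: a port has been formed around sas_phy; bind the phy
 * to the matching mvs_port and, for SAS phys, program the wide-port map.
 */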
2612static void mvs_port_formed(struct asd_sas_phy *sas_phy)
2613{
2614 struct sas_ha_struct *sas_ha = sas_phy->ha;
2615 struct mvs_info *mvi = sas_ha->lldd_ha;
2616 struct asd_sas_port *sas_port = sas_phy->port;
2617 struct mvs_phy *phy = sas_phy->lldd_phy;
2618 struct mvs_port *port = &mvi->port[sas_port->id];
2619 unsigned long flags;
2620
2621 spin_lock_irqsave(&mvi->lock, flags);
2622 port->port_attached = 1;
2623 phy->port = port;
2624 port->taskfileset = MVS_ID_NOT_MAPPED;
2625 if (phy->phy_type & PORT_TYPE_SAS) {
2626 port->wide_port_phymap = sas_port->phy_mask;
2627 mvs_update_wideport(mvi, sas_phy->id);
2628 }
2629 spin_unlock_irqrestore(&mvi->lock, flags);
2630}
2631
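/* One-time controller bring-up: mask interrupts, reset the HBA, power
 * on the phys, program the command/FIS/delivery/completion ring bases,
 * enable auto port detection, initialise each phy, set the endian
 * controls and finally enable TX/RX and interrupts.
 */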
2632static int __devinit mvs_hw_init(struct mvs_info *mvi)
2633{
2634 void __iomem *regs = mvi->regs;
2635 int i;
2636 u32 tmp, cctl;
2637
2638 /* make sure interrupts are masked immediately (paranoia) */
2639 mw32(GBL_CTL, 0);
2640 tmp = mr32(GBL_CTL);
2641
2642 /* Reset Controller */
2643 if (!(tmp & HBA_RST)) {
2644 if (mvi->flags & MVF_PHY_PWR_FIX) {
2645 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
2646 tmp &= ~PCTL_PWR_ON;
2647 tmp |= PCTL_OFF;
2648 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
2649
2650 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
2651 tmp &= ~PCTL_PWR_ON;
2652 tmp |= PCTL_OFF;
2653 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
2654 }
2655
2656 /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
2657 mw32_f(GBL_CTL, HBA_RST);
2658 }
2659
2660 /* wait for reset to finish; timeout is just a guess */
2661 i = 1000;
2662 while (i-- > 0) {
2663 msleep(10);
2664
2665 if (!(mr32(GBL_CTL) & HBA_RST))
2666 break;
2667 }
2668 if (mr32(GBL_CTL) & HBA_RST) {
2669 dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
2670 return -EBUSY;
2671 }
2672
2673 /* Init Chip */
2674 /* make sure RST is set; HBA_RST /should/ have done that for us */
2675 cctl = mr32(CTL);
2676 if (cctl & CCTL_RST)
2677 cctl &= ~CCTL_RST;
2678 else
2679 mw32_f(CTL, cctl | CCTL_RST);
2680
2681 /* write to device control _AND_ device status register? - A.C. */
2682 pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
2683 tmp &= ~PRD_REQ_MASK;
2684 tmp |= PRD_REQ_SIZE;
2685 pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
2686
2687 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
2688 tmp |= PCTL_PWR_ON;
2689 tmp &= ~PCTL_OFF;
2690 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
2691
2692 pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
2693 tmp |= PCTL_PWR_ON;
2694 tmp &= ~PCTL_OFF;
2695 pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
2696
2697 mw32_f(CTL, cctl);
2698
2699 /* reset control */
2700 mw32(PCS, 0); /*MVS_PCS */
2701
2702 mvs_phy_hacks(mvi);
2703
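	/* program the DMA ring base addresses (command list, received FIS
	 * area, delivery and completion queues); the high half is written
	 * as two 16-bit shifts so 32-bit dma_addr_t builds avoid an
	 * undefined 32-bit shift
	 */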
2704 mw32(CMD_LIST_LO, mvi->slot_dma);
2705 mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
2706
2707 mw32(RX_FIS_LO, mvi->rx_fis_dma);
2708 mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
2709
2710 mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
2711 mw32(TX_LO, mvi->tx_dma);
2712 mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
2713
2714 mw32(RX_CFG, MVS_RX_RING_SZ);
2715 mw32(RX_LO, mvi->rx_dma);
2716 mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
2717
2718 /* enable auto port detection */
2719 mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
2720 msleep(100);
2721 /* init and reset phys */
2722 for (i = 0; i < mvi->chip->n_phy; i++) {
2723 /* FIXME: is this the correct dword order? */
2724 u32 lo = *((u32 *)&mvi->sas_addr[0]);
2725 u32 hi = *((u32 *)&mvi->sas_addr[4]);
2726
2727 mvs_detect_porttype(mvi, i);
2728
2729 /* set phy local SAS address */
2730 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
2731 mvs_write_port_cfg_data(mvi, i, lo);
2732 mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
2733 mvs_write_port_cfg_data(mvi, i, hi);
2734
2735 /* reset phy */
2736 tmp = mvs_read_phy_ctl(mvi, i);
2737 tmp |= PHY_RST;
2738 mvs_write_phy_ctl(mvi, i, tmp);
2739 }
2740
2741 msleep(100);
2742
2743 for (i = 0; i < mvi->chip->n_phy; i++) {
2744 /* clear phy int status */
2745 tmp = mvs_read_port_irq_stat(mvi, i);
2746 tmp &= ~PHYEV_SIG_FIS;
2747 mvs_write_port_irq_stat(mvi, i, tmp);
2748
2749 /* set phy int mask */
2750 tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
2751 PHYEV_ID_DONE | PHYEV_DEC_ERR;
2752 mvs_write_port_irq_mask(mvi, i, tmp);
2753
2754 msleep(100);
2755 mvs_update_phyinfo(mvi, i, 1);
2756 mvs_enable_xmt(mvi, i);
2757 }
2758
2759 /* FIXME: update wide port bitmaps */
2760
2761 /* little endian for open address and command table, etc. */
2762 /* A.C.
2763  * it seems that (from the spec) turning on big-endian won't do us
2764  * any good on big-endian machines; needs further confirmation.
2765  */
2766 cctl = mr32(CTL);
2767 cctl |= CCTL_ENDIAN_CMD;
2768 cctl |= CCTL_ENDIAN_DATA;
2769 cctl &= ~CCTL_ENDIAN_OPEN;
2770 cctl |= CCTL_ENDIAN_RSP;
2771 mw32_f(CTL, cctl);
2772
2773 /* reset CMD queue */
2774 tmp = mr32(PCS);
2775 tmp |= PCS_CMD_RST;
2776 mw32(PCS, tmp);
2777 /* interrupt coalescing may cause a missed HW interrupt in some cases,
2778  * and the max coalescing count is 0x1ff while our max slot count is
2779  * 0x200, so leave the count at 0 (coalescing disabled).
2780  */
2781 tmp = 0;
2782 mw32(INT_COAL, tmp);
2783
2784 tmp = 0x100;
2785 mw32(INT_COAL_TMOUT, tmp);
2786
2787 /* ladies and gentlemen, start your engines */
2788 mw32(TX_CFG, 0);
2789 mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
2790 mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
2791 /* enable CMD/CMPL_Q/RESP mode */
2792 mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
2793
2794 /* re-enable interrupts globally */
2795 mvs_hba_interrupt_enable(mvi);
2796
2797 /* enable completion queue interrupt */
2798 tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM);
2799 mw32(INT_MASK, tmp);
2800
2801 return 0;
2802}
2803
2804static void __devinit mvs_print_info(struct mvs_info *mvi)
2805{
2806 struct pci_dev *pdev = mvi->pdev;
2807 static int printed_version;
2808
2809 if (!printed_version++)
2810 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2811
2812 dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
2813 mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
2814}
2815
2816static int __devinit mvs_pci_init(struct pci_dev *pdev,
2817 const struct pci_device_id *ent)
2818{
2819 int rc;
2820 struct mvs_info *mvi;
2821 irq_handler_t irq_handler = mvs_interrupt;
2822
2823 rc = pci_enable_device(pdev);
2824 if (rc)
2825 return rc;
2826
2827 pci_set_master(pdev);
2828
2829 rc = pci_request_regions(pdev, DRV_NAME);
2830 if (rc)
2831 goto err_out_disable;
2832
2833 rc = pci_go_64(pdev);
2834 if (rc)
2835 goto err_out_regions;
2836
2837 mvi = mvs_alloc(pdev, ent);
2838 if (!mvi) {
2839 rc = -ENOMEM;
2840 goto err_out_regions;
2841 }
2842
2843 rc = mvs_hw_init(mvi);
2844 if (rc)
2845 goto err_out_mvi;
2846
2847#ifndef MVS_DISABLE_MSI
2848 if (!pci_enable_msi(pdev)) {
2849 u32 tmp;
2850 void __iomem *regs = mvi->regs;
2851 mvi->flags |= MVF_MSI;
2852 irq_handler = mvs_msi_interrupt;
2853 tmp = mr32(PCS);
2854 mw32(PCS, tmp | PCS_SELF_CLEAR);
2855 }
2856#endif
2857
2858 rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
2859 if (rc)
2860 goto err_out_msi;
2861
2862 rc = scsi_add_host(mvi->shost, &pdev->dev);
2863 if (rc)
2864 goto err_out_irq;
2865
2866 rc = sas_register_ha(&mvi->sas);
2867 if (rc)
2868 goto err_out_shost;
2869
2870 pci_set_drvdata(pdev, mvi);
2871
2872 mvs_print_info(mvi);
2873
2874 scsi_scan_host(mvi->shost);
2875
2876 return 0;
2877
2878err_out_shost:
2879 scsi_remove_host(mvi->shost);
2880err_out_irq:
2881 free_irq(pdev->irq, mvi);
2882err_out_msi:
2883 if (mvi->flags & MVF_MSI)
2884 pci_disable_msi(pdev);
2885err_out_mvi:
2886 mvs_free(mvi);
2887err_out_regions:
2888 pci_release_regions(pdev);
2889err_out_disable:
2890 pci_disable_device(pdev);
2891 return rc;
2892}
2893
2894static void __devexit mvs_pci_remove(struct pci_dev *pdev)
2895{
2896 struct mvs_info *mvi = pci_get_drvdata(pdev);
2897
2898 pci_set_drvdata(pdev, NULL);
2899
2900 if (mvi) {
2901 sas_unregister_ha(&mvi->sas);
2902 mvs_hba_interrupt_disable(mvi);
2903 sas_remove_host(mvi->shost);
2904 scsi_remove_host(mvi->shost);
2905
2906 free_irq(pdev->irq, mvi);
2907 if (mvi->flags & MVF_MSI)
2908 pci_disable_msi(pdev);
2909 mvs_free(mvi);
2910 pci_release_regions(pdev);
2911 }
2912 pci_disable_device(pdev);
2913}
2914
2915static struct sas_domain_function_template mvs_transport_ops = {
2916 .lldd_execute_task = mvs_task_exec,
2917 .lldd_control_phy = mvs_phy_control,
2918 .lldd_abort_task = mvs_task_abort,
2919 .lldd_port_formed = mvs_port_formed
2920};
2921
2922static struct pci_device_id __devinitdata mvs_pci_table[] = {
2923 { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
2924 { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
2925 { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
2926 { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },
2927
2928 { } /* terminate list */
2929};
2930
2931static struct pci_driver mvs_pci_driver = {
2932 .name = DRV_NAME,
2933 .id_table = mvs_pci_table,
2934 .probe = mvs_pci_init,
2935 .remove = __devexit_p(mvs_pci_remove),
2936};
2937
2938static int __init mvs_init(void)
2939{
2940 int rc;
2941
2942 mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
2943 if (!mvs_stt)
2944 return -ENOMEM;
2945
2946 rc = pci_register_driver(&mvs_pci_driver);
2947 if (rc)
2948 goto err_out;
2949
2950 return 0;
2951
2952err_out:
2953 sas_release_transport(mvs_stt);
2954 return rc;
2955}
2956
2957static void __exit mvs_exit(void)
2958{
2959 pci_unregister_driver(&mvs_pci_driver);
2960 sas_release_transport(mvs_stt);
2961}
2962
2963module_init(mvs_init);
2964module_exit(mvs_exit);
2965
2966MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
2967MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
2968MODULE_VERSION(DRV_VERSION);
2969MODULE_LICENSE("GPL");
2970MODULE_DEVICE_TABLE(pci, mvs_pci_table);
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 1479c60441c8..2cd899bfe84b 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -23,7 +23,7 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
23 mutex_lock(&ha->fce_mutex); 23 mutex_lock(&ha->fce_mutex);
24 24
25 seq_printf(s, "FCE Trace Buffer\n"); 25 seq_printf(s, "FCE Trace Buffer\n");
26 seq_printf(s, "In Pointer = %llx\n\n", ha->fce_wr); 26 seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
27 seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); 27 seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
28 seq_printf(s, "FCE Enable Registers\n"); 28 seq_printf(s, "FCE Enable Registers\n");
29 seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", 29 seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 0f029d0d7315..fc84db4069f4 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -100,8 +100,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
100 100
101 if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) { 101 if (sts_entry->iscsiFlags &ISCSI_FLAG_RESIDUAL_UNDER) {
102 scsi_set_resid(cmd, residual); 102 scsi_set_resid(cmd, residual);
103 if (!scsi_status && ((scsi_bufflen(cmd) - residual) < 103 if ((scsi_bufflen(cmd) - residual) < cmd->underflow) {
104 cmd->underflow)) {
105 104
106 cmd->result = DID_ERROR << 16; 105 cmd->result = DID_ERROR << 16;
107 106
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 65455ab1f3b9..4a1cf6377f6c 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -651,7 +651,7 @@ static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
651 651
652static irqreturn_t qpti_intr(int irq, void *dev_id); 652static irqreturn_t qpti_intr(int irq, void *dev_id);
653 653
654static void __init qpti_chain_add(struct qlogicpti *qpti) 654static void __devinit qpti_chain_add(struct qlogicpti *qpti)
655{ 655{
656 spin_lock_irq(&qptichain_lock); 656 spin_lock_irq(&qptichain_lock);
657 if (qptichain != NULL) { 657 if (qptichain != NULL) {
@@ -667,7 +667,7 @@ static void __init qpti_chain_add(struct qlogicpti *qpti)
667 spin_unlock_irq(&qptichain_lock); 667 spin_unlock_irq(&qptichain_lock);
668} 668}
669 669
670static void __init qpti_chain_del(struct qlogicpti *qpti) 670static void __devexit qpti_chain_del(struct qlogicpti *qpti)
671{ 671{
672 spin_lock_irq(&qptichain_lock); 672 spin_lock_irq(&qptichain_lock);
673 if (qptichain == qpti) { 673 if (qptichain == qpti) {
@@ -682,7 +682,7 @@ static void __init qpti_chain_del(struct qlogicpti *qpti)
682 spin_unlock_irq(&qptichain_lock); 682 spin_unlock_irq(&qptichain_lock);
683} 683}
684 684
685static int __init qpti_map_regs(struct qlogicpti *qpti) 685static int __devinit qpti_map_regs(struct qlogicpti *qpti)
686{ 686{
687 struct sbus_dev *sdev = qpti->sdev; 687 struct sbus_dev *sdev = qpti->sdev;
688 688
@@ -705,7 +705,7 @@ static int __init qpti_map_regs(struct qlogicpti *qpti)
705 return 0; 705 return 0;
706} 706}
707 707
708static int __init qpti_register_irq(struct qlogicpti *qpti) 708static int __devinit qpti_register_irq(struct qlogicpti *qpti)
709{ 709{
710 struct sbus_dev *sdev = qpti->sdev; 710 struct sbus_dev *sdev = qpti->sdev;
711 711
@@ -730,7 +730,7 @@ fail:
730 return -1; 730 return -1;
731} 731}
732 732
733static void __init qpti_get_scsi_id(struct qlogicpti *qpti) 733static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti)
734{ 734{
735 qpti->scsi_id = prom_getintdefault(qpti->prom_node, 735 qpti->scsi_id = prom_getintdefault(qpti->prom_node,
736 "initiator-id", 736 "initiator-id",
@@ -783,7 +783,7 @@ static void qpti_get_clock(struct qlogicpti *qpti)
783/* The request and response queues must each be aligned 783/* The request and response queues must each be aligned
784 * on a page boundary. 784 * on a page boundary.
785 */ 785 */
786static int __init qpti_map_queues(struct qlogicpti *qpti) 786static int __devinit qpti_map_queues(struct qlogicpti *qpti)
787{ 787{
788 struct sbus_dev *sdev = qpti->sdev; 788 struct sbus_dev *sdev = qpti->sdev;
789 789
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 1541c174937a..d1777a9a9625 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -222,7 +222,7 @@ static struct scsi_host_template sdebug_driver_template = {
222 .cmd_per_lun = 16, 222 .cmd_per_lun = 16,
223 .max_sectors = 0xffff, 223 .max_sectors = 0xffff,
224 .unchecked_isa_dma = 0, 224 .unchecked_isa_dma = 0,
225 .use_clustering = ENABLE_CLUSTERING, 225 .use_clustering = DISABLE_CLUSTERING,
226 .module = THIS_MODULE, 226 .module = THIS_MODULE,
227}; 227};
228 228
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 135c1d054701..ba21d97d1855 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1014,10 +1014,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
1014 } 1014 }
1015 1015
1016 req->buffer = NULL; 1016 req->buffer = NULL;
1017 if (blk_pc_request(req))
1018 sdb->length = req->data_len;
1019 else
1020 sdb->length = req->nr_sectors << 9;
1021 1017
1022 /* 1018 /*
1023 * Next, walk the list, and fill in the addresses and sizes of 1019 * Next, walk the list, and fill in the addresses and sizes of
@@ -1026,6 +1022,10 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
1026 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); 1022 count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
1027 BUG_ON(count > sdb->table.nents); 1023 BUG_ON(count > sdb->table.nents);
1028 sdb->table.nents = count; 1024 sdb->table.nents = count;
1025 if (blk_pc_request(req))
1026 sdb->length = req->data_len;
1027 else
1028 sdb->length = req->nr_sectors << 9;
1029 return BLKPREP_OK; 1029 return BLKPREP_OK;
1030} 1030}
1031 1031
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index fac7534f3ec4..9981682d5302 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -231,7 +231,7 @@ static struct {
231 { ISCSI_SESSION_FREE, "FREE" }, 231 { ISCSI_SESSION_FREE, "FREE" },
232}; 232};
233 233
234const char *iscsi_session_state_name(int state) 234static const char *iscsi_session_state_name(int state)
235{ 235{
236 int i; 236 int i;
237 char *name = NULL; 237 char *name = NULL;
@@ -373,7 +373,7 @@ static void session_recovery_timedout(struct work_struct *work)
373 scsi_target_unblock(&session->dev); 373 scsi_target_unblock(&session->dev);
374} 374}
375 375
376void __iscsi_unblock_session(struct iscsi_cls_session *session) 376static void __iscsi_unblock_session(struct iscsi_cls_session *session)
377{ 377{
378 if (!cancel_delayed_work(&session->recovery_work)) 378 if (!cancel_delayed_work(&session->recovery_work))
379 flush_workqueue(iscsi_eh_timer_workq); 379 flush_workqueue(iscsi_eh_timer_workq);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 37df8bbe7f46..7aee64dbfbeb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1835,8 +1835,7 @@ static int sd_suspend(struct device *dev, pm_message_t mesg)
1835 goto done; 1835 goto done;
1836 } 1836 }
1837 1837
1838 if (mesg.event == PM_EVENT_SUSPEND && 1838 if ((mesg.event & PM_EVENT_SLEEP) && sdkp->device->manage_start_stop) {
1839 sdkp->device->manage_start_stop) {
1840 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); 1839 sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
1841 ret = sd_start_stop_device(sdkp, 0); 1840 ret = sd_start_stop_device(sdkp, 0);
1842 } 1841 }
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 2a6e4f472eaa..a6d96694d0a5 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -33,9 +33,9 @@
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34 34
35struct ses_device { 35struct ses_device {
36 char *page1; 36 unsigned char *page1;
37 char *page2; 37 unsigned char *page2;
38 char *page10; 38 unsigned char *page10;
39 short page1_len; 39 short page1_len;
40 short page2_len; 40 short page2_len;
41 short page10_len; 41 short page10_len;
@@ -67,7 +67,7 @@ static int ses_probe(struct device *dev)
67static int ses_recv_diag(struct scsi_device *sdev, int page_code, 67static int ses_recv_diag(struct scsi_device *sdev, int page_code,
68 void *buf, int bufflen) 68 void *buf, int bufflen)
69{ 69{
70 char cmd[] = { 70 unsigned char cmd[] = {
71 RECEIVE_DIAGNOSTIC, 71 RECEIVE_DIAGNOSTIC,
72 1, /* Set PCV bit */ 72 1, /* Set PCV bit */
73 page_code, 73 page_code,
@@ -85,7 +85,7 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
85{ 85{
86 u32 result; 86 u32 result;
87 87
88 char cmd[] = { 88 unsigned char cmd[] = {
89 SEND_DIAGNOSTIC, 89 SEND_DIAGNOSTIC,
90 0x10, /* Set PF bit */ 90 0x10, /* Set PF bit */
91 0, 91 0,
@@ -104,13 +104,13 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
104 104
105static int ses_set_page2_descriptor(struct enclosure_device *edev, 105static int ses_set_page2_descriptor(struct enclosure_device *edev,
106 struct enclosure_component *ecomp, 106 struct enclosure_component *ecomp,
107 char *desc) 107 unsigned char *desc)
108{ 108{
109 int i, j, count = 0, descriptor = ecomp->number; 109 int i, j, count = 0, descriptor = ecomp->number;
110 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); 110 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev);
111 struct ses_device *ses_dev = edev->scratch; 111 struct ses_device *ses_dev = edev->scratch;
112 char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; 112 unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
113 char *desc_ptr = ses_dev->page2 + 8; 113 unsigned char *desc_ptr = ses_dev->page2 + 8;
114 114
115 /* Clear everything */ 115 /* Clear everything */
116 memset(desc_ptr, 0, ses_dev->page2_len - 8); 116 memset(desc_ptr, 0, ses_dev->page2_len - 8);
@@ -133,14 +133,14 @@ static int ses_set_page2_descriptor(struct enclosure_device *edev,
133 return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); 133 return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
134} 134}
135 135
136static char *ses_get_page2_descriptor(struct enclosure_device *edev, 136static unsigned char *ses_get_page2_descriptor(struct enclosure_device *edev,
137 struct enclosure_component *ecomp) 137 struct enclosure_component *ecomp)
138{ 138{
139 int i, j, count = 0, descriptor = ecomp->number; 139 int i, j, count = 0, descriptor = ecomp->number;
140 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev); 140 struct scsi_device *sdev = to_scsi_device(edev->cdev.dev);
141 struct ses_device *ses_dev = edev->scratch; 141 struct ses_device *ses_dev = edev->scratch;
142 char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; 142 unsigned char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
143 char *desc_ptr = ses_dev->page2 + 8; 143 unsigned char *desc_ptr = ses_dev->page2 + 8;
144 144
145 ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len); 145 ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
146 146
@@ -160,17 +160,18 @@ static char *ses_get_page2_descriptor(struct enclosure_device *edev,
160static void ses_get_fault(struct enclosure_device *edev, 160static void ses_get_fault(struct enclosure_device *edev,
161 struct enclosure_component *ecomp) 161 struct enclosure_component *ecomp)
162{ 162{
163 char *desc; 163 unsigned char *desc;
164 164
165 desc = ses_get_page2_descriptor(edev, ecomp); 165 desc = ses_get_page2_descriptor(edev, ecomp);
166 ecomp->fault = (desc[3] & 0x60) >> 4; 166 if (desc)
167 ecomp->fault = (desc[3] & 0x60) >> 4;
167} 168}
168 169
169static int ses_set_fault(struct enclosure_device *edev, 170static int ses_set_fault(struct enclosure_device *edev,
170 struct enclosure_component *ecomp, 171 struct enclosure_component *ecomp,
171 enum enclosure_component_setting val) 172 enum enclosure_component_setting val)
172{ 173{
173 char desc[4] = {0 }; 174 unsigned char desc[4] = {0 };
174 175
175 switch (val) { 176 switch (val) {
176 case ENCLOSURE_SETTING_DISABLED: 177 case ENCLOSURE_SETTING_DISABLED:
@@ -190,26 +191,28 @@ static int ses_set_fault(struct enclosure_device *edev,
190static void ses_get_status(struct enclosure_device *edev, 191static void ses_get_status(struct enclosure_device *edev,
191 struct enclosure_component *ecomp) 192 struct enclosure_component *ecomp)
192{ 193{
193 char *desc; 194 unsigned char *desc;
194 195
195 desc = ses_get_page2_descriptor(edev, ecomp); 196 desc = ses_get_page2_descriptor(edev, ecomp);
196 ecomp->status = (desc[0] & 0x0f); 197 if (desc)
198 ecomp->status = (desc[0] & 0x0f);
197} 199}
198 200
199static void ses_get_locate(struct enclosure_device *edev, 201static void ses_get_locate(struct enclosure_device *edev,
200 struct enclosure_component *ecomp) 202 struct enclosure_component *ecomp)
201{ 203{
202 char *desc; 204 unsigned char *desc;
203 205
204 desc = ses_get_page2_descriptor(edev, ecomp); 206 desc = ses_get_page2_descriptor(edev, ecomp);
205 ecomp->locate = (desc[2] & 0x02) ? 1 : 0; 207 if (desc)
208 ecomp->locate = (desc[2] & 0x02) ? 1 : 0;
206} 209}
207 210
208static int ses_set_locate(struct enclosure_device *edev, 211static int ses_set_locate(struct enclosure_device *edev,
209 struct enclosure_component *ecomp, 212 struct enclosure_component *ecomp,
210 enum enclosure_component_setting val) 213 enum enclosure_component_setting val)
211{ 214{
212 char desc[4] = {0 }; 215 unsigned char desc[4] = {0 };
213 216
214 switch (val) { 217 switch (val) {
215 case ENCLOSURE_SETTING_DISABLED: 218 case ENCLOSURE_SETTING_DISABLED:
@@ -229,7 +232,7 @@ static int ses_set_active(struct enclosure_device *edev,
229 struct enclosure_component *ecomp, 232 struct enclosure_component *ecomp,
230 enum enclosure_component_setting val) 233 enum enclosure_component_setting val)
231{ 234{
232 char desc[4] = {0 }; 235 unsigned char desc[4] = {0 };
233 236
234 switch (val) { 237 switch (val) {
235 case ENCLOSURE_SETTING_DISABLED: 238 case ENCLOSURE_SETTING_DISABLED:
@@ -409,18 +412,18 @@ static int ses_intf_add(struct class_device *cdev,
409{ 412{
410 struct scsi_device *sdev = to_scsi_device(cdev->dev); 413 struct scsi_device *sdev = to_scsi_device(cdev->dev);
411 struct scsi_device *tmp_sdev; 414 struct scsi_device *tmp_sdev;
412 unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr, 415 unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr = NULL,
413 *addl_desc_ptr; 416 *addl_desc_ptr = NULL;
414 struct ses_device *ses_dev; 417 struct ses_device *ses_dev;
415 u32 result; 418 u32 result;
416 int i, j, types, len, components = 0; 419 int i, j, types, len, page7_len = 0, components = 0;
417 int err = -ENOMEM; 420 int err = -ENOMEM;
418 struct enclosure_device *edev; 421 struct enclosure_device *edev;
419 struct ses_component *scomp; 422 struct ses_component *scomp = NULL;
420 423
421 if (!scsi_device_enclosure(sdev)) { 424 if (!scsi_device_enclosure(sdev)) {
422 /* not an enclosure, but might be in one */ 425 /* not an enclosure, but might be in one */
423 edev = enclosure_find(&sdev->host->shost_gendev); 426 edev = enclosure_find(&sdev->host->shost_gendev);
424 if (edev) { 427 if (edev) {
425 ses_match_to_enclosure(edev, sdev); 428 ses_match_to_enclosure(edev, sdev);
426 class_device_put(&edev->cdev); 429 class_device_put(&edev->cdev);
@@ -447,7 +450,7 @@ static int ses_intf_add(struct class_device *cdev,
447 * traversal routines more complex */ 450 * traversal routines more complex */
448 sdev_printk(KERN_ERR, sdev, 451 sdev_printk(KERN_ERR, sdev,
449 "FIXME driver has no support for subenclosures (%d)\n", 452 "FIXME driver has no support for subenclosures (%d)\n",
450 buf[1]); 453 hdr_buf[1]);
451 goto err_free; 454 goto err_free;
452 } 455 }
453 456
@@ -456,23 +459,22 @@ static int ses_intf_add(struct class_device *cdev,
456 if (!buf) 459 if (!buf)
457 goto err_free; 460 goto err_free;
458 461
459 ses_dev->page1 = buf;
460 ses_dev->page1_len = len;
461
462 result = ses_recv_diag(sdev, 1, buf, len); 462 result = ses_recv_diag(sdev, 1, buf, len);
463 if (result) 463 if (result)
464 goto recv_failed; 464 goto recv_failed;
465 465
466 types = buf[10]; 466 types = buf[10];
467 len = buf[11];
468 467
469 type_ptr = buf + 12 + len; 468 type_ptr = buf + 12 + buf[11];
470 469
471 for (i = 0; i < types; i++, type_ptr += 4) { 470 for (i = 0; i < types; i++, type_ptr += 4) {
472 if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || 471 if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
473 type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) 472 type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
474 components += type_ptr[1]; 473 components += type_ptr[1];
475 } 474 }
475 ses_dev->page1 = buf;
476 ses_dev->page1_len = len;
477 buf = NULL;
476 478
477 result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE); 479 result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE);
478 if (result) 480 if (result)
@@ -489,28 +491,29 @@ static int ses_intf_add(struct class_device *cdev,
489 goto recv_failed; 491 goto recv_failed;
490 ses_dev->page2 = buf; 492 ses_dev->page2 = buf;
491 ses_dev->page2_len = len; 493 ses_dev->page2_len = len;
494 buf = NULL;
492 495
493 /* The additional information page --- allows us 496 /* The additional information page --- allows us
494 * to match up the devices */ 497 * to match up the devices */
495 result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE); 498 result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE);
496 if (result) 499 if (!result) {
497 goto no_page10; 500
498 501 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
499 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; 502 buf = kzalloc(len, GFP_KERNEL);
500 buf = kzalloc(len, GFP_KERNEL); 503 if (!buf)
501 if (!buf) 504 goto err_free;
502 goto err_free; 505
503 506 result = ses_recv_diag(sdev, 10, buf, len);
504 result = ses_recv_diag(sdev, 10, buf, len); 507 if (result)
505 if (result) 508 goto recv_failed;
506 goto recv_failed; 509 ses_dev->page10 = buf;
507 ses_dev->page10 = buf; 510 ses_dev->page10_len = len;
508 ses_dev->page10_len = len; 511 buf = NULL;
512 }
509 513
510 no_page10: 514 scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
511 scomp = kmalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
512 if (!scomp) 515 if (!scomp)
513 goto err_free; 516 goto err_free;
514 517
515 edev = enclosure_register(cdev->dev, sdev->sdev_gendev.bus_id, 518 edev = enclosure_register(cdev->dev, sdev->sdev_gendev.bus_id,
516 components, &ses_enclosure_callbacks); 519 components, &ses_enclosure_callbacks);
@@ -521,17 +524,18 @@ static int ses_intf_add(struct class_device *cdev,
521 524
522 edev->scratch = ses_dev; 525 edev->scratch = ses_dev;
523 for (i = 0; i < components; i++) 526 for (i = 0; i < components; i++)
524 edev->component[i].scratch = scomp++; 527 edev->component[i].scratch = scomp + i;
525 528
526 /* Page 7 for the descriptors is optional */ 529 /* Page 7 for the descriptors is optional */
527 buf = NULL;
528 result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE); 530 result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE);
529 if (result) 531 if (result)
530 goto simple_populate; 532 goto simple_populate;
531 533
532 len = (hdr_buf[2] << 8) + hdr_buf[3] + 4; 534 page7_len = len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
533 /* add 1 for trailing '\0' we'll use */ 535 /* add 1 for trailing '\0' we'll use */
534 buf = kzalloc(len + 1, GFP_KERNEL); 536 buf = kzalloc(len + 1, GFP_KERNEL);
537 if (!buf)
538 goto simple_populate;
535 result = ses_recv_diag(sdev, 7, buf, len); 539 result = ses_recv_diag(sdev, 7, buf, len);
536 if (result) { 540 if (result) {
537 simple_populate: 541 simple_populate:
@@ -544,7 +548,8 @@ static int ses_intf_add(struct class_device *cdev,
544 len = (desc_ptr[2] << 8) + desc_ptr[3]; 548 len = (desc_ptr[2] << 8) + desc_ptr[3];
545 /* skip past overall descriptor */ 549 /* skip past overall descriptor */
546 desc_ptr += len + 4; 550 desc_ptr += len + 4;
547 addl_desc_ptr = ses_dev->page10 + 8; 551 if (ses_dev->page10)
552 addl_desc_ptr = ses_dev->page10 + 8;
548 } 553 }
549 type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; 554 type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
550 components = 0; 555 components = 0;
@@ -554,29 +559,35 @@ static int ses_intf_add(struct class_device *cdev,
554 struct enclosure_component *ecomp; 559 struct enclosure_component *ecomp;
555 560
556 if (desc_ptr) { 561 if (desc_ptr) {
557 len = (desc_ptr[2] << 8) + desc_ptr[3]; 562 if (desc_ptr >= buf + page7_len) {
558 desc_ptr += 4; 563 desc_ptr = NULL;
559 /* Add trailing zero - pushes into 564 } else {
560 * reserved space */ 565 len = (desc_ptr[2] << 8) + desc_ptr[3];
561 desc_ptr[len] = '\0'; 566 desc_ptr += 4;
562 name = desc_ptr; 567 /* Add trailing zero - pushes into
568 * reserved space */
569 desc_ptr[len] = '\0';
570 name = desc_ptr;
571 }
563 } 572 }
564 if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE && 573 if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
565 type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE) 574 type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) {
566 continue; 575
567 ecomp = enclosure_component_register(edev, 576 ecomp = enclosure_component_register(edev,
568 components++, 577 components++,
569 type_ptr[0], 578 type_ptr[0],
570 name); 579 name);
571 if (desc_ptr) { 580
572 desc_ptr += len; 581 if (!IS_ERR(ecomp) && addl_desc_ptr)
573 if (!IS_ERR(ecomp))
574 ses_process_descriptor(ecomp, 582 ses_process_descriptor(ecomp,
575 addl_desc_ptr); 583 addl_desc_ptr);
576
577 if (addl_desc_ptr)
578 addl_desc_ptr += addl_desc_ptr[1] + 2;
579 } 584 }
585 if (desc_ptr)
586 desc_ptr += len;
587
588 if (addl_desc_ptr)
589 addl_desc_ptr += addl_desc_ptr[1] + 2;
590
580 } 591 }
581 } 592 }
582 kfree(buf); 593 kfree(buf);
@@ -598,6 +609,7 @@ static int ses_intf_add(struct class_device *cdev,
598 err = -ENODEV; 609 err = -ENODEV;
599 err_free: 610 err_free:
600 kfree(buf); 611 kfree(buf);
612 kfree(scomp);
601 kfree(ses_dev->page10); 613 kfree(ses_dev->page10);
602 kfree(ses_dev->page2); 614 kfree(ses_dev->page2);
603 kfree(ses_dev->page1); 615 kfree(ses_dev->page1);
@@ -630,6 +642,7 @@ static void ses_intf_remove(struct class_device *cdev,
630 ses_dev = edev->scratch; 642 ses_dev = edev->scratch;
631 edev->scratch = NULL; 643 edev->scratch = NULL;
632 644
645 kfree(ses_dev->page10);
633 kfree(ses_dev->page1); 646 kfree(ses_dev->page1);
634 kfree(ses_dev->page2); 647 kfree(ses_dev->page2);
635 kfree(ses_dev); 648 kfree(ses_dev);
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 71952703125a..0a52d9d2da2c 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -17,7 +17,7 @@
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20static const char *verstr = "20080117"; 20static const char *verstr = "20080221";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23 23
@@ -1172,7 +1172,7 @@ static int st_open(struct inode *inode, struct file *filp)
1172 STp->try_dio_now = STp->try_dio; 1172 STp->try_dio_now = STp->try_dio;
1173 STp->recover_count = 0; 1173 STp->recover_count = 0;
1174 DEB( STp->nbr_waits = STp->nbr_finished = 0; 1174 DEB( STp->nbr_waits = STp->nbr_finished = 0;
1175 STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = STp->nbr_combinable = 0; ) 1175 STp->nbr_requests = STp->nbr_dio = STp->nbr_pages = 0; )
1176 1176
1177 retval = check_tape(STp, filp); 1177 retval = check_tape(STp, filp);
1178 if (retval < 0) 1178 if (retval < 0)
@@ -1226,8 +1226,8 @@ static int st_flush(struct file *filp, fl_owner_t id)
1226 } 1226 }
1227 1227
1228 DEBC( if (STp->nbr_requests) 1228 DEBC( if (STp->nbr_requests)
1229 printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d (%d).\n", 1229 printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d.\n",
1230 name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages, STp->nbr_combinable)); 1230 name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages));
1231 1231
1232 if (STps->rw == ST_WRITING && !STp->pos_unknown) { 1232 if (STps->rw == ST_WRITING && !STp->pos_unknown) {
1233 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat; 1233 struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
@@ -1422,9 +1422,6 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
1422 if (STbp->do_dio) { 1422 if (STbp->do_dio) {
1423 STp->nbr_dio++; 1423 STp->nbr_dio++;
1424 STp->nbr_pages += STbp->do_dio; 1424 STp->nbr_pages += STbp->do_dio;
1425 for (i=1; i < STbp->do_dio; i++)
1426 if (page_to_pfn(STbp->sg[i].page) == page_to_pfn(STbp->sg[i-1].page) + 1)
1427 STp->nbr_combinable++;
1428 } 1425 }
1429 ) 1426 )
1430 } else 1427 } else
diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h
index 6c8075712974..5931726fcf93 100644
--- a/drivers/scsi/st.h
+++ b/drivers/scsi/st.h
@@ -164,7 +164,6 @@ struct scsi_tape {
164 int nbr_requests; 164 int nbr_requests;
165 int nbr_dio; 165 int nbr_dio;
166 int nbr_pages; 166 int nbr_pages;
167 int nbr_combinable;
168 unsigned char last_cmnd[6]; 167 unsigned char last_cmnd[6];
169 unsigned char last_sense[16]; 168 unsigned char last_sense[16];
170#endif 169#endif
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 72f6d8015358..654430edf74d 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -461,30 +461,14 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
461 } 461 }
462} 462}
463 463
464static int stex_direct_copy(struct scsi_cmnd *cmd,
465 const void *src, size_t count)
466{
467 size_t cp_len = count;
468 int n_elem = 0;
469
470 n_elem = scsi_dma_map(cmd);
471 if (n_elem < 0)
472 return 0;
473
474 stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD);
475
476 scsi_dma_unmap(cmd);
477
478 return cp_len == count;
479}
480
481static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb) 464static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
482{ 465{
483 struct st_frame *p; 466 struct st_frame *p;
484 size_t count = sizeof(struct st_frame); 467 size_t count = sizeof(struct st_frame);
485 468
486 p = hba->copy_buffer; 469 p = hba->copy_buffer;
487 stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_FROM_CMD); 470 stex_internal_copy(ccb->cmd, p, &count, scsi_sg_count(ccb->cmd),
471 ST_FROM_CMD);
488 memset(p->base, 0, sizeof(u32)*6); 472 memset(p->base, 0, sizeof(u32)*6);
489 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0); 473 *(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
490 p->rom_addr = 0; 474 p->rom_addr = 0;
@@ -502,7 +486,8 @@ static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
502 p->subid = 486 p->subid =
503 hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device; 487 hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;
504 488
505 stex_internal_copy(ccb->cmd, p, &count, ccb->sg_count, ST_TO_CMD); 489 stex_internal_copy(ccb->cmd, p, &count, scsi_sg_count(ccb->cmd),
490 ST_TO_CMD);
506} 491}
507 492
508static void 493static void
@@ -569,8 +554,10 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
569 unsigned char page; 554 unsigned char page;
570 page = cmd->cmnd[2] & 0x3f; 555 page = cmd->cmnd[2] & 0x3f;
571 if (page == 0x8 || page == 0x3f) { 556 if (page == 0x8 || page == 0x3f) {
572 stex_direct_copy(cmd, ms10_caching_page, 557 size_t cp_len = sizeof(ms10_caching_page);
573 sizeof(ms10_caching_page)); 558 stex_internal_copy(cmd, ms10_caching_page,
559 &cp_len, scsi_sg_count(cmd),
560 ST_TO_CMD);
574 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 561 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
575 done(cmd); 562 done(cmd);
576 } else 563 } else
@@ -599,8 +586,10 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
599 if (id != host->max_id - 1) 586 if (id != host->max_id - 1)
600 break; 587 break;
601 if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) { 588 if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
602 stex_direct_copy(cmd, console_inq_page, 589 size_t cp_len = sizeof(console_inq_page);
603 sizeof(console_inq_page)); 590 stex_internal_copy(cmd, console_inq_page,
591 &cp_len, scsi_sg_count(cmd),
592 ST_TO_CMD);
604 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 593 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
605 done(cmd); 594 done(cmd);
606 } else 595 } else
@@ -609,6 +598,7 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
609 case PASSTHRU_CMD: 598 case PASSTHRU_CMD:
610 if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) { 599 if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
611 struct st_drvver ver; 600 struct st_drvver ver;
601 size_t cp_len = sizeof(ver);
612 ver.major = ST_VER_MAJOR; 602 ver.major = ST_VER_MAJOR;
613 ver.minor = ST_VER_MINOR; 603 ver.minor = ST_VER_MINOR;
614 ver.oem = ST_OEM; 604 ver.oem = ST_OEM;
@@ -616,7 +606,9 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
616 ver.signature[0] = PASSTHRU_SIGNATURE; 606 ver.signature[0] = PASSTHRU_SIGNATURE;
617 ver.console_id = host->max_id - 1; 607 ver.console_id = host->max_id - 1;
618 ver.host_no = hba->host->host_no; 608 ver.host_no = hba->host->host_no;
619 cmd->result = stex_direct_copy(cmd, &ver, sizeof(ver)) ? 609 stex_internal_copy(cmd, &ver, &cp_len,
610 scsi_sg_count(cmd), ST_TO_CMD);
611 cmd->result = sizeof(ver) == cp_len ?
620 DID_OK << 16 | COMMAND_COMPLETE << 8 : 612 DID_OK << 16 | COMMAND_COMPLETE << 8 :
621 DID_ERROR << 16 | COMMAND_COMPLETE << 8; 613 DID_ERROR << 16 | COMMAND_COMPLETE << 8;
622 done(cmd); 614 done(cmd);
@@ -709,7 +701,7 @@ static void stex_copy_data(struct st_ccb *ccb,
709 if (ccb->cmd == NULL) 701 if (ccb->cmd == NULL)
710 return; 702 return;
711 stex_internal_copy(ccb->cmd, 703 stex_internal_copy(ccb->cmd,
712 resp->variable, &count, ccb->sg_count, ST_TO_CMD); 704 resp->variable, &count, scsi_sg_count(ccb->cmd), ST_TO_CMD);
713} 705}
714 706
715static void stex_ys_commands(struct st_hba *hba, 707static void stex_ys_commands(struct st_hba *hba,
@@ -734,7 +726,7 @@ static void stex_ys_commands(struct st_hba *hba,
734 726
735 count = STEX_EXTRA_SIZE; 727 count = STEX_EXTRA_SIZE;
736 stex_internal_copy(ccb->cmd, hba->copy_buffer, 728 stex_internal_copy(ccb->cmd, hba->copy_buffer,
737 &count, ccb->sg_count, ST_FROM_CMD); 729 &count, scsi_sg_count(ccb->cmd), ST_FROM_CMD);
738 inq_data = (ST_INQ *)hba->copy_buffer; 730 inq_data = (ST_INQ *)hba->copy_buffer;
739 if (inq_data->DeviceTypeQualifier != 0) 731 if (inq_data->DeviceTypeQualifier != 0)
740 ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT; 732 ccb->srb_status = SRB_STATUS_SELECTION_TIMEOUT;
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 6325901e5093..f7d279542fa5 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -187,10 +187,10 @@
187#define sym53c416_base_2 sym53c416_2 187#define sym53c416_base_2 sym53c416_2
188#define sym53c416_base_3 sym53c416_3 188#define sym53c416_base_3 sym53c416_3
189 189
190static unsigned int sym53c416_base[2] = {0,0}; 190static unsigned int sym53c416_base[2];
191static unsigned int sym53c416_base_1[2] = {0,0}; 191static unsigned int sym53c416_base_1[2];
192static unsigned int sym53c416_base_2[2] = {0,0}; 192static unsigned int sym53c416_base_2[2];
193static unsigned int sym53c416_base_3[2] = {0,0}; 193static unsigned int sym53c416_base_3[2];
194 194
195#endif 195#endif
196 196
@@ -621,25 +621,25 @@ int __init sym53c416_detect(struct scsi_host_template *tpnt)
621 int ints[3]; 621 int ints[3];
622 622
623 ints[0] = 2; 623 ints[0] = 2;
624 if(sym53c416_base) 624 if(sym53c416_base[0])
625 { 625 {
626 ints[1] = sym53c416_base[0]; 626 ints[1] = sym53c416_base[0];
627 ints[2] = sym53c416_base[1]; 627 ints[2] = sym53c416_base[1];
628 sym53c416_setup(NULL, ints); 628 sym53c416_setup(NULL, ints);
629 } 629 }
630 if(sym53c416_base_1) 630 if(sym53c416_base_1[0])
631 { 631 {
632 ints[1] = sym53c416_base_1[0]; 632 ints[1] = sym53c416_base_1[0];
633 ints[2] = sym53c416_base_1[1]; 633 ints[2] = sym53c416_base_1[1];
634 sym53c416_setup(NULL, ints); 634 sym53c416_setup(NULL, ints);
635 } 635 }
636 if(sym53c416_base_2) 636 if(sym53c416_base_2[0])
637 { 637 {
638 ints[1] = sym53c416_base_2[0]; 638 ints[1] = sym53c416_base_2[0];
639 ints[2] = sym53c416_base_2[1]; 639 ints[2] = sym53c416_base_2[1];
640 sym53c416_setup(NULL, ints); 640 sym53c416_setup(NULL, ints);
641 } 641 }
642 if(sym53c416_base_3) 642 if(sym53c416_base_3[0])
643 { 643 {
644 ints[1] = sym53c416_base_3[0]; 644 ints[1] = sym53c416_base_3[0];
645 ints[2] = sym53c416_base_3[1]; 645 ints[2] = sym53c416_base_3[1];
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index fad245b064d6..d57bf3e708d8 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -549,7 +549,7 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
549 atmel_handle_transmit(port, pending); 549 atmel_handle_transmit(port, pending);
550 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); 550 } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
551 551
552 return IRQ_HANDLED; 552 return pass_counter ? IRQ_HANDLED : IRQ_NONE;
553} 553}
554 554
555/* 555/*
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index ddf639144538..9ce12cb2cebc 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -393,7 +393,7 @@ static void sci_init_pins_scif(struct uart_port *port, unsigned int cflag)
393 if (cflag & CRTSCTS) { 393 if (cflag & CRTSCTS) {
394 fcr_val |= SCFCR_MCE; 394 fcr_val |= SCFCR_MCE;
395 } else { 395 } else {
396#ifdef CONFIG_CPU_SUBTYPE_SH7343 396#if defined(CONFIG_CPU_SUBTYPE_SH7343) || defined(CONFIG_CPU_SUBTYPE_SH7366)
397 /* Nothing */ 397 /* Nothing */
398#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 398#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
399 defined(CONFIG_CPU_SUBTYPE_SH7780) || \ 399 defined(CONFIG_CPU_SUBTYPE_SH7780) || \
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index f5764ebcfe07..01a9dd715f5d 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -97,13 +97,18 @@
97# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 97# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
98# define SCIF_ONLY 98# define SCIF_ONLY
99# define PORT_PSCR 0xA405011E 99# define PORT_PSCR 0xA405011E
100#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
101# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */
102# define SCSPTR0 SCPDR0
103# define SCIF_ORER 0x0001 /* overrun error bit */
104# define SCSCR_INIT(port) 0x0038 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
105# define SCIF_ONLY
100#elif defined(CONFIG_CPU_SUBTYPE_SH4_202) 106#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
101# define SCSPTR2 0xffe80020 /* 16 bit SCIF */ 107# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
102# define SCIF_ORER 0x0001 /* overrun error bit */ 108# define SCIF_ORER 0x0001 /* overrun error bit */
103# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ 109# define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */
104# define SCIF_ONLY 110# define SCIF_ONLY
105#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103) 111#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
106# include <asm/hardware.h>
107# define SCIF_BASE_ADDR 0x01030000 112# define SCIF_BASE_ADDR 0x01030000
108# define SCIF_ADDR_SH5 PHYS_PERIPHERAL_BLOCK+SCIF_BASE_ADDR 113# define SCIF_ADDR_SH5 PHYS_PERIPHERAL_BLOCK+SCIF_BASE_ADDR
109# define SCIF_PTR2_OFFS 0x0000020 114# define SCIF_PTR2_OFFS 0x0000020
@@ -577,7 +582,7 @@ static inline int sci_rxd_in(struct uart_port *port)
577 return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ 582 return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
578 return 1; 583 return 1;
579} 584}
580#elif defined(CONFIG_CPU_SUBTYPE_SH7722) 585#elif defined(CONFIG_CPU_SUBTYPE_SH7722) || defined(CONFIG_CPU_SUBTYPE_SH7366)
581static inline int sci_rxd_in(struct uart_port *port) 586static inline int sci_rxd_in(struct uart_port *port)
582{ 587{
583 if (port->mapbase == 0xffe00000) 588 if (port->mapbase == 0xffe00000)
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index e52a6296ca46..9cfcfd8dad5e 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -31,6 +31,7 @@
31#include <asm/mach/dma.h> 31#include <asm/mach/dma.h>
32#include <asm/mach/sysasic.h> 32#include <asm/mach/sysasic.h>
33#include <asm/mach/maple.h> 33#include <asm/mach/maple.h>
34#include <linux/delay.h>
34 35
35MODULE_AUTHOR("Yaegshi Takeshi, Paul Mundt, M.R. Brown, Adrian McMenamin"); 36MODULE_AUTHOR("Yaegshi Takeshi, Paul Mundt, M.R. Brown, Adrian McMenamin");
36MODULE_DESCRIPTION("Maple bus driver for Dreamcast"); 37MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
@@ -53,12 +54,12 @@ static struct device maple_bus;
53static int subdevice_map[MAPLE_PORTS]; 54static int subdevice_map[MAPLE_PORTS];
54static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; 55static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
55static unsigned long maple_pnp_time; 56static unsigned long maple_pnp_time;
56static int started, scanning, liststatus; 57static int started, scanning, liststatus, realscan;
57static struct kmem_cache *maple_queue_cache; 58static struct kmem_cache *maple_queue_cache;
58 59
59struct maple_device_specify { 60struct maple_device_specify {
60 int port; 61 int port;
61 int unit; 62 int unit;
62}; 63};
63 64
64/** 65/**
@@ -68,22 +69,22 @@ struct maple_device_specify {
68 */ 69 */
69int maple_driver_register(struct device_driver *drv) 70int maple_driver_register(struct device_driver *drv)
70{ 71{
71 if (!drv) 72 if (!drv)
72 return -EINVAL; 73 return -EINVAL;
73 drv->bus = &maple_bus_type; 74 drv->bus = &maple_bus_type;
74 return driver_register(drv); 75 return driver_register(drv);
75} 76}
76EXPORT_SYMBOL_GPL(maple_driver_register); 77EXPORT_SYMBOL_GPL(maple_driver_register);
77 78
78/* set hardware registers to enable next round of dma */ 79/* set hardware registers to enable next round of dma */
79static void maplebus_dma_reset(void) 80static void maplebus_dma_reset(void)
80{ 81{
81 ctrl_outl(MAPLE_MAGIC, MAPLE_RESET); 82 ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
82 /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */ 83 /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
83 ctrl_outl(1, MAPLE_TRIGTYPE); 84 ctrl_outl(1, MAPLE_TRIGTYPE);
84 ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED); 85 ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(50000), MAPLE_SPEED);
85 ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR); 86 ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
86 ctrl_outl(1, MAPLE_ENABLE); 87 ctrl_outl(1, MAPLE_ENABLE);
87} 88}
88 89
89/** 90/**
@@ -94,27 +95,36 @@ static void maplebus_dma_reset(void)
94 * @function: the function code for the device 95 * @function: the function code for the device
95 */ 96 */
96void maple_getcond_callback(struct maple_device *dev, 97void maple_getcond_callback(struct maple_device *dev,
97 void (*callback) (struct mapleq * mq), 98 void (*callback) (struct mapleq *mq),
98 unsigned long interval, unsigned long function) 99 unsigned long interval, unsigned long function)
99{ 100{
100 dev->callback = callback; 101 dev->callback = callback;
101 dev->interval = interval; 102 dev->interval = interval;
102 dev->function = cpu_to_be32(function); 103 dev->function = cpu_to_be32(function);
103 dev->when = jiffies; 104 dev->when = jiffies;
104} 105}
105EXPORT_SYMBOL_GPL(maple_getcond_callback); 106EXPORT_SYMBOL_GPL(maple_getcond_callback);
106 107
107static int maple_dma_done(void) 108static int maple_dma_done(void)
108{ 109{
109 return (ctrl_inl(MAPLE_STATE) & 1) == 0; 110 return (ctrl_inl(MAPLE_STATE) & 1) == 0;
110} 111}
111 112
112static void maple_release_device(struct device *dev) 113static void maple_release_device(struct device *dev)
113{ 114{
114 if (dev->type) { 115 struct maple_device *mdev;
115 kfree(dev->type->name); 116 struct mapleq *mq;
116 kfree(dev->type); 117 if (!dev)
117 } 118 return;
119 mdev = to_maple_dev(dev);
120 mq = mdev->mq;
121 if (mq) {
122 if (mq->recvbufdcsp)
123 kmem_cache_free(maple_queue_cache, mq->recvbufdcsp);
124 kfree(mq);
125 mq = NULL;
126 }
127 kfree(mdev);
118} 128}
119 129
120/** 130/**
@@ -123,60 +133,64 @@ static void maple_release_device(struct device *dev)
123 */ 133 */
124void maple_add_packet(struct mapleq *mq) 134void maple_add_packet(struct mapleq *mq)
125{ 135{
126 mutex_lock(&maple_list_lock); 136 mutex_lock(&maple_list_lock);
127 list_add(&mq->list, &maple_waitq); 137 list_add(&mq->list, &maple_waitq);
128 mutex_unlock(&maple_list_lock); 138 mutex_unlock(&maple_list_lock);
129} 139}
130EXPORT_SYMBOL_GPL(maple_add_packet); 140EXPORT_SYMBOL_GPL(maple_add_packet);
131 141
132static struct mapleq *maple_allocq(struct maple_device *dev) 142static struct mapleq *maple_allocq(struct maple_device *mdev)
133{ 143{
134 struct mapleq *mq; 144 struct mapleq *mq;
135 145
136 mq = kmalloc(sizeof(*mq), GFP_KERNEL); 146 mq = kmalloc(sizeof(*mq), GFP_KERNEL);
137 if (!mq) 147 if (!mq)
138 return NULL; 148 return NULL;
139 149
140 mq->dev = dev; 150 mq->dev = mdev;
141 mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL); 151 mq->recvbufdcsp = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
142 mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp); 152 mq->recvbuf = (void *) P2SEGADDR(mq->recvbufdcsp);
143 if (!mq->recvbuf) { 153 if (!mq->recvbuf) {
144 kfree(mq); 154 kfree(mq);
145 return NULL; 155 return NULL;
146 } 156 }
147 157
148 return mq; 158 return mq;
149} 159}
150 160
151static struct maple_device *maple_alloc_dev(int port, int unit) 161static struct maple_device *maple_alloc_dev(int port, int unit)
152{ 162{
153 struct maple_device *dev; 163 struct maple_device *mdev;
154 164
155 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 165 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
156 if (!dev) 166 if (!mdev)
157 return NULL; 167 return NULL;
158 168
159 dev->port = port; 169 mdev->port = port;
160 dev->unit = unit; 170 mdev->unit = unit;
161 dev->mq = maple_allocq(dev); 171 mdev->mq = maple_allocq(mdev);
162 172
163 if (!dev->mq) { 173 if (!mdev->mq) {
164 kfree(dev); 174 kfree(mdev);
165 return NULL; 175 return NULL;
166 } 176 }
167 177 mdev->dev.bus = &maple_bus_type;
168 return dev; 178 mdev->dev.parent = &maple_bus;
179 mdev->function = 0;
180 return mdev;
169} 181}
170 182
171static void maple_free_dev(struct maple_device *mdev) 183static void maple_free_dev(struct maple_device *mdev)
172{ 184{
173 if (!mdev) 185 if (!mdev)
174 return; 186 return;
175 if (mdev->mq) { 187 if (mdev->mq) {
176 kmem_cache_free(maple_queue_cache, mdev->mq->recvbufdcsp); 188 if (mdev->mq->recvbufdcsp)
177 kfree(mdev->mq); 189 kmem_cache_free(maple_queue_cache,
178 } 190 mdev->mq->recvbufdcsp);
179 kfree(mdev); 191 kfree(mdev->mq);
192 }
193 kfree(mdev);
180} 194}
181 195
182/* process the command queue into a maple command block 196/* process the command queue into a maple command block
@@ -184,153 +198,162 @@ static void maple_free_dev(struct maple_device *mdev)
184 */ 198 */
185static void maple_build_block(struct mapleq *mq) 199static void maple_build_block(struct mapleq *mq)
186{ 200{
187 int port, unit, from, to, len; 201 int port, unit, from, to, len;
188 unsigned long *lsendbuf = mq->sendbuf; 202 unsigned long *lsendbuf = mq->sendbuf;
189 203
190 port = mq->dev->port & 3; 204 port = mq->dev->port & 3;
191 unit = mq->dev->unit; 205 unit = mq->dev->unit;
192 len = mq->length; 206 len = mq->length;
193 from = port << 6; 207 from = port << 6;
194 to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20); 208 to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);
195 209
196 *maple_lastptr &= 0x7fffffff; 210 *maple_lastptr &= 0x7fffffff;
197 maple_lastptr = maple_sendptr; 211 maple_lastptr = maple_sendptr;
198 212
199 *maple_sendptr++ = (port << 16) | len | 0x80000000; 213 *maple_sendptr++ = (port << 16) | len | 0x80000000;
200 *maple_sendptr++ = PHYSADDR(mq->recvbuf); 214 *maple_sendptr++ = PHYSADDR(mq->recvbuf);
201 *maple_sendptr++ = 215 *maple_sendptr++ =
202 mq->command | (to << 8) | (from << 16) | (len << 24); 216 mq->command | (to << 8) | (from << 16) | (len << 24);
203 217
204 while (len-- > 0) 218 while (len-- > 0)
205 *maple_sendptr++ = *lsendbuf++; 219 *maple_sendptr++ = *lsendbuf++;
206} 220}
207 221
208/* build up command queue */ 222/* build up command queue */
209static void maple_send(void) 223static void maple_send(void)
210{ 224{
211 int i; 225 int i;
212 int maple_packets; 226 int maple_packets;
213 struct mapleq *mq, *nmq; 227 struct mapleq *mq, *nmq;
214 228
215 if (!list_empty(&maple_sentq)) 229 if (!list_empty(&maple_sentq))
216 return; 230 return;
217 if (list_empty(&maple_waitq) || !maple_dma_done()) 231 if (list_empty(&maple_waitq) || !maple_dma_done())
218 return; 232 return;
219 maple_packets = 0; 233 maple_packets = 0;
220 maple_sendptr = maple_lastptr = maple_sendbuf; 234 maple_sendptr = maple_lastptr = maple_sendbuf;
221 list_for_each_entry_safe(mq, nmq, &maple_waitq, list) { 235 list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
222 maple_build_block(mq); 236 maple_build_block(mq);
223 list_move(&mq->list, &maple_sentq); 237 list_move(&mq->list, &maple_sentq);
224 if (maple_packets++ > MAPLE_MAXPACKETS) 238 if (maple_packets++ > MAPLE_MAXPACKETS)
225 break; 239 break;
226 } 240 }
227 if (maple_packets > 0) { 241 if (maple_packets > 0) {
228 for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++) 242 for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
229 dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE, 243 dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
230 PAGE_SIZE, DMA_BIDIRECTIONAL); 244 PAGE_SIZE, DMA_BIDIRECTIONAL);
231 } 245 }
232} 246}
233 247
234static int attach_matching_maple_driver(struct device_driver *driver, 248static int attach_matching_maple_driver(struct device_driver *driver,
235 void *devptr) 249 void *devptr)
236{ 250{
237 struct maple_driver *maple_drv; 251 struct maple_driver *maple_drv;
238 struct maple_device *mdev; 252 struct maple_device *mdev;
239 253
240 mdev = devptr; 254 mdev = devptr;
241 maple_drv = to_maple_driver(driver); 255 maple_drv = to_maple_driver(driver);
242 if (mdev->devinfo.function & be32_to_cpu(maple_drv->function)) { 256 if (mdev->devinfo.function & be32_to_cpu(maple_drv->function)) {
243 if (maple_drv->connect(mdev) == 0) { 257 if (maple_drv->connect(mdev) == 0) {
244 mdev->driver = maple_drv; 258 mdev->driver = maple_drv;
245 return 1; 259 return 1;
246 } 260 }
247 } 261 }
248 return 0; 262 return 0;
249} 263}
250 264
251static void maple_detach_driver(struct maple_device *mdev) 265static void maple_detach_driver(struct maple_device *mdev)
252{ 266{
253 if (!mdev) 267 if (!mdev)
254 return; 268 return;
255 if (mdev->driver) { 269 if (mdev->driver) {
256 if (mdev->driver->disconnect) 270 if (mdev->driver->disconnect)
257 mdev->driver->disconnect(mdev); 271 mdev->driver->disconnect(mdev);
258 } 272 }
259 mdev->driver = NULL; 273 mdev->driver = NULL;
260 if (mdev->registered) { 274 device_unregister(&mdev->dev);
261 maple_release_device(&mdev->dev); 275 mdev = NULL;
262 device_unregister(&mdev->dev);
263 }
264 mdev->registered = 0;
265 maple_free_dev(mdev);
266} 276}
267 277
268/* process initial MAPLE_COMMAND_DEVINFO for each device or port */ 278/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
269static void maple_attach_driver(struct maple_device *dev) 279static void maple_attach_driver(struct maple_device *mdev)
270{ 280{
271 char *p; 281 char *p, *recvbuf;
272 282 unsigned long function;
273 char *recvbuf; 283 int matched, retval;
274 unsigned long function; 284
275 int matched, retval; 285 recvbuf = mdev->mq->recvbuf;
276 286 /* copy the data as individual elements in
277 recvbuf = dev->mq->recvbuf; 287 * case of memory optimisation */
278 memcpy(&dev->devinfo, recvbuf + 4, sizeof(dev->devinfo)); 288 memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
279 memcpy(dev->product_name, dev->devinfo.product_name, 30); 289 memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
280 memcpy(dev->product_licence, dev->devinfo.product_licence, 60); 290 memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
281 dev->product_name[30] = '\0'; 291 memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
282 dev->product_licence[60] = '\0'; 292 memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
283 293 memcpy(&mdev->devinfo.product_licence[0], recvbuf + 52, 60);
284 for (p = dev->product_name + 29; dev->product_name <= p; p--) 294 memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
285 if (*p == ' ') 295 memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
286 *p = '\0'; 296 memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
287 else 297 mdev->product_name[30] = '\0';
288 break; 298 memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
289 299 mdev->product_licence[60] = '\0';
290 for (p = dev->product_licence + 59; dev->product_licence <= p; p--) 300
291 if (*p == ' ') 301 for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
292 *p = '\0'; 302 if (*p == ' ')
293 else 303 *p = '\0';
294 break; 304 else
295 305 break;
296 function = be32_to_cpu(dev->devinfo.function); 306 for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
297 307 if (*p == ' ')
298 if (function > 0x200) { 308 *p = '\0';
299 /* Do this silently - as not a real device */ 309 else
300 function = 0; 310 break;
301 dev->driver = &maple_dummy_driver; 311
302 sprintf(dev->dev.bus_id, "%d:0.port", dev->port); 312 if (realscan) {
303 } else { 313 printk(KERN_INFO "Maple device detected: %s\n",
304 printk(KERN_INFO 314 mdev->product_name);
305 "Maple bus at (%d, %d): Connected function 0x%lX\n", 315 printk(KERN_INFO "Maple device: %s\n", mdev->product_licence);
306 dev->port, dev->unit, function); 316 }
307 317
308 matched = 318 function = be32_to_cpu(mdev->devinfo.function);
309 bus_for_each_drv(&maple_bus_type, NULL, dev, 319
310 attach_matching_maple_driver); 320 if (function > 0x200) {
311 321 /* Do this silently - as not a real device */
312 if (matched == 0) { 322 function = 0;
313 /* Driver does not exist yet */ 323 mdev->driver = &maple_dummy_driver;
314 printk(KERN_INFO 324 sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port);
315 "No maple driver found for this device\n"); 325 } else {
316 dev->driver = &maple_dummy_driver; 326 if (realscan)
317 } 327 printk(KERN_INFO
318 328 "Maple bus at (%d, %d): Function 0x%lX\n",
319 sprintf(dev->dev.bus_id, "%d:0%d.%lX", dev->port, 329 mdev->port, mdev->unit, function);
320 dev->unit, function); 330
321 } 331 matched =
322 dev->function = function; 332 bus_for_each_drv(&maple_bus_type, NULL, mdev,
323 dev->dev.bus = &maple_bus_type; 333 attach_matching_maple_driver);
324 dev->dev.parent = &maple_bus; 334
325 dev->dev.release = &maple_release_device; 335 if (matched == 0) {
326 retval = device_register(&dev->dev); 336 /* Driver does not exist yet */
327 if (retval) { 337 if (realscan)
328 printk(KERN_INFO 338 printk(KERN_INFO
329 "Maple bus: Attempt to register device (%x, %x) failed.\n", 339 "No maple driver found.\n");
330 dev->port, dev->unit); 340 mdev->driver = &maple_dummy_driver;
331 maple_free_dev(dev); 341 }
332 } 342 sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port,
333 dev->registered = 1; 343 mdev->unit, function);
344 }
345 mdev->function = function;
346 mdev->dev.release = &maple_release_device;
347 retval = device_register(&mdev->dev);
348 if (retval) {
349 printk(KERN_INFO
350 "Maple bus: Attempt to register device"
351 " (%x, %x) failed.\n",
352 mdev->port, mdev->unit);
353 maple_free_dev(mdev);
354 mdev = NULL;
355 return;
356 }
334} 357}
335 358
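The element-by-element memcpy() calls above, replacing the single struct copy, avoid depending on struct maple_devinfo having no internal padding. Purely for illustration, the byte offsets used correspond to a packed wire layout along these lines; field names are taken from the copies above, the exact types and the endianness of the 16-bit fields are assumptions, not part of the patch:

/* Bytes starting at recvbuf + 4, as implied by the offsets above (sketch). */
struct maple_devinfo_wire {
	__be32	function;		/* recvbuf + 4   */
	__be32	function_data[3];	/* recvbuf + 8   */
	__u8	area_code;		/* recvbuf + 20  */
	__u8	connector_direction;	/* recvbuf + 21  */
	char	product_name[30];	/* recvbuf + 22  */
	char	product_licence[60];	/* recvbuf + 52  */
	__u16	standby_power;		/* recvbuf + 112 */
	__u16	max_power;		/* recvbuf + 114 */
} __packed;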
336/* 359/*
@@ -340,270 +363,262 @@ static void maple_attach_driver(struct maple_device *dev)
340 */ 363 */
341static int detach_maple_device(struct device *device, void *portptr) 364static int detach_maple_device(struct device *device, void *portptr)
342{ 365{
343 struct maple_device_specify *ds; 366 struct maple_device_specify *ds;
344 struct maple_device *mdev; 367 struct maple_device *mdev;
345 368
346 ds = portptr; 369 ds = portptr;
347 mdev = to_maple_dev(device); 370 mdev = to_maple_dev(device);
348 if (mdev->port == ds->port && mdev->unit == ds->unit) 371 if (mdev->port == ds->port && mdev->unit == ds->unit)
349 return 1; 372 return 1;
350 return 0; 373 return 0;
351} 374}
352 375
353static int setup_maple_commands(struct device *device, void *ignored) 376static int setup_maple_commands(struct device *device, void *ignored)
354{ 377{
355 struct maple_device *maple_dev = to_maple_dev(device); 378 struct maple_device *maple_dev = to_maple_dev(device);
356 379
357 if ((maple_dev->interval > 0) 380 if ((maple_dev->interval > 0)
358 && time_after(jiffies, maple_dev->when)) { 381 && time_after(jiffies, maple_dev->when)) {
359 maple_dev->when = jiffies + maple_dev->interval; 382 maple_dev->when = jiffies + maple_dev->interval;
360 maple_dev->mq->command = MAPLE_COMMAND_GETCOND; 383 maple_dev->mq->command = MAPLE_COMMAND_GETCOND;
361 maple_dev->mq->sendbuf = &maple_dev->function; 384 maple_dev->mq->sendbuf = &maple_dev->function;
362 maple_dev->mq->length = 1; 385 maple_dev->mq->length = 1;
363 maple_add_packet(maple_dev->mq); 386 maple_add_packet(maple_dev->mq);
364 liststatus++; 387 liststatus++;
365 } else { 388 } else {
366 if (time_after(jiffies, maple_pnp_time)) { 389 if (time_after(jiffies, maple_pnp_time)) {
367 maple_dev->mq->command = MAPLE_COMMAND_DEVINFO; 390 maple_dev->mq->command = MAPLE_COMMAND_DEVINFO;
368 maple_dev->mq->length = 0; 391 maple_dev->mq->length = 0;
369 maple_add_packet(maple_dev->mq); 392 maple_add_packet(maple_dev->mq);
370 liststatus++; 393 liststatus++;
371 } 394 }
372 } 395 }
373 396
374 return 0; 397 return 0;
375} 398}
376 399
377/* VBLANK bottom half - implemented via workqueue */ 400/* VBLANK bottom half - implemented via workqueue */
378static void maple_vblank_handler(struct work_struct *work) 401static void maple_vblank_handler(struct work_struct *work)
379{ 402{
380 if (!maple_dma_done()) 403 if (!maple_dma_done())
381 return; 404 return;
382 if (!list_empty(&maple_sentq)) 405 if (!list_empty(&maple_sentq))
383 return; 406 return;
384 ctrl_outl(0, MAPLE_ENABLE); 407 ctrl_outl(0, MAPLE_ENABLE);
385 liststatus = 0; 408 liststatus = 0;
386 bus_for_each_dev(&maple_bus_type, NULL, NULL, 409 bus_for_each_dev(&maple_bus_type, NULL, NULL,
387 setup_maple_commands); 410 setup_maple_commands);
388 if (time_after(jiffies, maple_pnp_time)) 411 if (time_after(jiffies, maple_pnp_time))
389 maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL; 412 maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
390 if (liststatus && list_empty(&maple_sentq)) { 413 if (liststatus && list_empty(&maple_sentq)) {
391 INIT_LIST_HEAD(&maple_sentq); 414 INIT_LIST_HEAD(&maple_sentq);
392 maple_send(); 415 maple_send();
393 } 416 }
394 maplebus_dma_reset(); 417 maplebus_dma_reset();
395} 418}
396 419
397/* handle devices added via hotplugs - placing them on queue for DEVINFO*/ 420/* handle devices added via hotplugs - placing them on queue for DEVINFO*/
398static void maple_map_subunits(struct maple_device *mdev, int submask) 421static void maple_map_subunits(struct maple_device *mdev, int submask)
399{ 422{
400 int retval, k, devcheck; 423 int retval, k, devcheck;
401 struct maple_device *mdev_add; 424 struct maple_device *mdev_add;
402 struct maple_device_specify ds; 425 struct maple_device_specify ds;
403 426
404 for (k = 0; k < 5; k++) { 427 for (k = 0; k < 5; k++) {
405 ds.port = mdev->port; 428 ds.port = mdev->port;
406 ds.unit = k + 1; 429 ds.unit = k + 1;
407 retval = 430 retval =
408 bus_for_each_dev(&maple_bus_type, NULL, &ds, 431 bus_for_each_dev(&maple_bus_type, NULL, &ds,
409 detach_maple_device); 432 detach_maple_device);
410 if (retval) { 433 if (retval) {
411 submask = submask >> 1; 434 submask = submask >> 1;
412 continue; 435 continue;
413 } 436 }
414 devcheck = submask & 0x01; 437 devcheck = submask & 0x01;
415 if (devcheck) { 438 if (devcheck) {
416 mdev_add = maple_alloc_dev(mdev->port, k + 1); 439 mdev_add = maple_alloc_dev(mdev->port, k + 1);
417 if (!mdev_add) 440 if (!mdev_add)
418 return; 441 return;
419 mdev_add->mq->command = MAPLE_COMMAND_DEVINFO; 442 mdev_add->mq->command = MAPLE_COMMAND_DEVINFO;
420 mdev_add->mq->length = 0; 443 mdev_add->mq->length = 0;
421 maple_add_packet(mdev_add->mq); 444 maple_add_packet(mdev_add->mq);
422 scanning = 1; 445 scanning = 1;
423 } 446 }
424 submask = submask >> 1; 447 submask = submask >> 1;
425 } 448 }
426} 449}
427 450
428/* mark a device as removed */ 451/* mark a device as removed */
429static void maple_clean_submap(struct maple_device *mdev) 452static void maple_clean_submap(struct maple_device *mdev)
430{ 453{
431 int killbit; 454 int killbit;
432 455
433 killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20); 456 killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
434 killbit = ~killbit; 457 killbit = ~killbit;
435 killbit &= 0xFF; 458 killbit &= 0xFF;
436 subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit; 459 subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
437} 460}
438 461
439/* handle empty port or hotplug removal */ 462/* handle empty port or hotplug removal */
440static void maple_response_none(struct maple_device *mdev, 463static void maple_response_none(struct maple_device *mdev,
441 struct mapleq *mq) 464 struct mapleq *mq)
442{ 465{
443 if (mdev->unit != 0) { 466 if (mdev->unit != 0) {
444 list_del(&mq->list); 467 list_del(&mq->list);
445 maple_clean_submap(mdev); 468 maple_clean_submap(mdev);
446 printk(KERN_INFO 469 printk(KERN_INFO
447 "Maple bus device detaching at (%d, %d)\n", 470 "Maple bus device detaching at (%d, %d)\n",
448 mdev->port, mdev->unit); 471 mdev->port, mdev->unit);
449 maple_detach_driver(mdev); 472 maple_detach_driver(mdev);
450 return; 473 return;
451 } 474 }
452 if (!started) { 475 if (!started) {
453 printk(KERN_INFO "No maple devices attached to port %d\n", 476 printk(KERN_INFO "No maple devices attached to port %d\n",
454 mdev->port); 477 mdev->port);
455 return; 478 return;
456 } 479 }
457 maple_clean_submap(mdev); 480 maple_clean_submap(mdev);
458} 481}
459 482
460/* preprocess hotplugs or scans */ 483/* preprocess hotplugs or scans */
461static void maple_response_devinfo(struct maple_device *mdev, 484static void maple_response_devinfo(struct maple_device *mdev,
462 char *recvbuf) 485 char *recvbuf)
463{ 486{
464 char submask; 487 char submask;
465 if ((!started) || (scanning == 2)) { 488 if ((!started) || (scanning == 2)) {
466 maple_attach_driver(mdev); 489 maple_attach_driver(mdev);
467 return; 490 return;
468 } 491 }
469 if (mdev->unit == 0) { 492 if (mdev->unit == 0) {
470 submask = recvbuf[2] & 0x1F; 493 submask = recvbuf[2] & 0x1F;
471 if (submask ^ subdevice_map[mdev->port]) { 494 if (submask ^ subdevice_map[mdev->port]) {
472 maple_map_subunits(mdev, submask); 495 maple_map_subunits(mdev, submask);
473 subdevice_map[mdev->port] = submask; 496 subdevice_map[mdev->port] = submask;
474 } 497 }
475 } 498 }
476} 499}
477 500
478/* maple dma end bottom half - implemented via workqueue */ 501/* maple dma end bottom half - implemented via workqueue */
479static void maple_dma_handler(struct work_struct *work) 502static void maple_dma_handler(struct work_struct *work)
480{ 503{
481 struct mapleq *mq, *nmq; 504 struct mapleq *mq, *nmq;
482 struct maple_device *dev; 505 struct maple_device *dev;
483 char *recvbuf; 506 char *recvbuf;
484 enum maple_code code; 507 enum maple_code code;
485 508
486 if (!maple_dma_done()) 509 if (!maple_dma_done())
487 return; 510 return;
488 ctrl_outl(0, MAPLE_ENABLE); 511 ctrl_outl(0, MAPLE_ENABLE);
489 if (!list_empty(&maple_sentq)) { 512 if (!list_empty(&maple_sentq)) {
490 list_for_each_entry_safe(mq, nmq, &maple_sentq, list) { 513 list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
491 recvbuf = mq->recvbuf; 514 recvbuf = mq->recvbuf;
492 code = recvbuf[0]; 515 code = recvbuf[0];
493 dev = mq->dev; 516 dev = mq->dev;
494 switch (code) { 517 switch (code) {
495 case MAPLE_RESPONSE_NONE: 518 case MAPLE_RESPONSE_NONE:
496 maple_response_none(dev, mq); 519 maple_response_none(dev, mq);
497 break; 520 break;
498 521
499 case MAPLE_RESPONSE_DEVINFO: 522 case MAPLE_RESPONSE_DEVINFO:
500 maple_response_devinfo(dev, recvbuf); 523 maple_response_devinfo(dev, recvbuf);
501 break; 524 break;
502 525
503 case MAPLE_RESPONSE_DATATRF: 526 case MAPLE_RESPONSE_DATATRF:
504 if (dev->callback) 527 if (dev->callback)
505 dev->callback(mq); 528 dev->callback(mq);
506 break; 529 break;
507 530
508 case MAPLE_RESPONSE_FILEERR: 531 case MAPLE_RESPONSE_FILEERR:
509 case MAPLE_RESPONSE_AGAIN: 532 case MAPLE_RESPONSE_AGAIN:
510 case MAPLE_RESPONSE_BADCMD: 533 case MAPLE_RESPONSE_BADCMD:
511 case MAPLE_RESPONSE_BADFUNC: 534 case MAPLE_RESPONSE_BADFUNC:
512 printk(KERN_DEBUG 535 printk(KERN_DEBUG
513 "Maple non-fatal error 0x%X\n", 536 "Maple non-fatal error 0x%X\n",
514 code); 537 code);
515 break; 538 break;
516 539
517 case MAPLE_RESPONSE_ALLINFO: 540 case MAPLE_RESPONSE_ALLINFO:
518 printk(KERN_DEBUG 541 printk(KERN_DEBUG
519 "Maple - extended device information not supported\n"); 542 "Maple - extended device information"
520 break; 543 " not supported\n");
521 544 break;
522 case MAPLE_RESPONSE_OK: 545
523 break; 546 case MAPLE_RESPONSE_OK:
524 547 break;
525 default: 548
526 break; 549 default:
527 } 550 break;
528 } 551 }
529 INIT_LIST_HEAD(&maple_sentq); 552 }
530 if (scanning == 1) { 553 INIT_LIST_HEAD(&maple_sentq);
531 maple_send(); 554 if (scanning == 1) {
532 scanning = 2; 555 maple_send();
533 } else 556 scanning = 2;
534 scanning = 0; 557 } else
535 558 scanning = 0;
536 if (started == 0) 559
537 started = 1; 560 if (started == 0)
538 } 561 started = 1;
539 maplebus_dma_reset(); 562 }
563 maplebus_dma_reset();
540} 564}
541 565
542static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id) 566static irqreturn_t maplebus_dma_interrupt(int irq, void *dev_id)
543{ 567{
544 /* Load everything into the bottom half */ 568 /* Load everything into the bottom half */
545 schedule_work(&maple_dma_process); 569 schedule_work(&maple_dma_process);
546 return IRQ_HANDLED; 570 return IRQ_HANDLED;
547} 571}
548 572
549static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id) 573static irqreturn_t maplebus_vblank_interrupt(int irq, void *dev_id)
550{ 574{
551 schedule_work(&maple_vblank_process); 575 schedule_work(&maple_vblank_process);
552 return IRQ_HANDLED; 576 return IRQ_HANDLED;
553} 577}
554 578
555static struct irqaction maple_dma_irq = {
556 .name = "maple bus DMA handler",
557 .handler = maplebus_dma_interrupt,
558 .flags = IRQF_SHARED,
559};
560
561static struct irqaction maple_vblank_irq = {
562 .name = "maple bus VBLANK handler",
563 .handler = maplebus_vblank_interrupt,
564 .flags = IRQF_SHARED,
565};
566
567static int maple_set_dma_interrupt_handler(void) 579static int maple_set_dma_interrupt_handler(void)
568{ 580{
569 return setup_irq(HW_EVENT_MAPLE_DMA, &maple_dma_irq); 581 return request_irq(HW_EVENT_MAPLE_DMA, maplebus_dma_interrupt,
582 IRQF_SHARED, "maple bus DMA", &maple_dummy_driver);
570} 583}
571 584
572static int maple_set_vblank_interrupt_handler(void) 585static int maple_set_vblank_interrupt_handler(void)
573{ 586{
574 return setup_irq(HW_EVENT_VSYNC, &maple_vblank_irq); 587 return request_irq(HW_EVENT_VSYNC, maplebus_vblank_interrupt,
588 IRQF_SHARED, "maple bus VBLANK", &maple_dummy_driver);
575} 589}
576 590
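The static struct irqaction pairs and setup_irq() calls are replaced by plain request_irq(). Since IRQF_SHARED is set, the dev_id cookie must be non-NULL and unique, hence &maple_dummy_driver. One thing worth double-checking: free_irq() on a shared line only releases the handler whose dev_id matches, and the cleanup path in maple_bus_init() further down still passes 0. A sketch of the matching teardown (not part of this patch):

/* Teardown that pairs with the request_irq() calls above; the cookie must
 * be the same pointer that was registered. */
static void maple_release_irqs(void)
{
	free_irq(HW_EVENT_VSYNC, &maple_dummy_driver);
	free_irq(HW_EVENT_MAPLE_DMA, &maple_dummy_driver);
}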
577static int maple_get_dma_buffer(void) 591static int maple_get_dma_buffer(void)
578{ 592{
579 maple_sendbuf = 593 maple_sendbuf =
580 (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, 594 (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
581 MAPLE_DMA_PAGES); 595 MAPLE_DMA_PAGES);
582 if (!maple_sendbuf) 596 if (!maple_sendbuf)
583 return -ENOMEM; 597 return -ENOMEM;
584 return 0; 598 return 0;
585} 599}
586 600
587static int match_maple_bus_driver(struct device *devptr, 601static int match_maple_bus_driver(struct device *devptr,
588 struct device_driver *drvptr) 602 struct device_driver *drvptr)
589{ 603{
590 struct maple_driver *maple_drv; 604 struct maple_driver *maple_drv;
591 struct maple_device *maple_dev; 605 struct maple_device *maple_dev;
592 606
593 maple_drv = container_of(drvptr, struct maple_driver, drv); 607 maple_drv = container_of(drvptr, struct maple_driver, drv);
594 maple_dev = container_of(devptr, struct maple_device, dev); 608 maple_dev = container_of(devptr, struct maple_device, dev);
595 /* Trap empty port case */ 609 /* Trap empty port case */
596 if (maple_dev->devinfo.function == 0xFFFFFFFF) 610 if (maple_dev->devinfo.function == 0xFFFFFFFF)
597 return 0; 611 return 0;
598 else if (maple_dev->devinfo.function & 612 else if (maple_dev->devinfo.function &
599 be32_to_cpu(maple_drv->function)) 613 be32_to_cpu(maple_drv->function))
600 return 1; 614 return 1;
601 return 0; 615 return 0;
602} 616}
603 617
604static int maple_bus_uevent(struct device *dev, struct kobj_uevent_env *env) 618static int maple_bus_uevent(struct device *dev,
619 struct kobj_uevent_env *env)
605{ 620{
606 return 0; 621 return 0;
607} 622}
608 623
609static void maple_bus_release(struct device *dev) 624static void maple_bus_release(struct device *dev)
@@ -611,124 +626,122 @@ static void maple_bus_release(struct device *dev)
611} 626}
612 627
613static struct maple_driver maple_dummy_driver = { 628static struct maple_driver maple_dummy_driver = {
614 .drv = { 629 .drv = {
615 .name = "maple_dummy_driver", 630 .name = "maple_dummy_driver",
616 .bus = &maple_bus_type, 631 .bus = &maple_bus_type,
617 }, 632 },
618}; 633};
619 634
620struct bus_type maple_bus_type = { 635struct bus_type maple_bus_type = {
621 .name = "maple", 636 .name = "maple",
622 .match = match_maple_bus_driver, 637 .match = match_maple_bus_driver,
623 .uevent = maple_bus_uevent, 638 .uevent = maple_bus_uevent,
624}; 639};
625EXPORT_SYMBOL_GPL(maple_bus_type); 640EXPORT_SYMBOL_GPL(maple_bus_type);
626 641
627static struct device maple_bus = { 642static struct device maple_bus = {
628 .bus_id = "maple", 643 .bus_id = "maple",
629 .release = maple_bus_release, 644 .release = maple_bus_release,
630}; 645};
631 646
632static int __init maple_bus_init(void) 647static int __init maple_bus_init(void)
633{ 648{
634 int retval, i; 649 int retval, i;
635 struct maple_device *mdev[MAPLE_PORTS]; 650 struct maple_device *mdev[MAPLE_PORTS];
636 ctrl_outl(0, MAPLE_STATE); 651 ctrl_outl(0, MAPLE_STATE);
637 652
638 retval = device_register(&maple_bus); 653 retval = device_register(&maple_bus);
639 if (retval) 654 if (retval)
640 goto cleanup; 655 goto cleanup;
641 656
642 retval = bus_register(&maple_bus_type); 657 retval = bus_register(&maple_bus_type);
643 if (retval) 658 if (retval)
644 goto cleanup_device; 659 goto cleanup_device;
645 660
646 retval = driver_register(&maple_dummy_driver.drv); 661 retval = driver_register(&maple_dummy_driver.drv);
647 662 if (retval)
648 if (retval) 663 goto cleanup_bus;
649 goto cleanup_bus; 664
650 665 /* allocate memory for maple bus dma */
651 /* allocate memory for maple bus dma */ 666 retval = maple_get_dma_buffer();
652 retval = maple_get_dma_buffer(); 667 if (retval) {
653 if (retval) { 668 printk(KERN_INFO
654 printk(KERN_INFO 669 "Maple bus: Failed to allocate Maple DMA buffers\n");
655 "Maple bus: Failed to allocate Maple DMA buffers\n"); 670 goto cleanup_basic;
656 goto cleanup_basic; 671 }
657 } 672
658 673 /* set up DMA interrupt handler */
659 /* set up DMA interrupt handler */ 674 retval = maple_set_dma_interrupt_handler();
660 retval = maple_set_dma_interrupt_handler(); 675 if (retval) {
661 if (retval) { 676 printk(KERN_INFO
662 printk(KERN_INFO 677 "Maple bus: Failed to grab maple DMA IRQ\n");
663 "Maple bus: Failed to grab maple DMA IRQ\n"); 678 goto cleanup_dma;
664 goto cleanup_dma; 679 }
665 } 680
666 681 /* set up VBLANK interrupt handler */
667 /* set up VBLANK interrupt handler */ 682 retval = maple_set_vblank_interrupt_handler();
668 retval = maple_set_vblank_interrupt_handler(); 683 if (retval) {
669 if (retval) { 684 printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n");
670 printk(KERN_INFO "Maple bus: Failed to grab VBLANK IRQ\n"); 685 goto cleanup_irq;
671 goto cleanup_irq; 686 }
672 } 687
673 688 maple_queue_cache =
674 maple_queue_cache = 689 kmem_cache_create("maple_queue_cache", 0x400, 0,
675 kmem_cache_create("maple_queue_cache", 0x400, 0, 690 SLAB_POISON|SLAB_HWCACHE_ALIGN, NULL);
676 SLAB_HWCACHE_ALIGN, NULL); 691
677 692 if (!maple_queue_cache)
678 if (!maple_queue_cache) 693 goto cleanup_bothirqs;
679 goto cleanup_bothirqs; 694
680 695 /* setup maple ports */
681 /* setup maple ports */ 696 for (i = 0; i < MAPLE_PORTS; i++) {
682 for (i = 0; i < MAPLE_PORTS; i++) { 697 mdev[i] = maple_alloc_dev(i, 0);
683 mdev[i] = maple_alloc_dev(i, 0); 698 if (!mdev[i]) {
684 if (!mdev[i]) { 699 while (i-- > 0)
685 while (i-- > 0) 700 maple_free_dev(mdev[i]);
686 maple_free_dev(mdev[i]); 701 goto cleanup_cache;
687 goto cleanup_cache; 702 }
688 } 703 mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO;
689 mdev[i]->registered = 0; 704 mdev[i]->mq->length = 0;
690 mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO; 705 maple_add_packet(mdev[i]->mq);
691 mdev[i]->mq->length = 0; 706 /* delay aids hardware detection */
692 maple_attach_driver(mdev[i]); 707 mdelay(5);
693 maple_add_packet(mdev[i]->mq); 708 subdevice_map[i] = 0;
694 subdevice_map[i] = 0; 709 }
695 } 710
696 711 realscan = 1;
697 /* setup maplebus hardware */ 712 /* setup maplebus hardware */
698 maplebus_dma_reset(); 713 maplebus_dma_reset();
699 714 /* initial detection */
700 /* initial detection */ 715 maple_send();
701 maple_send(); 716 maple_pnp_time = jiffies;
702 717 printk(KERN_INFO "Maple bus core now registered.\n");
703 maple_pnp_time = jiffies; 718
704 719 return 0;
705 printk(KERN_INFO "Maple bus core now registered.\n");
706
707 return 0;
708 720
709cleanup_cache: 721cleanup_cache:
710 kmem_cache_destroy(maple_queue_cache); 722 kmem_cache_destroy(maple_queue_cache);
711 723
712cleanup_bothirqs: 724cleanup_bothirqs:
713 free_irq(HW_EVENT_VSYNC, 0); 725 free_irq(HW_EVENT_VSYNC, 0);
714 726
715cleanup_irq: 727cleanup_irq:
716 free_irq(HW_EVENT_MAPLE_DMA, 0); 728 free_irq(HW_EVENT_MAPLE_DMA, 0);
717 729
718cleanup_dma: 730cleanup_dma:
719 free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES); 731 free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);
720 732
721cleanup_basic: 733cleanup_basic:
722 driver_unregister(&maple_dummy_driver.drv); 734 driver_unregister(&maple_dummy_driver.drv);
723 735
724cleanup_bus: 736cleanup_bus:
725 bus_unregister(&maple_bus_type); 737 bus_unregister(&maple_bus_type);
726 738
727cleanup_device: 739cleanup_device:
728 device_unregister(&maple_bus); 740 device_unregister(&maple_bus);
729 741
730cleanup: 742cleanup:
731 printk(KERN_INFO "Maple bus registration failed\n"); 743 printk(KERN_INFO "Maple bus registration failed\n");
732 return retval; 744 return retval;
733} 745}
734subsys_initcall(maple_bus_init); 746/* Push init to later to ensure hardware gets detected */
747fs_initcall(maple_bus_init);
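The final hunk moves the probe from subsys_initcall() to fs_initcall(), which simply defers bus initialisation by one initcall level. For reference, the built-in levels run in this order (see include/linux/init.h):

/* core_initcall(1) -> postcore_initcall(2) -> arch_initcall(3)
 * -> subsys_initcall(4)   old position of maple_bus_init
 * -> fs_initcall(5)       new position
 * -> device_initcall(6), i.e. module_init() for built-in code
 * -> late_initcall(7)
 */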
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 293b7cab3e57..85687aaf9cab 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -87,6 +87,16 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
87 unsigned gpio = (unsigned) spi->controller_data; 87 unsigned gpio = (unsigned) spi->controller_data;
88 unsigned active = spi->mode & SPI_CS_HIGH; 88 unsigned active = spi->mode & SPI_CS_HIGH;
89 u32 mr; 89 u32 mr;
90 int i;
91 u32 csr;
92 u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
93
94 /* Make sure clock polarity is correct */
95 for (i = 0; i < spi->master->num_chipselect; i++) {
96 csr = spi_readl(as, CSR0 + 4 * i);
97 if ((csr ^ cpol) & SPI_BIT(CPOL))
98 spi_writel(as, CSR0 + 4 * i, csr ^ SPI_BIT(CPOL));
99 }
90 100
91 mr = spi_readl(as, MR); 101 mr = spi_readl(as, MR);
92 mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr); 102 mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
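A note on the atmel_spi hunk above: cs_activate() now walks every chip-select register and corrects the clock polarity before the new device's chip select is driven, so SCK is already in the requested idle state when the transfer starts. The XOR test is just a compact form of comparing the CPOL bits; an equivalent, more explicit spelling (illustration only, same variables as above):

u32 want = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
if ((csr & SPI_BIT(CPOL)) != want)
	spi_writel(as, CSR0 + 4 * i, csr ^ SPI_BIT(CPOL));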
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 365e0e355aea..59deed79e0ab 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -51,13 +51,19 @@ MODULE_LICENSE("GPL");
51#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) 51#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
52#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0) 52#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
53 53
54/* for testing SSCR1 changes that require SSP restart, basically 54/*
55 * everything except the service and interrupt enables */ 55 * for testing SSCR1 changes that require SSP restart, basically
56#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_EBCEI | SSCR1_SCFR \ 56 * everything except the service and interrupt enables, the pxa270 developer
57 * manual says only SSCR1_SCFR, SSCR1_SPH, SSCR1_SPO need to be in this
58 * list, but the PXA255 dev man says all bits without really meaning the
59 * service and interrupt enables
60 */
61#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
57 | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \ 62 | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
58 | SSCR1_RWOT | SSCR1_TRAIL | SSCR1_PINTE \ 63 | SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
59 | SSCR1_STRF | SSCR1_EFWR |SSCR1_RFT \ 64 | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
60 | SSCR1_TFT | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) 65 | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
66 | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
61 67
62#define DEFINE_SSP_REG(reg, off) \ 68#define DEFINE_SSP_REG(reg, off) \
63static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \ 69static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
@@ -973,9 +979,6 @@ static void pump_transfers(unsigned long data)
973 if (drv_data->ssp_type == PXA25x_SSP) 979 if (drv_data->ssp_type == PXA25x_SSP)
974 DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; 980 DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
975 981
976 /* Fix me, need to handle cs polarity */
977 drv_data->cs_control(PXA2XX_CS_ASSERT);
978
979 /* Clear status and start DMA engine */ 982 /* Clear status and start DMA engine */
980 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; 983 cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
981 write_SSSR(drv_data->clear_sr, reg); 984 write_SSSR(drv_data->clear_sr, reg);
@@ -985,9 +988,6 @@ static void pump_transfers(unsigned long data)
985 /* Ensure we have the correct interrupt handler */ 988 /* Ensure we have the correct interrupt handler */
986 drv_data->transfer_handler = interrupt_transfer; 989 drv_data->transfer_handler = interrupt_transfer;
987 990
988 /* Fix me, need to handle cs polarity */
989 drv_data->cs_control(PXA2XX_CS_ASSERT);
990
991 /* Clear status */ 991 /* Clear status */
992 cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1; 992 cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
993 write_SSSR(drv_data->clear_sr, reg); 993 write_SSSR(drv_data->clear_sr, reg);
@@ -998,16 +998,29 @@ static void pump_transfers(unsigned long data)
998 || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) != 998 || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
999 (cr1 & SSCR1_CHANGE_MASK)) { 999 (cr1 & SSCR1_CHANGE_MASK)) {
1000 1000
1001 /* stop the SSP, and update the other bits */
1001 write_SSCR0(cr0 & ~SSCR0_SSE, reg); 1002 write_SSCR0(cr0 & ~SSCR0_SSE, reg);
1002 if (drv_data->ssp_type != PXA25x_SSP) 1003 if (drv_data->ssp_type != PXA25x_SSP)
1003 write_SSTO(chip->timeout, reg); 1004 write_SSTO(chip->timeout, reg);
1004 write_SSCR1(cr1, reg); 1005 /* first set CR1 without interrupt and service enables */
1006 write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg);
1007 /* restart the SSP */
1005 write_SSCR0(cr0, reg); 1008 write_SSCR0(cr0, reg);
1009
1006 } else { 1010 } else {
1007 if (drv_data->ssp_type != PXA25x_SSP) 1011 if (drv_data->ssp_type != PXA25x_SSP)
1008 write_SSTO(chip->timeout, reg); 1012 write_SSTO(chip->timeout, reg);
1009 write_SSCR1(cr1, reg);
1010 } 1013 }
1014
1015 /* FIXME, need to handle cs polarity,
1016 * this driver uses struct pxa2xx_spi_chip.cs_control to
1017 * specify a CS handling function, and it ignores most
1018 * struct spi_device.mode[s], including SPI_CS_HIGH */
1019 drv_data->cs_control(PXA2XX_CS_ASSERT);
1020
1021 /* after chip select, release the data by enabling service
1022 * requests and interrupts, without changing any mode bits */
1023 write_SSCR1(cr1, reg);
1011} 1024}
1012 1025
1013static void pump_messages(struct work_struct *work) 1026static void pump_messages(struct work_struct *work)
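The reordering above is the substance of the pxa2xx_spi fix: chip select is no longer asserted before the SSP is reprogrammed, and SSCR1 is written in two steps so service requests and interrupts only come on after CS is driven. The resulting sequence for the "configuration changed" path, as a sketch:

/* Order of operations established by the hunk above:
 *   write_SSCR0(cr0 & ~SSCR0_SSE, reg);          stop the SSP
 *   write_SSTO(chip->timeout, reg);              (non-PXA25x only)
 *   write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg);   mode bits, no enables
 *   write_SSCR0(cr0, reg);                       restart the SSP
 *   drv_data->cs_control(PXA2XX_CS_ASSERT);      assert chip select
 *   write_SSCR1(cr1, reg);                       now enable service
 *                                                requests and interrupts
 */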
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index d976660cb7f0..78fd33125e02 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -105,6 +105,12 @@ config SSB_DRIVER_MIPS
105 105
106 If unsure, say N 106 If unsure, say N
107 107
108# Assumption: We are on embedded, if we compile the MIPS core.
109config SSB_EMBEDDED
110 bool
111 depends on SSB_DRIVER_MIPS
112 default y
113
108config SSB_DRIVER_EXTIF 114config SSB_DRIVER_EXTIF
109 bool "SSB Broadcom EXTIF core driver (EXPERIMENTAL)" 115 bool "SSB Broadcom EXTIF core driver (EXPERIMENTAL)"
110 depends on SSB_DRIVER_MIPS && EXPERIMENTAL 116 depends on SSB_DRIVER_MIPS && EXPERIMENTAL
diff --git a/drivers/ssb/Makefile b/drivers/ssb/Makefile
index 7be397595805..e235144add7c 100644
--- a/drivers/ssb/Makefile
+++ b/drivers/ssb/Makefile
@@ -1,5 +1,6 @@
1# core 1# core
2ssb-y += main.o scan.o 2ssb-y += main.o scan.o
3ssb-$(CONFIG_SSB_EMBEDDED) += embedded.o
3 4
4# host support 5# host support
5ssb-$(CONFIG_SSB_PCIHOST) += pci.o pcihost_wrapper.o 6ssb-$(CONFIG_SSB_PCIHOST) += pci.o pcihost_wrapper.o
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 6fbf1c53b6f2..e586321a473a 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -39,12 +39,14 @@ static inline void chipco_write32(struct ssb_chipcommon *cc,
39 ssb_write32(cc->dev, offset, value); 39 ssb_write32(cc->dev, offset, value);
40} 40}
41 41
42static inline void chipco_write32_masked(struct ssb_chipcommon *cc, u16 offset, 42static inline u32 chipco_write32_masked(struct ssb_chipcommon *cc, u16 offset,
43 u32 mask, u32 value) 43 u32 mask, u32 value)
44{ 44{
45 value &= mask; 45 value &= mask;
46 value |= chipco_read32(cc, offset) & ~mask; 46 value |= chipco_read32(cc, offset) & ~mask;
47 chipco_write32(cc, offset, value); 47 chipco_write32(cc, offset, value);
48
49 return value;
48} 50}
49 51
50void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc, 52void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc,
@@ -356,14 +358,29 @@ u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask)
356 return chipco_read32(cc, SSB_CHIPCO_GPIOIN) & mask; 358 return chipco_read32(cc, SSB_CHIPCO_GPIOIN) & mask;
357} 359}
358 360
359void ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value) 361u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value)
362{
363 return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value);
364}
365
366u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value)
367{
368 return chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value);
369}
370
371u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value)
372{
373 return chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value);
374}
375
376u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value)
360{ 377{
361 chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUT, mask, value); 378 return chipco_write32_masked(cc, SSB_CHIPCO_GPIOIRQ, mask, value);
362} 379}
363 380
364void ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value) 381u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value)
365{ 382{
366 chipco_write32_masked(cc, SSB_CHIPCO_GPIOOUTEN, mask, value); 383 return chipco_write32_masked(cc, SSB_CHIPCO_GPIOPOL, mask, value);
367} 384}
368 385
369#ifdef CONFIG_SSB_SERIAL 386#ifdef CONFIG_SSB_SERIAL
@@ -376,6 +393,7 @@ int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
376 unsigned int irq; 393 unsigned int irq;
377 u32 baud_base, div; 394 u32 baud_base, div;
378 u32 i, n; 395 u32 i, n;
396 unsigned int ccrev = cc->dev->id.revision;
379 397
380 plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT); 398 plltype = (cc->capabilities & SSB_CHIPCO_CAP_PLLT);
381 irq = ssb_mips_irq(cc->dev); 399 irq = ssb_mips_irq(cc->dev);
@@ -387,14 +405,39 @@ int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
387 chipco_read32(cc, SSB_CHIPCO_CLOCK_M2)); 405 chipco_read32(cc, SSB_CHIPCO_CLOCK_M2));
388 div = 1; 406 div = 1;
389 } else { 407 } else {
390 if (cc->dev->id.revision >= 11) { 408 if (ccrev == 20) {
409 /* BCM5354 uses constant 25MHz clock */
410 baud_base = 25000000;
411 div = 48;
412 /* Set the override bit so we don't divide it */
413 chipco_write32(cc, SSB_CHIPCO_CORECTL,
414 chipco_read32(cc, SSB_CHIPCO_CORECTL)
415 | SSB_CHIPCO_CORECTL_UARTCLK0);
416 } else if ((ccrev >= 11) && (ccrev != 15)) {
391 /* Fixed ALP clock */ 417 /* Fixed ALP clock */
392 baud_base = 20000000; 418 baud_base = 20000000;
419 if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
420 /* FIXME: baud_base is different for devices with a PMU */
421 SSB_WARN_ON(1);
422 }
393 div = 1; 423 div = 1;
424 if (ccrev >= 21) {
425 /* Turn off UART clock before switching clocksource. */
426 chipco_write32(cc, SSB_CHIPCO_CORECTL,
427 chipco_read32(cc, SSB_CHIPCO_CORECTL)
428 & ~SSB_CHIPCO_CORECTL_UARTCLKEN);
429 }
394 /* Set the override bit so we don't divide it */ 430 /* Set the override bit so we don't divide it */
395 chipco_write32(cc, SSB_CHIPCO_CORECTL, 431 chipco_write32(cc, SSB_CHIPCO_CORECTL,
396 SSB_CHIPCO_CORECTL_UARTCLK0); 432 chipco_read32(cc, SSB_CHIPCO_CORECTL)
397 } else if (cc->dev->id.revision >= 3) { 433 | SSB_CHIPCO_CORECTL_UARTCLK0);
434 if (ccrev >= 21) {
435 /* Re-enable the UART clock. */
436 chipco_write32(cc, SSB_CHIPCO_CORECTL,
437 chipco_read32(cc, SSB_CHIPCO_CORECTL)
438 | SSB_CHIPCO_CORECTL_UARTCLKEN);
439 }
440 } else if (ccrev >= 3) {
398 /* Internal backplane clock */ 441 /* Internal backplane clock */
399 baud_base = ssb_clockspeed(bus); 442 baud_base = ssb_clockspeed(bus);
400 div = chipco_read32(cc, SSB_CHIPCO_CLKDIV) 443 div = chipco_read32(cc, SSB_CHIPCO_CLKDIV)
@@ -406,7 +449,7 @@ int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
406 } 449 }
407 450
408 /* Clock source depends on strapping if UartClkOverride is unset */ 451 /* Clock source depends on strapping if UartClkOverride is unset */
409 if ((cc->dev->id.revision > 0) && 452 if ((ccrev > 0) &&
410 !(chipco_read32(cc, SSB_CHIPCO_CORECTL) & SSB_CHIPCO_CORECTL_UARTCLK0)) { 453 !(chipco_read32(cc, SSB_CHIPCO_CORECTL) & SSB_CHIPCO_CORECTL_UARTCLK0)) {
411 if ((cc->capabilities & SSB_CHIPCO_CAP_UARTCLK) == 454 if ((cc->capabilities & SSB_CHIPCO_CAP_UARTCLK) ==
412 SSB_CHIPCO_CAP_UARTCLK_INT) { 455 SSB_CHIPCO_CAP_UARTCLK_INT) {
@@ -428,7 +471,7 @@ int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
428 cc_mmio = cc->dev->bus->mmio + (cc->dev->core_index * SSB_CORE_SIZE); 471 cc_mmio = cc->dev->bus->mmio + (cc->dev->core_index * SSB_CORE_SIZE);
429 uart_regs = cc_mmio + SSB_CHIPCO_UART0_DATA; 472 uart_regs = cc_mmio + SSB_CHIPCO_UART0_DATA;
430 /* Offset changed at after rev 0 */ 473 /* Offset changed at after rev 0 */
431 if (cc->dev->id.revision == 0) 474 if (ccrev == 0)
432 uart_regs += (i * 8); 475 uart_regs += (i * 8);
433 else 476 else
434 uart_regs += (i * 256); 477 uart_regs += (i * 256);
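chipco_write32_masked() (and its extif twin below) now return the value they wrote, which is what lets the new GPIO accessors report the resulting register state without a second read. The masked read-modify-write itself, isolated as a sketch:

/* Isolated form of the masked update used above. */
static u32 rmw32(u32 current_val, u32 mask, u32 value)
{
	value &= mask;			/* only the selected bits may change */
	value |= current_val & ~mask;	/* everything else is preserved      */
	return value;			/* this is what gets written back    */
}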
diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c
index fe55eb8b038a..c3e1d3e6d610 100644
--- a/drivers/ssb/driver_extif.c
+++ b/drivers/ssb/driver_extif.c
@@ -27,12 +27,14 @@ static inline void extif_write32(struct ssb_extif *extif, u16 offset, u32 value)
27 ssb_write32(extif->dev, offset, value); 27 ssb_write32(extif->dev, offset, value);
28} 28}
29 29
30static inline void extif_write32_masked(struct ssb_extif *extif, u16 offset, 30static inline u32 extif_write32_masked(struct ssb_extif *extif, u16 offset,
31 u32 mask, u32 value) 31 u32 mask, u32 value)
32{ 32{
33 value &= mask; 33 value &= mask;
34 value |= extif_read32(extif, offset) & ~mask; 34 value |= extif_read32(extif, offset) & ~mask;
35 extif_write32(extif, offset, value); 35 extif_write32(extif, offset, value);
36
37 return value;
36} 38}
37 39
38#ifdef CONFIG_SSB_SERIAL 40#ifdef CONFIG_SSB_SERIAL
@@ -110,20 +112,35 @@ void ssb_extif_get_clockcontrol(struct ssb_extif *extif,
110 *m = extif_read32(extif, SSB_EXTIF_CLOCK_SB); 112 *m = extif_read32(extif, SSB_EXTIF_CLOCK_SB);
111} 113}
112 114
115void ssb_extif_watchdog_timer_set(struct ssb_extif *extif,
116 u32 ticks)
117{
118 extif_write32(extif, SSB_EXTIF_WATCHDOG, ticks);
119}
120
113u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask) 121u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask)
114{ 122{
115 return extif_read32(extif, SSB_EXTIF_GPIO_IN) & mask; 123 return extif_read32(extif, SSB_EXTIF_GPIO_IN) & mask;
116} 124}
117 125
118void ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value) 126u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value)
119{ 127{
120 return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUT(0), 128 return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUT(0),
121 mask, value); 129 mask, value);
122} 130}
123 131
124void ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value) 132u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value)
125{ 133{
126 return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUTEN(0), 134 return extif_write32_masked(extif, SSB_EXTIF_GPIO_OUTEN(0),
127 mask, value); 135 mask, value);
128} 136}
129 137
138u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask, u32 value)
139{
140 return extif_write32_masked(extif, SSB_EXTIF_GPIO_INTPOL, mask, value);
141}
142
143u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask, u32 value)
144{
145 return extif_write32_masked(extif, SSB_EXTIF_GPIO_INTMASK, mask, value);
146}
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 2faaa906d5d6..6d99a9880055 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -11,6 +11,7 @@
11#include <linux/ssb/ssb.h> 11#include <linux/ssb/ssb.h>
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/ssb/ssb_embedded.h>
14 15
15#include "ssb_private.h" 16#include "ssb_private.h"
16 17
@@ -27,6 +28,18 @@ void pcicore_write32(struct ssb_pcicore *pc, u16 offset, u32 value)
27 ssb_write32(pc->dev, offset, value); 28 ssb_write32(pc->dev, offset, value);
28} 29}
29 30
31static inline
32u16 pcicore_read16(struct ssb_pcicore *pc, u16 offset)
33{
34 return ssb_read16(pc->dev, offset);
35}
36
37static inline
38void pcicore_write16(struct ssb_pcicore *pc, u16 offset, u16 value)
39{
40 ssb_write16(pc->dev, offset, value);
41}
42
30/************************************************** 43/**************************************************
31 * Code for hostmode operation. 44 * Code for hostmode operation.
32 **************************************************/ 45 **************************************************/
@@ -66,6 +79,7 @@ int pcibios_plat_dev_init(struct pci_dev *d)
66 base = &ssb_pcicore_pcibus_iobase; 79 base = &ssb_pcicore_pcibus_iobase;
67 else 80 else
68 base = &ssb_pcicore_pcibus_membase; 81 base = &ssb_pcicore_pcibus_membase;
82 res->flags |= IORESOURCE_PCI_FIXED;
69 if (res->end) { 83 if (res->end) {
70 size = res->end - res->start + 1; 84 size = res->end - res->start + 1;
71 if (*base & (size - 1)) 85 if (*base & (size - 1))
@@ -88,10 +102,12 @@ int pcibios_plat_dev_init(struct pci_dev *d)
88 102
89static void __init ssb_fixup_pcibridge(struct pci_dev *dev) 103static void __init ssb_fixup_pcibridge(struct pci_dev *dev)
90{ 104{
105 u8 lat;
106
91 if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) != 0) 107 if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) != 0)
92 return; 108 return;
93 109
94 ssb_printk(KERN_INFO "PCI: fixing up bridge\n"); 110 ssb_printk(KERN_INFO "PCI: Fixing up bridge %s\n", pci_name(dev));
95 111
96 /* Enable PCI bridge bus mastering and memory space */ 112 /* Enable PCI bridge bus mastering and memory space */
97 pci_set_master(dev); 113 pci_set_master(dev);
@@ -101,7 +117,10 @@ static void __init ssb_fixup_pcibridge(struct pci_dev *dev)
101 pci_write_config_dword(dev, SSB_BAR1_CONTROL, 3); 117 pci_write_config_dword(dev, SSB_BAR1_CONTROL, 3);
102 118
103 /* Make sure our latency is high enough to handle the devices behind us */ 119 /* Make sure our latency is high enough to handle the devices behind us */
104 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xa8); 120 lat = 168;
121 ssb_printk(KERN_INFO "PCI: Fixing latency timer of device %s to %u\n",
122 pci_name(dev), lat);
123 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
105} 124}
106DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, ssb_fixup_pcibridge); 125DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, ssb_fixup_pcibridge);
107 126
@@ -117,8 +136,10 @@ static u32 get_cfgspace_addr(struct ssb_pcicore *pc,
117 u32 addr = 0; 136 u32 addr = 0;
118 u32 tmp; 137 u32 tmp;
119 138
120 if (unlikely(pc->cardbusmode && dev > 1)) 139 /* We do only have one cardbus device behind the bridge. */
140 if (pc->cardbusmode && (dev >= 1))
121 goto out; 141 goto out;
142
122 if (bus == 0) { 143 if (bus == 0) {
123 /* Type 0 transaction */ 144 /* Type 0 transaction */
124 if (unlikely(dev >= SSB_PCI_SLOT_MAX)) 145 if (unlikely(dev >= SSB_PCI_SLOT_MAX))
@@ -279,14 +300,14 @@ static struct resource ssb_pcicore_mem_resource = {
279 .name = "SSB PCIcore external memory", 300 .name = "SSB PCIcore external memory",
280 .start = SSB_PCI_DMA, 301 .start = SSB_PCI_DMA,
281 .end = SSB_PCI_DMA + SSB_PCI_DMA_SZ - 1, 302 .end = SSB_PCI_DMA + SSB_PCI_DMA_SZ - 1,
282 .flags = IORESOURCE_MEM, 303 .flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED,
283}; 304};
284 305
285static struct resource ssb_pcicore_io_resource = { 306static struct resource ssb_pcicore_io_resource = {
286 .name = "SSB PCIcore external I/O", 307 .name = "SSB PCIcore external I/O",
287 .start = 0x100, 308 .start = 0x100,
288 .end = 0x7FF, 309 .end = 0x7FF,
289 .flags = IORESOURCE_IO, 310 .flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED,
290}; 311};
291 312
292static struct pci_controller ssb_pcicore_controller = { 313static struct pci_controller ssb_pcicore_controller = {
@@ -318,7 +339,16 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
318 pcicore_write32(pc, SSB_PCICORE_ARBCTL, val); 339 pcicore_write32(pc, SSB_PCICORE_ARBCTL, val);
319 udelay(1); /* Assertion time demanded by the PCI standard */ 340 udelay(1); /* Assertion time demanded by the PCI standard */
320 341
321 /*TODO cardbus mode */ 342 if (pc->dev->bus->has_cardbus_slot) {
343 ssb_dprintk(KERN_INFO PFX "CardBus slot detected\n");
344 pc->cardbusmode = 1;
345 /* GPIO 1 resets the bridge */
346 ssb_gpio_out(pc->dev->bus, 1, 1);
347 ssb_gpio_outen(pc->dev->bus, 1, 1);
348 pcicore_write16(pc, SSB_PCICORE_SPROM(0),
349 pcicore_read16(pc, SSB_PCICORE_SPROM(0))
350 | 0x0400);
351 }
322 352
323 /* 64MB I/O window */ 353 /* 64MB I/O window */
324 pcicore_write32(pc, SSB_PCICORE_SBTOPCI0, 354 pcicore_write32(pc, SSB_PCICORE_SBTOPCI0,
@@ -344,7 +374,8 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
344 /* Ok, ready to run, register it to the system. 374 /* Ok, ready to run, register it to the system.
345 * The following needs change, if we want to port hostmode 375 * The following needs change, if we want to port hostmode
346 * to non-MIPS platform. */ 376 * to non-MIPS platform. */
347 set_io_port_base((unsigned long)ioremap_nocache(SSB_PCI_MEM, 0x04000000)); 377 ssb_pcicore_controller.io_map_base = (unsigned long)ioremap_nocache(SSB_PCI_MEM, 0x04000000);
378 set_io_port_base(ssb_pcicore_controller.io_map_base);
348 /* Give some time to the PCI controller to configure itself with the new 379 /* Give some time to the PCI controller to configure itself with the new
349 * values. Not waiting at this point causes crashes of the machine. */ 380 * values. Not waiting at this point causes crashes of the machine. */
350 mdelay(10); 381 mdelay(10);
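Two small notes on the pcicore hunks above: the latency timer write is unchanged in effect, the hunk only adds the diagnostic printk and switches the constant to decimal; and IORESOURCE_PCI_FIXED marks the PCIcore windows so the PCI core will not try to reassign them.

/* Illustration only: the new decimal constant equals the old hex one,
 * 168 == 0xa8, so the value programmed into PCI_LATENCY_TIMER is the same. */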
diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c
new file mode 100644
index 000000000000..d3ade821555c
--- /dev/null
+++ b/drivers/ssb/embedded.c
@@ -0,0 +1,132 @@
1/*
2 * Sonics Silicon Backplane
3 * Embedded systems support code
4 *
5 * Copyright 2005-2008, Broadcom Corporation
6 * Copyright 2006-2008, Michael Buesch <mb@bu3sch.de>
7 *
8 * Licensed under the GNU/GPL. See COPYING for details.
9 */
10
11#include <linux/ssb/ssb.h>
12#include <linux/ssb/ssb_embedded.h>
13
14#include "ssb_private.h"
15
16
17int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks)
18{
19 if (ssb_chipco_available(&bus->chipco)) {
20 ssb_chipco_watchdog_timer_set(&bus->chipco, ticks);
21 return 0;
22 }
23 if (ssb_extif_available(&bus->extif)) {
24 ssb_extif_watchdog_timer_set(&bus->extif, ticks);
25 return 0;
26 }
27 return -ENODEV;
28}
29
30u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask)
31{
32 unsigned long flags;
33 u32 res = 0;
34
35 spin_lock_irqsave(&bus->gpio_lock, flags);
36 if (ssb_chipco_available(&bus->chipco))
37 res = ssb_chipco_gpio_in(&bus->chipco, mask);
38 else if (ssb_extif_available(&bus->extif))
39 res = ssb_extif_gpio_in(&bus->extif, mask);
40 else
41 SSB_WARN_ON(1);
42 spin_unlock_irqrestore(&bus->gpio_lock, flags);
43
44 return res;
45}
46EXPORT_SYMBOL(ssb_gpio_in);
47
48u32 ssb_gpio_out(struct ssb_bus *bus, u32 mask, u32 value)
49{
50 unsigned long flags;
51 u32 res = 0;
52
53 spin_lock_irqsave(&bus->gpio_lock, flags);
54 if (ssb_chipco_available(&bus->chipco))
55 res = ssb_chipco_gpio_out(&bus->chipco, mask, value);
56 else if (ssb_extif_available(&bus->extif))
57 res = ssb_extif_gpio_out(&bus->extif, mask, value);
58 else
59 SSB_WARN_ON(1);
60 spin_unlock_irqrestore(&bus->gpio_lock, flags);
61
62 return res;
63}
64EXPORT_SYMBOL(ssb_gpio_out);
65
66u32 ssb_gpio_outen(struct ssb_bus *bus, u32 mask, u32 value)
67{
68 unsigned long flags;
69 u32 res = 0;
70
71 spin_lock_irqsave(&bus->gpio_lock, flags);
72 if (ssb_chipco_available(&bus->chipco))
73 res = ssb_chipco_gpio_outen(&bus->chipco, mask, value);
74 else if (ssb_extif_available(&bus->extif))
75 res = ssb_extif_gpio_outen(&bus->extif, mask, value);
76 else
77 SSB_WARN_ON(1);
78 spin_unlock_irqrestore(&bus->gpio_lock, flags);
79
80 return res;
81}
82EXPORT_SYMBOL(ssb_gpio_outen);
83
84u32 ssb_gpio_control(struct ssb_bus *bus, u32 mask, u32 value)
85{
86 unsigned long flags;
87 u32 res = 0;
88
89 spin_lock_irqsave(&bus->gpio_lock, flags);
90 if (ssb_chipco_available(&bus->chipco))
91 res = ssb_chipco_gpio_control(&bus->chipco, mask, value);
92 spin_unlock_irqrestore(&bus->gpio_lock, flags);
93
94 return res;
95}
96EXPORT_SYMBOL(ssb_gpio_control);
97
98u32 ssb_gpio_intmask(struct ssb_bus *bus, u32 mask, u32 value)
99{
100 unsigned long flags;
101 u32 res = 0;
102
103 spin_lock_irqsave(&bus->gpio_lock, flags);
104 if (ssb_chipco_available(&bus->chipco))
105 res = ssb_chipco_gpio_intmask(&bus->chipco, mask, value);
106 else if (ssb_extif_available(&bus->extif))
107 res = ssb_extif_gpio_intmask(&bus->extif, mask, value);
108 else
109 SSB_WARN_ON(1);
110 spin_unlock_irqrestore(&bus->gpio_lock, flags);
111
112 return res;
113}
114EXPORT_SYMBOL(ssb_gpio_intmask);
115
116u32 ssb_gpio_polarity(struct ssb_bus *bus, u32 mask, u32 value)
117{
118 unsigned long flags;
119 u32 res = 0;
120
121 spin_lock_irqsave(&bus->gpio_lock, flags);
122 if (ssb_chipco_available(&bus->chipco))
123 res = ssb_chipco_gpio_polarity(&bus->chipco, mask, value);
124 else if (ssb_extif_available(&bus->extif))
125 res = ssb_extif_gpio_polarity(&bus->extif, mask, value);
126 else
127 SSB_WARN_ON(1);
128 spin_unlock_irqrestore(&bus->gpio_lock, flags);
129
130 return res;
131}
132EXPORT_SYMBOL(ssb_gpio_polarity);
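The new exported helpers in embedded.c wrap whichever GPIO backend the bus actually has (ChipCommon or EXTIF) behind one spinlocked read-modify-write API. A hypothetical caller, just to show the mask/value convention; the bit chosen here is illustrative, not from the patch, and bus is assumed to be a valid struct ssb_bus pointer:

/* Make GPIO bit 1 an output and drive it high: the mask selects the bits
 * the call may touch, the value gives their new state. */
ssb_gpio_outen(bus, 0x0002, 0x0002);
ssb_gpio_out(bus, 0x0002, 0x0002);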
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 9028ed5715a1..bedb2b4ee9d2 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -557,6 +557,7 @@ static int ssb_fetch_invariants(struct ssb_bus *bus,
557 goto out; 557 goto out;
558 memcpy(&bus->boardinfo, &iv.boardinfo, sizeof(iv.boardinfo)); 558 memcpy(&bus->boardinfo, &iv.boardinfo, sizeof(iv.boardinfo));
559 memcpy(&bus->sprom, &iv.sprom, sizeof(iv.sprom)); 559 memcpy(&bus->sprom, &iv.sprom, sizeof(iv.sprom));
560 bus->has_cardbus_slot = iv.has_cardbus_slot;
560out: 561out:
561 return err; 562 return err;
562} 563}
@@ -569,6 +570,9 @@ static int ssb_bus_register(struct ssb_bus *bus,
569 570
570 spin_lock_init(&bus->bar_lock); 571 spin_lock_init(&bus->bar_lock);
571 INIT_LIST_HEAD(&bus->list); 572 INIT_LIST_HEAD(&bus->list);
573#ifdef CONFIG_SSB_EMBEDDED
574 spin_lock_init(&bus->gpio_lock);
575#endif
572 576
573 /* Powerup the bus */ 577 /* Powerup the bus */
574 err = ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 1); 578 err = ssb_pci_xtal(bus, SSB_GPIO_XTAL | SSB_GPIO_PLL, 1);
diff --git a/drivers/thermal/thermal.c b/drivers/thermal/thermal.c
index e782b3e7fcdb..8b86e53ccf7a 100644
--- a/drivers/thermal/thermal.c
+++ b/drivers/thermal/thermal.c
@@ -306,12 +306,23 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
306{ 306{
307 struct thermal_cooling_device_instance *dev; 307 struct thermal_cooling_device_instance *dev;
308 struct thermal_cooling_device_instance *pos; 308 struct thermal_cooling_device_instance *pos;
309 struct thermal_zone_device *pos1;
310 struct thermal_cooling_device *pos2;
309 int result; 311 int result;
310 312
311 if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE)) 313 if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE))
312 return -EINVAL; 314 return -EINVAL;
313 315
314 if (!tz || !cdev) 316 list_for_each_entry(pos1, &thermal_tz_list, node) {
317 if (pos1 == tz)
318 break;
319 }
320 list_for_each_entry(pos2, &thermal_cdev_list, node) {
321 if (pos2 == cdev)
322 break;
323 }
324
325 if (tz != pos1 || cdev != pos2)
315 return -EINVAL; 326 return -EINVAL;
316 327
317 dev = 328 dev =
@@ -437,20 +448,20 @@ struct thermal_cooling_device *thermal_cooling_device_register(char *type,
437 int result; 448 int result;
438 449
439 if (strlen(type) >= THERMAL_NAME_LENGTH) 450 if (strlen(type) >= THERMAL_NAME_LENGTH)
440 return NULL; 451 return ERR_PTR(-EINVAL);
441 452
442 if (!ops || !ops->get_max_state || !ops->get_cur_state || 453 if (!ops || !ops->get_max_state || !ops->get_cur_state ||
443 !ops->set_cur_state) 454 !ops->set_cur_state)
444 return NULL; 455 return ERR_PTR(-EINVAL);
445 456
446 cdev = kzalloc(sizeof(struct thermal_cooling_device), GFP_KERNEL); 457 cdev = kzalloc(sizeof(struct thermal_cooling_device), GFP_KERNEL);
447 if (!cdev) 458 if (!cdev)
448 return NULL; 459 return ERR_PTR(-ENOMEM);
449 460
450 result = get_idr(&thermal_cdev_idr, &thermal_idr_lock, &cdev->id); 461 result = get_idr(&thermal_cdev_idr, &thermal_idr_lock, &cdev->id);
451 if (result) { 462 if (result) {
452 kfree(cdev); 463 kfree(cdev);
453 return NULL; 464 return ERR_PTR(result);
454 } 465 }
455 466
456 strcpy(cdev->type, type); 467 strcpy(cdev->type, type);
@@ -462,7 +473,7 @@ struct thermal_cooling_device *thermal_cooling_device_register(char *type,
462 if (result) { 473 if (result) {
463 release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); 474 release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
464 kfree(cdev); 475 kfree(cdev);
465 return NULL; 476 return ERR_PTR(result);
466 } 477 }
467 478
468 /* sys I/F */ 479 /* sys I/F */
@@ -498,7 +509,7 @@ struct thermal_cooling_device *thermal_cooling_device_register(char *type,
498 unregister: 509 unregister:
499 release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); 510 release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
500 device_unregister(&cdev->device); 511 device_unregister(&cdev->device);
501 return NULL; 512 return ERR_PTR(result);
502} 513}
503 514
504EXPORT_SYMBOL(thermal_cooling_device_register); 515EXPORT_SYMBOL(thermal_cooling_device_register);
@@ -570,17 +581,17 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
570 int count; 581 int count;
571 582
572 if (strlen(type) >= THERMAL_NAME_LENGTH) 583 if (strlen(type) >= THERMAL_NAME_LENGTH)
573 return NULL; 584 return ERR_PTR(-EINVAL);
574 585
575 if (trips > THERMAL_MAX_TRIPS || trips < 0) 586 if (trips > THERMAL_MAX_TRIPS || trips < 0)
576 return NULL; 587 return ERR_PTR(-EINVAL);
577 588
578 if (!ops || !ops->get_temp) 589 if (!ops || !ops->get_temp)
579 return NULL; 590 return ERR_PTR(-EINVAL);
580 591
581 tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL); 592 tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL);
582 if (!tz) 593 if (!tz)
583 return NULL; 594 return ERR_PTR(-ENOMEM);
584 595
585 INIT_LIST_HEAD(&tz->cooling_devices); 596 INIT_LIST_HEAD(&tz->cooling_devices);
586 idr_init(&tz->idr); 597 idr_init(&tz->idr);
@@ -588,7 +599,7 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
588 result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id); 599 result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id);
589 if (result) { 600 if (result) {
590 kfree(tz); 601 kfree(tz);
591 return NULL; 602 return ERR_PTR(result);
592 } 603 }
593 604
594 strcpy(tz->type, type); 605 strcpy(tz->type, type);
@@ -601,7 +612,7 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
601 if (result) { 612 if (result) {
602 release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); 613 release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
603 kfree(tz); 614 kfree(tz);
604 return NULL; 615 return ERR_PTR(result);
605 } 616 }
606 617
607 /* sys I/F */ 618 /* sys I/F */
@@ -643,7 +654,7 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
643 unregister: 654 unregister:
644 release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); 655 release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
645 device_unregister(&tz->device); 656 device_unregister(&tz->device);
646 return NULL; 657 return ERR_PTR(result);
647} 658}
648 659
649EXPORT_SYMBOL(thermal_zone_device_register); 660EXPORT_SYMBOL(thermal_zone_device_register);
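With this change thermal_cooling_device_register() and thermal_zone_device_register() report failures through ERR_PTR() rather than NULL, so callers must test the result with IS_ERR() and decode it with PTR_ERR(). A minimal caller sketch, assuming the usual (type, devdata, ops) argument list; the ops table and device data below are placeholders, not part of this patch:

    #include <linux/err.h>
    #include <linux/thermal.h>

    static int my_register_fan(void *my_data,
                               struct thermal_cooling_device_ops *my_ops)
    {
            struct thermal_cooling_device *cdev;

            cdev = thermal_cooling_device_register("Fan", my_data, my_ops);
            if (IS_ERR(cdev))               /* was: if (!cdev) */
                    return PTR_ERR(cdev);   /* -EINVAL, -ENOMEM, ... */

            /* ... use cdev, unregister it on teardown ... */
            return 0;
    }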
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 2a77e9d42c68..e8a01f264540 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -57,29 +57,29 @@ struct uio_map {
57}; 57};
58#define to_map(map) container_of(map, struct uio_map, kobj) 58#define to_map(map) container_of(map, struct uio_map, kobj)
59 59
60 60static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
61static ssize_t map_attr_show(struct kobject *kobj, struct kobj_attribute *attr,
62 char *buf)
63{ 61{
64 struct uio_map *map = to_map(kobj); 62 return sprintf(buf, "0x%lx\n", mem->addr);
65 struct uio_mem *mem = map->mem; 63}
66
67 if (strncmp(attr->attr.name, "addr", 4) == 0)
68 return sprintf(buf, "0x%lx\n", mem->addr);
69
70 if (strncmp(attr->attr.name, "size", 4) == 0)
71 return sprintf(buf, "0x%lx\n", mem->size);
72 64
73 return -ENODEV; 65static ssize_t map_size_show(struct uio_mem *mem, char *buf)
66{
67 return sprintf(buf, "0x%lx\n", mem->size);
74} 68}
75 69
76static struct kobj_attribute attr_attribute = 70struct uio_sysfs_entry {
77 __ATTR(addr, S_IRUGO, map_attr_show, NULL); 71 struct attribute attr;
78static struct kobj_attribute size_attribute = 72 ssize_t (*show)(struct uio_mem *, char *);
79 __ATTR(size, S_IRUGO, map_attr_show, NULL); 73 ssize_t (*store)(struct uio_mem *, const char *, size_t);
74};
75
76static struct uio_sysfs_entry addr_attribute =
77 __ATTR(addr, S_IRUGO, map_addr_show, NULL);
78static struct uio_sysfs_entry size_attribute =
79 __ATTR(size, S_IRUGO, map_size_show, NULL);
80 80
81static struct attribute *attrs[] = { 81static struct attribute *attrs[] = {
82 &attr_attribute.attr, 82 &addr_attribute.attr,
83 &size_attribute.attr, 83 &size_attribute.attr,
84 NULL, /* need to NULL terminate the list of attributes */ 84 NULL, /* need to NULL terminate the list of attributes */
85}; 85};
@@ -90,8 +90,28 @@ static void map_release(struct kobject *kobj)
90 kfree(map); 90 kfree(map);
91} 91}
92 92
93static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
94 char *buf)
95{
96 struct uio_map *map = to_map(kobj);
97 struct uio_mem *mem = map->mem;
98 struct uio_sysfs_entry *entry;
99
100 entry = container_of(attr, struct uio_sysfs_entry, attr);
101
102 if (!entry->show)
103 return -EIO;
104
105 return entry->show(mem, buf);
106}
107
108static struct sysfs_ops uio_sysfs_ops = {
109 .show = map_type_show,
110};
111
93static struct kobj_type map_attr_type = { 112static struct kobj_type map_attr_type = {
94 .release = map_release, 113 .release = map_release,
114 .sysfs_ops = &uio_sysfs_ops,
95 .default_attrs = attrs, 115 .default_attrs = attrs,
96}; 116};
97 117
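The uio change replaces one strncmp()-based show routine with a small dispatch table: each attribute carries its own show callback in a struct uio_sysfs_entry, and the kobj_type's sysfs_ops recovers that entry with container_of() and forwards to it. Under that scheme, adding another read-only map attribute is just a new helper plus an __ATTR entry; a hedged sketch (the "foo" attribute and map_foo_show() are hypothetical):

    static ssize_t map_foo_show(struct uio_mem *mem, char *buf)
    {
            return sprintf(buf, "0x%lx\n", mem->size);      /* placeholder value */
    }

    static struct uio_sysfs_entry foo_attribute =
            __ATTR(foo, S_IRUGO, map_foo_show, NULL);

    /* ... and add &foo_attribute.attr to the NULL-terminated attrs[] array,
     * so map_type_show() can dispatch to it via container_of(). */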
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index bcc42136c93f..0147ea39340e 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -496,13 +496,10 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
496 otherwise it is scheduled, and with high data rates data can get lost. */ 496 otherwise it is scheduled, and with high data rates data can get lost. */
497 tty->low_latency = 1; 497 tty->low_latency = 1;
498 498
499 if (usb_autopm_get_interface(acm->control)) { 499 if (usb_autopm_get_interface(acm->control) < 0)
500 mutex_unlock(&open_mutex); 500 goto early_bail;
501 return -EIO;
502 }
503 501
504 mutex_lock(&acm->mutex); 502 mutex_lock(&acm->mutex);
505 mutex_unlock(&open_mutex);
506 if (acm->used++) { 503 if (acm->used++) {
507 usb_autopm_put_interface(acm->control); 504 usb_autopm_put_interface(acm->control);
508 goto done; 505 goto done;
@@ -536,6 +533,7 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
536done: 533done:
537err_out: 534err_out:
538 mutex_unlock(&acm->mutex); 535 mutex_unlock(&acm->mutex);
536 mutex_unlock(&open_mutex);
539 return rv; 537 return rv;
540 538
541full_bailout: 539full_bailout:
@@ -544,6 +542,8 @@ bail_out:
544 usb_autopm_put_interface(acm->control); 542 usb_autopm_put_interface(acm->control);
545 acm->used--; 543 acm->used--;
546 mutex_unlock(&acm->mutex); 544 mutex_unlock(&acm->mutex);
545early_bail:
546 mutex_unlock(&open_mutex);
547 return -EIO; 547 return -EIO;
548} 548}
549 549
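The acm_tty_open() rework changes the locking so that open_mutex (taken earlier in the function, outside the lines shown) stays held across the whole open path, including acquisition of acm->mutex, and is released only at the common exit labels. A sketch of the resulting ordering, with the device bring-up elided:

    mutex_lock(&open_mutex);                        /* taken before this hunk */
    if (usb_autopm_get_interface(acm->control) < 0)
            goto early_bail;                        /* drops open_mutex, returns -EIO */

    mutex_lock(&acm->mutex);                        /* nested inside open_mutex */
    /* ... bring the device up ... */
    mutex_unlock(&acm->mutex);
    mutex_unlock(&open_mutex);                      /* common exit: both released here */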
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index ad632f2d6f94..0647164d36db 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -428,6 +428,7 @@ static int usblp_open(struct inode *inode, struct file *file)
428 usblp->rcomplete = 0; 428 usblp->rcomplete = 0;
429 429
430 if (handle_bidir(usblp) < 0) { 430 if (handle_bidir(usblp) < 0) {
431 usb_autopm_put_interface(intf);
431 usblp->used = 0; 432 usblp->used = 0;
432 file->private_data = NULL; 433 file->private_data = NULL;
433 retval = -EIO; 434 retval = -EIO;
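The one-line usblp fix plugs a power-management reference leak: usblp_open() takes a reference with usb_autopm_get_interface() earlier in the function, and the handle_bidir() failure path previously returned without dropping it. The pattern being restored, as a simplified sketch (error handling trimmed):

    /* Every successful usb_autopm_get_interface() must be balanced by a
     * usb_autopm_put_interface() on every exit path. */
    retval = usb_autopm_get_interface(intf);
    if (retval < 0)
            goto out;

    if (handle_bidir(usblp) < 0) {
            usb_autopm_put_interface(intf); /* the call this patch adds */
            retval = -EIO;
            goto out;
    }
    /* success: the reference is released later, when the printer is closed */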
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d42c561c75f1..f90ab5e94c58 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -28,11 +28,23 @@
28 * devices is broken... 28 * devices is broken...
29 */ 29 */
30static const struct usb_device_id usb_quirk_list[] = { 30static const struct usb_device_id usb_quirk_list[] = {
31 /* Action Semiconductor flash disk */
32 { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255},
33
31 /* CBM - Flash disk */ 34 /* CBM - Flash disk */
32 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, 35 { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
33 /* HP 5300/5370C scanner */ 36 /* HP 5300/5370C scanner */
34 { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 }, 37 { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 },
35 38
39 /* Creative SB Audigy 2 NX */
40 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
41
42 /* Roland SC-8820 */
43 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
44
45 /* Edirol SD-20 */
46 { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME },
47
36 /* INTEL VALUE SSD */ 48 /* INTEL VALUE SSD */
37 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, 49 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
38 50
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index a70e255402b8..e99872308144 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -1561,6 +1561,7 @@ done_set_intf:
1561 memcpy(req->buf, buf, n); 1561 memcpy(req->buf, buf, n);
1562 req->complete = rndis_response_complete; 1562 req->complete = rndis_response_complete;
1563 rndis_free_response(dev->rndis_config, buf); 1563 rndis_free_response(dev->rndis_config, buf);
1564 value = n;
1564 } 1565 }
1565 /* else stalls ... spec says to avoid that */ 1566 /* else stalls ... spec says to avoid that */
1566 } 1567 }
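In the RNDIS control-request path, `value` is what the setup handler hands back to the gadget stack: a non-negative value is the number of bytes to return on ep0, a negative value makes the caller stall the endpoint. Adding `value = n` after the copy therefore queues the response instead of stalling it. A trimmed sketch of the branch (retrieval of buf and n is elided):

    if (buf) {                              /* an RNDIS response is pending */
            memcpy(req->buf, buf, n);
            req->complete = rndis_response_complete;
            rndis_free_response(dev->rndis_config, buf);
            value = n;                      /* return n bytes on ep0 */
    }
    /* else value stays negative and ep0 stalls, which the spec says to avoid */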
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 3301167d4f2a..017a196d041f 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -3563,8 +3563,7 @@ static ssize_t show_file(struct device *dev, struct device_attribute *attr,
3563 3563
3564 down_read(&fsg->filesem); 3564 down_read(&fsg->filesem);
3565 if (backing_file_is_open(curlun)) { // Get the complete pathname 3565 if (backing_file_is_open(curlun)) { // Get the complete pathname
3566 p = d_path(curlun->filp->f_path.dentry, 3566 p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
3567 curlun->filp->f_path.mnt, buf, PAGE_SIZE - 1);
3568 if (IS_ERR(p)) 3567 if (IS_ERR(p))
3569 rc = PTR_ERR(p); 3568 rc = PTR_ERR(p);
3570 else { 3569 else {
@@ -3981,9 +3980,8 @@ static int __init fsg_bind(struct usb_gadget *gadget)
3981 if (backing_file_is_open(curlun)) { 3980 if (backing_file_is_open(curlun)) {
3982 p = NULL; 3981 p = NULL;
3983 if (pathbuf) { 3982 if (pathbuf) {
3984 p = d_path(curlun->filp->f_path.dentry, 3983 p = d_path(&curlun->filp->f_path,
3985 curlun->filp->f_path.mnt, 3984 pathbuf, PATH_MAX);
3986 pathbuf, PATH_MAX);
3987 if (IS_ERR(p)) 3985 if (IS_ERR(p))
3988 p = NULL; 3986 p = NULL;
3989 } 3987 }
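Both file_storage.c hunks track the VFS API change in which d_path() takes a single struct path * instead of a separate dentry/vfsmount pair. A short sketch of the new call against an open struct file (a sketch only; buffer sizing follows the driver's usage above):

    /* d_path() fills in the tail of buf and returns a pointer somewhere
     * inside it, or an ERR_PTR() value on failure. */
    char *name = d_path(&filp->f_path, buf, PAGE_SIZE - 1);
    if (IS_ERR(name))
            rc = PTR_ERR(name);             /* e.g. -ENAMETOOLONG */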
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 9fdabc8fcac4..4f6bfa100f2a 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -1299,7 +1299,7 @@ printer_unbind(struct usb_gadget *gadget)
1299 printer_req_free(dev->in_ep, req); 1299 printer_req_free(dev->in_ep, req);
1300 } 1300 }
1301 1301
1302 if (dev->current_rx_req != NULL); 1302 if (dev->current_rx_req != NULL)
1303 printer_req_free(dev->out_ep, dev->current_rx_req); 1303 printer_req_free(dev->out_ep, dev->current_rx_req);
1304 1304
1305 while (!list_empty(&dev->rx_reqs)) { 1305 while (!list_empty(&dev->rx_reqs)) {
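The printer.c fix removes a stray semicolon: with `if (dev->current_rx_req != NULL);` the conditional body was empty, so the following printer_req_free() ran unconditionally, even when current_rx_req was NULL. The bug class in miniature (names are illustrative, not driver code):

    if (thing != NULL);             /* buggy: the ';' ends the if statement */
            free_thing(thing);      /* ...so this always runs */

    if (thing != NULL)              /* fixed: the call is now conditional */
            free_thing(thing);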
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index d97b16b52efa..bf8be2a41a4a 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -69,10 +69,9 @@ config USB_EHCI_BIG_ENDIAN_DESC
69 default y 69 default y
70 70
71config USB_EHCI_FSL 71config USB_EHCI_FSL
72 bool 72 bool "Support for Freescale on-chip EHCI USB controller"
73 depends on USB_EHCI_HCD 73 depends on USB_EHCI_HCD && FSL_SOC
74 select USB_EHCI_ROOT_HUB_TT 74 select USB_EHCI_ROOT_HUB_TT
75 default y if MPC834x || PPC_MPC831x
76 ---help--- 75 ---help---
77 Variation of ARC USB block used in some Freescale chips. 76 Variation of ARC USB block used in some Freescale chips.
78 77
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 4caa6a8b9a37..b8ad55aff842 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -862,18 +862,18 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
862 /* reschedule QH iff another request is queued */ 862 /* reschedule QH iff another request is queued */
863 if (!list_empty (&qh->qtd_list) 863 if (!list_empty (&qh->qtd_list)
864 && HC_IS_RUNNING (hcd->state)) { 864 && HC_IS_RUNNING (hcd->state)) {
865 int schedule_status; 865 rc = qh_schedule(ehci, qh);
866 866
867 schedule_status = qh_schedule (ehci, qh); 867 /* An error here likely indicates handshake failure
868 spin_unlock_irqrestore (&ehci->lock, flags); 868 * or no space left in the schedule. Neither fault
869 869 * should happen often ...
870 if (schedule_status != 0) { 870 *
871 // shouldn't happen often, but ... 871 * FIXME kill the now-dysfunctional queued urbs
872 // FIXME kill those tds' urbs 872 */
873 err ("can't reschedule qh %p, err %d", 873 if (rc != 0)
874 qh, schedule_status); 874 ehci_err(ehci,
875 } 875 "can't reschedule qh %p, err %d",
876 return status; 876 qh, rc);
877 } 877 }
878 break; 878 break;
879 879
@@ -1014,7 +1014,7 @@ MODULE_LICENSE ("GPL");
1014#endif 1014#endif
1015 1015
1016#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \ 1016#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
1017 !defined(PS3_SYSTEM_BUS_DRIVER) 1017 !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER)
1018#error "missing bus glue for ehci-hcd" 1018#error "missing bus glue for ehci-hcd"
1019#endif 1019#endif
1020 1020
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index ba370c56172c..59be276ccd9d 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1766,6 +1766,7 @@ sl811h_suspend(struct platform_device *dev, pm_message_t state)
1766 retval = sl811h_bus_suspend(hcd); 1766 retval = sl811h_bus_suspend(hcd);
1767 break; 1767 break;
1768 case PM_EVENT_SUSPEND: 1768 case PM_EVENT_SUSPEND:
1769 case PM_EVENT_HIBERNATE:
1769 case PM_EVENT_PRETHAW: /* explicitly discard hw state */ 1770 case PM_EVENT_PRETHAW: /* explicitly discard hw state */
1770 port_power(sl811, 0); 1771 port_power(sl811, 0);
1771 break; 1772 break;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index ac283b09a63f..3033d6945202 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3213,15 +3213,20 @@ static int u132_suspend(struct platform_device *pdev, pm_message_t state)
3213 dev_err(&u132->platform_dev->dev, "device is being removed\n"); 3213 dev_err(&u132->platform_dev->dev, "device is being removed\n");
3214 return -ESHUTDOWN; 3214 return -ESHUTDOWN;
3215 } else { 3215 } else {
3216 int retval = 0; 3216 int retval = 0, ports;
3217 if (state.event == PM_EVENT_FREEZE) { 3217
3218 switch (state.event) {
3219 case PM_EVENT_FREEZE:
3218 retval = u132_bus_suspend(hcd); 3220 retval = u132_bus_suspend(hcd);
3219 } else if (state.event == PM_EVENT_SUSPEND) { 3221 break;
3220 int ports = MAX_U132_PORTS; 3222 case PM_EVENT_SUSPEND:
3223 case PM_EVENT_HIBERNATE:
3224 ports = MAX_U132_PORTS;
3221 while (ports-- > 0) { 3225 while (ports-- > 0) {
3222 port_power(u132, ports, 0); 3226 port_power(u132, ports, 0);
3223 } 3227 }
3224 } 3228 break;
3229 }
3225 if (retval == 0) 3230 if (retval == 0)
3226 pdev->dev.power.power_state = state; 3231 pdev->dev.power.power_state = state;
3227 return retval; 3232 return retval;
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 8208496dfc63..c730d20eec66 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -61,6 +61,7 @@
61#define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002 61#define USB_DEVICE_ID_VERNIER_GOTEMP 0x0002
62#define USB_DEVICE_ID_VERNIER_SKIP 0x0003 62#define USB_DEVICE_ID_VERNIER_SKIP 0x0003
63#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004 63#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004
64#define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006
64 65
65#define USB_VENDOR_ID_MICROCHIP 0x04d8 66#define USB_VENDOR_ID_MICROCHIP 0x04d8
66#define USB_DEVICE_ID_PICDEM 0x000c 67#define USB_DEVICE_ID_PICDEM 0x000c
@@ -92,6 +93,7 @@ static struct usb_device_id ld_usb_table [] = {
92 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) }, 93 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
93 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) }, 94 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) },
94 { USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICDEM) }, 95 { USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICDEM) },
96 { USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LCSPEC) },
95 { } /* Terminating entry */ 97 { } /* Terminating entry */
96}; 98};
97MODULE_DEVICE_TABLE(usb, ld_usb_table); 99MODULE_DEVICE_TABLE(usb, ld_usb_table);
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index 67e2fc20eeeb..03368edf3f22 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -59,13 +59,14 @@ static ssize_t set_speed(struct device *dev, struct device_attribute *attr,
59{ 59{
60 struct usb_interface *intf = to_usb_interface(dev); 60 struct usb_interface *intf = to_usb_interface(dev);
61 struct trancevibrator *tv = usb_get_intfdata(intf); 61 struct trancevibrator *tv = usb_get_intfdata(intf);
62 int temp, retval; 62 int temp, retval, old;
63 63
64 temp = simple_strtoul(buf, NULL, 10); 64 temp = simple_strtoul(buf, NULL, 10);
65 if (temp > 255) 65 if (temp > 255)
66 temp = 255; 66 temp = 255;
67 else if (temp < 0) 67 else if (temp < 0)
68 temp = 0; 68 temp = 0;
69 old = tv->speed;
69 tv->speed = temp; 70 tv->speed = temp;
70 71
71 dev_dbg(&tv->udev->dev, "speed = %d\n", tv->speed); 72 dev_dbg(&tv->udev->dev, "speed = %d\n", tv->speed);
@@ -77,6 +78,7 @@ static ssize_t set_speed(struct device *dev, struct device_attribute *attr,
77 tv->speed, /* speed value */ 78 tv->speed, /* speed value */
78 0, NULL, 0, USB_CTRL_GET_TIMEOUT); 79 0, NULL, 0, USB_CTRL_GET_TIMEOUT);
79 if (retval) { 80 if (retval) {
81 tv->speed = old;
80 dev_dbg(&tv->udev->dev, "retval = %d\n", retval); 82 dev_dbg(&tv->udev->dev, "retval = %d\n", retval);
81 return retval; 83 return retval;
82 } 84 }
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 90dcc625f70d..76db2fef4657 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -393,8 +393,8 @@ static const char *ftdi_chip_name[] = {
393#define FTDI_STATUS_B1_MASK (FTDI_RS_BI) 393#define FTDI_STATUS_B1_MASK (FTDI_RS_BI)
394/* End TIOCMIWAIT */ 394/* End TIOCMIWAIT */
395 395
396#define FTDI_IMPL_ASYNC_FLAGS = ( ASYNC_SPD_HI | ASYNC_SPD_VHI \ 396#define FTDI_IMPL_ASYNC_FLAGS = (ASYNC_SPD_HI | ASYNC_SPD_VHI \
397 ASYNC_SPD_CUST | ASYNC_SPD_SHI | ASYNC_SPD_WARP ) 397 | ASYNC_SPD_CUST | ASYNC_SPD_SHI | ASYNC_SPD_WARP)
398 398
399/* function prototypes for a FTDI serial converter */ 399/* function prototypes for a FTDI serial converter */
400static int ftdi_sio_probe (struct usb_serial *serial, const struct usb_device_id *id); 400static int ftdi_sio_probe (struct usb_serial *serial, const struct usb_device_id *id);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5e8bf1bc1e50..af2674c57414 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -113,6 +113,9 @@ static int option_send_setup(struct usb_serial_port *port);
113#define NOVATELWIRELESS_VENDOR_ID 0x1410 113#define NOVATELWIRELESS_VENDOR_ID 0x1410
114#define DELL_VENDOR_ID 0x413C 114#define DELL_VENDOR_ID 0x413C
115 115
116#define KYOCERA_VENDOR_ID 0x0c88
117#define KYOCERA_PRODUCT_KPC680 0x180a
118
116#define ANYDATA_VENDOR_ID 0x16d5 119#define ANYDATA_VENDOR_ID 0x16d5
117#define ANYDATA_PRODUCT_ADU_E100A 0x6501 120#define ANYDATA_PRODUCT_ADU_E100A 0x6501
118#define ANYDATA_PRODUCT_ADU_500A 0x6502 121#define ANYDATA_PRODUCT_ADU_500A 0x6502
@@ -121,6 +124,8 @@ static int option_send_setup(struct usb_serial_port *port);
121#define BANDRICH_PRODUCT_C100_1 0x1002 124#define BANDRICH_PRODUCT_C100_1 0x1002
122#define BANDRICH_PRODUCT_C100_2 0x1003 125#define BANDRICH_PRODUCT_C100_2 0x1003
123 126
127#define QUALCOMM_VENDOR_ID 0x05C6
128
124static struct usb_device_id option_ids[] = { 129static struct usb_device_id option_ids[] = {
125 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 130 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
126 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 131 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -174,18 +179,23 @@ static struct usb_device_id option_ids[] = {
174 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2410) }, /* Novatel EU740 */ 179 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2410) }, /* Novatel EU740 */
175 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x4100) }, /* Novatel U727 */ 180 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x4100) }, /* Novatel U727 */
176 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x4400) }, /* Novatel MC950 */ 181 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x4400) }, /* Novatel MC950 */
182 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x5010) }, /* Novatel U727 */
177 { USB_DEVICE(DELL_VENDOR_ID, 0x8114) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */ 183 { USB_DEVICE(DELL_VENDOR_ID, 0x8114) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */
178 { USB_DEVICE(DELL_VENDOR_ID, 0x8115) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ 184 { USB_DEVICE(DELL_VENDOR_ID, 0x8115) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
179 { USB_DEVICE(DELL_VENDOR_ID, 0x8116) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ 185 { USB_DEVICE(DELL_VENDOR_ID, 0x8116) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */
180 { USB_DEVICE(DELL_VENDOR_ID, 0x8117) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO ExpressCard == Novatel Merlin XV620 CDMA/EV-DO */ 186 { USB_DEVICE(DELL_VENDOR_ID, 0x8117) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO ExpressCard == Novatel Merlin XV620 CDMA/EV-DO */
181 { USB_DEVICE(DELL_VENDOR_ID, 0x8118) }, /* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard == Novatel Merlin XU870 HSDPA/3G */ 187 { USB_DEVICE(DELL_VENDOR_ID, 0x8118) }, /* Dell Wireless 5510 Mobile Broadband HSDPA ExpressCard == Novatel Merlin XU870 HSDPA/3G */
182 { USB_DEVICE(DELL_VENDOR_ID, 0x8128) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite E720 CDMA/EV-DO */ 188 { USB_DEVICE(DELL_VENDOR_ID, 0x8128) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite E720 CDMA/EV-DO */
189 { USB_DEVICE(DELL_VENDOR_ID, 0x8129) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite ET620 CDMA/EV-DO */
190 { USB_DEVICE(DELL_VENDOR_ID, 0x8133) }, /* Dell Wireless 5720 == Novatel EV620 CDMA/EV-DO */
183 { USB_DEVICE(DELL_VENDOR_ID, 0x8136) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */ 191 { USB_DEVICE(DELL_VENDOR_ID, 0x8136) }, /* Dell Wireless HSDPA 5520 == Novatel Expedite EU860D */
184 { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */ 192 { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */
185 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, 193 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },
186 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, 194 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
187 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, 195 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
188 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, 196 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
197 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
198 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
189 { } /* Terminating entry */ 199 { } /* Terminating entry */
190}; 200};
191MODULE_DEVICE_TABLE(usb, option_ids); 201MODULE_DEVICE_TABLE(usb, option_ids);
@@ -247,10 +257,10 @@ static int debug;
247struct option_port_private { 257struct option_port_private {
248 /* Input endpoints and buffer for this port */ 258 /* Input endpoints and buffer for this port */
249 struct urb *in_urbs[N_IN_URB]; 259 struct urb *in_urbs[N_IN_URB];
250 char in_buffer[N_IN_URB][IN_BUFLEN]; 260 u8 *in_buffer[N_IN_URB];
251 /* Output endpoints and buffer for this port */ 261 /* Output endpoints and buffer for this port */
252 struct urb *out_urbs[N_OUT_URB]; 262 struct urb *out_urbs[N_OUT_URB];
253 char out_buffer[N_OUT_URB][OUT_BUFLEN]; 263 u8 *out_buffer[N_OUT_URB];
254 unsigned long out_busy; /* Bit vector of URBs in use */ 264 unsigned long out_busy; /* Bit vector of URBs in use */
255 265
256 /* Settings for the port */ 266 /* Settings for the port */
@@ -737,9 +747,10 @@ static int option_send_setup(struct usb_serial_port *port)
737 747
738static int option_startup(struct usb_serial *serial) 748static int option_startup(struct usb_serial *serial)
739{ 749{
740 int i, err; 750 int i, j, err;
741 struct usb_serial_port *port; 751 struct usb_serial_port *port;
742 struct option_port_private *portdata; 752 struct option_port_private *portdata;
753 u8 *buffer;
743 754
744 dbg("%s", __FUNCTION__); 755 dbg("%s", __FUNCTION__);
745 756
@@ -753,6 +764,20 @@ static int option_startup(struct usb_serial *serial)
753 return (1); 764 return (1);
754 } 765 }
755 766
767 for (j = 0; j < N_IN_URB; j++) {
768 buffer = (u8 *)__get_free_page(GFP_KERNEL);
769 if (!buffer)
770 goto bail_out_error;
771 portdata->in_buffer[j] = buffer;
772 }
773
774 for (j = 0; j < N_OUT_URB; j++) {
775 buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
776 if (!buffer)
777 goto bail_out_error2;
778 portdata->out_buffer[j] = buffer;
779 }
780
756 usb_set_serial_port_data(port, portdata); 781 usb_set_serial_port_data(port, portdata);
757 782
758 if (! port->interrupt_in_urb) 783 if (! port->interrupt_in_urb)
@@ -766,6 +791,16 @@ static int option_startup(struct usb_serial *serial)
766 option_setup_urbs(serial); 791 option_setup_urbs(serial);
767 792
768 return (0); 793 return (0);
794
795bail_out_error2:
796 for (j = 0; j < N_OUT_URB; j++)
797 kfree(portdata->out_buffer[j]);
798bail_out_error:
799 for (j = 0; j < N_IN_URB; j++)
800 if (portdata->in_buffer[j])
801 free_page((unsigned long)portdata->in_buffer[j]);
802 kfree(portdata);
803 return 1;
769} 804}
770 805
771static void option_shutdown(struct usb_serial *serial) 806static void option_shutdown(struct usb_serial *serial)
@@ -794,12 +829,14 @@ static void option_shutdown(struct usb_serial *serial)
794 for (j = 0; j < N_IN_URB; j++) { 829 for (j = 0; j < N_IN_URB; j++) {
795 if (portdata->in_urbs[j]) { 830 if (portdata->in_urbs[j]) {
796 usb_free_urb(portdata->in_urbs[j]); 831 usb_free_urb(portdata->in_urbs[j]);
832 free_page((unsigned long)portdata->in_buffer[j]);
797 portdata->in_urbs[j] = NULL; 833 portdata->in_urbs[j] = NULL;
798 } 834 }
799 } 835 }
800 for (j = 0; j < N_OUT_URB; j++) { 836 for (j = 0; j < N_OUT_URB; j++) {
801 if (portdata->out_urbs[j]) { 837 if (portdata->out_urbs[j]) {
802 usb_free_urb(portdata->out_urbs[j]); 838 usb_free_urb(portdata->out_urbs[j]);
839 kfree(portdata->out_buffer[j]);
803 portdata->out_urbs[j] = NULL; 840 portdata->out_urbs[j] = NULL;
804 } 841 }
805 } 842 }
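The option.c changes move the per-port URB buffers out of struct option_port_private and onto the heap: receive buffers come from __get_free_page() and are released with free_page(), transmit buffers come from kmalloc() and are released with kfree(), both in option_shutdown() and in the new bail_out_error labels. The pairing, condensed to one buffer of each kind (the helper below is hypothetical):

    static int alloc_one_buffer_pair(u8 **in_buf, u8 **out_buf)
    {
            *in_buf  = (u8 *)__get_free_page(GFP_KERNEL);   /* RX: one page */
            *out_buf = kmalloc(OUT_BUFLEN, GFP_KERNEL);     /* TX: OUT_BUFLEN bytes */
            if (!*in_buf || !*out_buf) {
                    kfree(*out_buf);                        /* kfree(NULL) is a no-op */
                    if (*in_buf)
                            free_page((unsigned long)*in_buf);
                    return -ENOMEM;
            }
            return 0;
    }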
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 4c925e3e8a63..e3d44ae8d448 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -178,7 +178,6 @@ static struct usb_device_id id_table [] = {
178 178
179 { USB_DEVICE(0x1199, 0x0112), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 580 */ 179 { USB_DEVICE(0x1199, 0x0112), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 580 */
180 { USB_DEVICE(0x0F3D, 0x0112), .driver_info = DEVICE_1_PORT }, /* Airprime/Sierra PC 5220 */ 180 { USB_DEVICE(0x0F3D, 0x0112), .driver_info = DEVICE_1_PORT }, /* Airprime/Sierra PC 5220 */
181 { USB_DEVICE(0x05C6, 0x6613), .driver_info = DEVICE_1_PORT }, /* Onda H600/ZTE MF330 */
182 181
183 { USB_DEVICE(0x1199, 0x0FFF), .driver_info = DEVICE_INSTALLER}, 182 { USB_DEVICE(0x1199, 0x0FFF), .driver_info = DEVICE_INSTALLER},
184 { } 183 { }
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index a41ce21c0697..958f5b17847c 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -150,13 +150,14 @@ void usb_stor_transparent_scsi_command(struct scsi_cmnd *srb,
150 150
151/* Copy a buffer of length buflen to/from the srb's transfer buffer. 151/* Copy a buffer of length buflen to/from the srb's transfer buffer.
152 * Update the **sgptr and *offset variables so that the next copy will 152 * Update the **sgptr and *offset variables so that the next copy will
153 * pick up from where this one left off. */ 153 * pick up from where this one left off.
154 154 */
155unsigned int usb_stor_access_xfer_buf(unsigned char *buffer, 155unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
156 unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **sgptr, 156 unsigned int buflen, struct scsi_cmnd *srb, struct scatterlist **sgptr,
157 unsigned int *offset, enum xfer_buf_dir dir) 157 unsigned int *offset, enum xfer_buf_dir dir)
158{ 158{
159 unsigned int cnt; 159 unsigned int cnt;
160 struct scatterlist *sg = *sgptr;
160 161
161 /* We have to go through the list one entry 162 /* We have to go through the list one entry
162 * at a time. Each s-g entry contains some number of pages, and 163 * at a time. Each s-g entry contains some number of pages, and
@@ -164,22 +165,23 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
164 * in kernel-addressable memory then kmap() will return its address. 165 * in kernel-addressable memory then kmap() will return its address.
165 * If the page is not directly accessible -- such as a user buffer 166 * If the page is not directly accessible -- such as a user buffer
166 * located in high memory -- then kmap() will map it to a temporary 167 * located in high memory -- then kmap() will map it to a temporary
167 * position in the kernel's virtual address space. */ 168 * position in the kernel's virtual address space.
168 struct scatterlist *sg = *sgptr; 169 */
169 170
170 if (!sg) 171 if (!sg)
171 sg = scsi_sglist(srb); 172 sg = scsi_sglist(srb);
173 buflen = min(buflen, scsi_bufflen(srb));
172 174
173 /* This loop handles a single s-g list entry, which may 175 /* This loop handles a single s-g list entry, which may
174 * include multiple pages. Find the initial page structure 176 * include multiple pages. Find the initial page structure
175 * and the starting offset within the page, and update 177 * and the starting offset within the page, and update
176 * the *offset and **sgptr values for the next loop. */ 178 * the *offset and **sgptr values for the next loop.
179 */
177 cnt = 0; 180 cnt = 0;
178 while (cnt < buflen) { 181 while (cnt < buflen && sg) {
179 struct page *page = sg_page(sg) + 182 struct page *page = sg_page(sg) +
180 ((sg->offset + *offset) >> PAGE_SHIFT); 183 ((sg->offset + *offset) >> PAGE_SHIFT);
181 unsigned int poff = 184 unsigned int poff = (sg->offset + *offset) & (PAGE_SIZE-1);
182 (sg->offset + *offset) & (PAGE_SIZE-1);
183 unsigned int sglen = sg->length - *offset; 185 unsigned int sglen = sg->length - *offset;
184 186
185 if (sglen > buflen - cnt) { 187 if (sglen > buflen - cnt) {
@@ -222,14 +224,15 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
222} 224}
223 225
224/* Store the contents of buffer into srb's transfer buffer and set the 226/* Store the contents of buffer into srb's transfer buffer and set the
225 * SCSI residue. */ 227 * SCSI residue.
228 */
226void usb_stor_set_xfer_buf(unsigned char *buffer, 229void usb_stor_set_xfer_buf(unsigned char *buffer,
227 unsigned int buflen, struct scsi_cmnd *srb) 230 unsigned int buflen, struct scsi_cmnd *srb)
228{ 231{
229 unsigned int offset = 0; 232 unsigned int offset = 0;
230 struct scatterlist *sg = NULL; 233 struct scatterlist *sg = NULL;
231 234
232 usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset, 235 buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
233 TO_XFER_BUF); 236 TO_XFER_BUF);
234 if (buflen < scsi_bufflen(srb)) 237 if (buflen < scsi_bufflen(srb))
235 scsi_set_resid(srb, scsi_bufflen(srb) - buflen); 238 scsi_set_resid(srb, scsi_bufflen(srb) - buflen);
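The protocol.c changes make the scatter-gather copy defensive: the requested length is clamped to scsi_bufflen(srb), the walk also stops when the s-g list runs out (`cnt < buflen && sg`), and usb_stor_set_xfer_buf() now bases the SCSI residue on the number of bytes actually copied. The residue computation, as it reads after the change:

    /* Copy at most buflen bytes into the command's transfer buffer, then
     * record whatever part of the SCSI buffer was left unfilled. */
    buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset,
                                      TO_XFER_BUF);     /* returns bytes copied */
    if (buflen < scsi_bufflen(srb))
            scsi_set_resid(srb, scsi_bufflen(srb) - buflen);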
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index d9f4912f873d..5780ed15f1ad 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -891,17 +891,6 @@ int usb_stor_Bulk_max_lun(struct us_data *us)
891 if (result > 0) 891 if (result > 0)
892 return us->iobuf[0]; 892 return us->iobuf[0];
893 893
894 /*
895 * Some devices (i.e. Iomega Zip100) need this -- apparently
896 * the bulk pipes get STALLed when the GetMaxLUN request is
897 * processed. This is, in theory, harmless to all other devices
898 * (regardless of if they stall or not).
899 */
900 if (result == -EPIPE) {
901 usb_stor_clear_halt(us, us->recv_bulk_pipe);
902 usb_stor_clear_halt(us, us->send_bulk_pipe);
903 }
904
905 /* 894 /*
906 * Some devices don't like GetMaxLUN. They may STALL the control 895 * Some devices don't like GetMaxLUN. They may STALL the control
907 * pipe, they may return a zero-length result, they may do nothing at 896 * pipe, they may return a zero-length result, they may do nothing at
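The block removed above was a Get-Max-LUN workaround for the Iomega ZIP 100 (clearing the stalled bulk endpoints after the request). It becomes unnecessary because the unusual_devs.h hunk below marks that device US_FL_SINGLE_LUN, which keeps usb-storage from issuing Get-Max-LUN at all. Roughly the decision the flag feeds into, as a sketch (field and flag names follow usb-storage of this era and should be treated as assumptions):

    /* With US_FL_SINGLE_LUN the device is assumed to expose one LUN and
     * usb_stor_Bulk_max_lun() is never called for it. */
    if (us->flags & US_FL_SINGLE_LUN)
            us->max_lun = 0;
    else
            us->max_lun = usb_stor_Bulk_max_lun(us);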
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index fe12737e0e2b..99679a8cfa02 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -357,7 +357,7 @@ UNUSUAL_DEV( 0x04b0, 0x040f, 0x0100, 0x0200,
357 US_FL_FIX_CAPACITY), 357 US_FL_FIX_CAPACITY),
358 358
359/* Reported by Emil Larsson <emil@swip.net> */ 359/* Reported by Emil Larsson <emil@swip.net> */
360UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0101, 360UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0110,
361 "NIKON", 361 "NIKON",
362 "NIKON DSC D80", 362 "NIKON DSC D80",
363 US_SC_DEVICE, US_PR_DEVICE, NULL, 363 US_SC_DEVICE, US_PR_DEVICE, NULL,
@@ -759,6 +759,18 @@ UNUSUAL_DEV( 0x0595, 0x4343, 0x0000, 0x2210,
759 "Digital Camera EX-20 DSC", 759 "Digital Camera EX-20 DSC",
760 US_SC_8070, US_PR_DEVICE, NULL, 0 ), 760 US_SC_8070, US_PR_DEVICE, NULL, 0 ),
761 761
762/* Reported by Andre Welter <a.r.welter@gmx.de>
763 * This antique device predates the release of the Bulk-only Transport
764 * spec, and if it gets a Get-Max-LUN then it requires the host to do a
765 * Clear-Halt on the bulk endpoints. The SINGLE_LUN flag will prevent
766 * us from sending the request.
767 */
768UNUSUAL_DEV( 0x059b, 0x0001, 0x0100, 0x0100,
769 "Iomega",
770 "ZIP 100",
771 US_SC_DEVICE, US_PR_DEVICE, NULL,
772 US_FL_SINGLE_LUN ),
773
762/* Reported by <Hendryk.Pfeiffer@gmx.de> */ 774/* Reported by <Hendryk.Pfeiffer@gmx.de> */
763UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000, 775UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000,
764 "LaCie", 776 "LaCie",
@@ -1412,6 +1424,17 @@ UNUSUAL_DEV( 0x0ed1, 0x7636, 0x0103, 0x0103,
1412 US_SC_DEVICE, US_PR_DEVICE, NULL, 1424 US_SC_DEVICE, US_PR_DEVICE, NULL,
1413 US_FL_IGNORE_RESIDUE | US_FL_GO_SLOW | US_FL_MAX_SECTORS_64), 1425 US_FL_IGNORE_RESIDUE | US_FL_GO_SLOW | US_FL_MAX_SECTORS_64),
1414 1426
1427/* Patch by Leonid Petrov mail at lpetrov.net
1428 * Reported by Robert Spitzenpfeil <robert@spitzenpfeil.org>
1429 * http://www.qbik.ch/usb/devices/showdev.php?id=1705
1430 * Updated to 103 device by MJ Ray mjr at phonecoop.coop
1431 */
1432UNUSUAL_DEV( 0x0f19, 0x0103, 0x0100, 0x0100,
1433 "Oracom Co., Ltd",
1434 "ORC-200M",
1435 US_SC_DEVICE, US_PR_DEVICE, NULL,
1436 US_FL_IGNORE_RESIDUE ),
1437
1415/* David Kuehling <dvdkhlng@gmx.de>: 1438/* David Kuehling <dvdkhlng@gmx.de>:
1416 * for MP3-Player AVOX WSX-300ER (bought in Japan). Reports lots of SCSI 1439 * for MP3-Player AVOX WSX-300ER (bought in Japan). Reports lots of SCSI
1417 * errors when trying to write. 1440 * errors when trying to write.
@@ -1477,6 +1500,15 @@ UNUSUAL_DEV( 0x1019, 0x0c55, 0x0000, 0x0110,
1477 US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init, 1500 US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init,
1478 0 ), 1501 0 ),
1479 1502
1503/* Reported by Fabio Venturi <f.venturi@tdnet.it>
1504 * The device reports a vendor-specific bDeviceClass.
1505 */
1506UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
1507 "Actions Semiconductor",
1508 "Mtp device",
1509 US_SC_DEVICE, US_PR_DEVICE, NULL,
1510 0),
1511
1480/* Reported by Kevin Lloyd <linux@sierrawireless.com> 1512/* Reported by Kevin Lloyd <linux@sierrawireless.com>
1481 * Entry is needed for the initializer function override, 1513 * Entry is needed for the initializer function override,
1482 * which instructs the device to load as a modem 1514 * which instructs the device to load as a modem
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index d775eb6590b6..62f9c6e387cc 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -1913,61 +1913,6 @@ static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
1913 par->mmaped = 1; 1913 par->mmaped = 1;
1914 return 0; 1914 return 0;
1915} 1915}
1916
1917static struct {
1918 u32 yoffset;
1919 u8 r[2][256];
1920 u8 g[2][256];
1921 u8 b[2][256];
1922} atyfb_save;
1923
1924static void atyfb_save_palette(struct atyfb_par *par, int enter)
1925{
1926 int i, tmp;
1927
1928 for (i = 0; i < 256; i++) {
1929 tmp = aty_ld_8(DAC_CNTL, par) & 0xfc;
1930 if (M64_HAS(EXTRA_BRIGHT))
1931 tmp |= 0x2;
1932 aty_st_8(DAC_CNTL, tmp, par);
1933 aty_st_8(DAC_MASK, 0xff, par);
1934
1935 aty_st_8(DAC_R_INDEX, i, par);
1936 atyfb_save.r[enter][i] = aty_ld_8(DAC_DATA, par);
1937 atyfb_save.g[enter][i] = aty_ld_8(DAC_DATA, par);
1938 atyfb_save.b[enter][i] = aty_ld_8(DAC_DATA, par);
1939 aty_st_8(DAC_W_INDEX, i, par);
1940 aty_st_8(DAC_DATA, atyfb_save.r[1 - enter][i], par);
1941 aty_st_8(DAC_DATA, atyfb_save.g[1 - enter][i], par);
1942 aty_st_8(DAC_DATA, atyfb_save.b[1 - enter][i], par);
1943 }
1944}
1945
1946static void atyfb_palette(int enter)
1947{
1948 struct atyfb_par *par;
1949 struct fb_info *info;
1950 int i;
1951
1952 for (i = 0; i < FB_MAX; i++) {
1953 info = registered_fb[i];
1954 if (info && info->fbops == &atyfb_ops) {
1955 par = (struct atyfb_par *) info->par;
1956
1957 atyfb_save_palette(par, enter);
1958 if (enter) {
1959 atyfb_save.yoffset = info->var.yoffset;
1960 info->var.yoffset = 0;
1961 set_off_pitch(par, info);
1962 } else {
1963 info->var.yoffset = atyfb_save.yoffset;
1964 set_off_pitch(par, info);
1965 }
1966 aty_st_le32(CRTC_OFF_PITCH, par->crtc.off_pitch, par);
1967 break;
1968 }
1969 }
1970}
1971#endif /* __sparc__ */ 1916#endif /* __sparc__ */
1972 1917
1973 1918
@@ -2670,10 +2615,6 @@ static int __devinit aty_init(struct fb_info *info)
2670 goto aty_init_exit; 2615 goto aty_init_exit;
2671 } 2616 }
2672 2617
2673#ifdef __sparc__
2674 atyfb_save_palette(par, 0);
2675#endif
2676
2677#ifdef CONFIG_FB_ATY_CT 2618#ifdef CONFIG_FB_ATY_CT
2678 if (!noaccel && M64_HAS(INTEGRATED)) 2619 if (!noaccel && M64_HAS(INTEGRATED))
2679 aty_init_cursor(info); 2620 aty_init_cursor(info);
@@ -2900,8 +2841,6 @@ static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
2900 2841
2901#ifdef __sparc__ 2842#ifdef __sparc__
2902 2843
2903extern void (*prom_palette) (int);
2904
2905static int __devinit atyfb_setup_sparc(struct pci_dev *pdev, 2844static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
2906 struct fb_info *info, unsigned long addr) 2845 struct fb_info *info, unsigned long addr)
2907{ 2846{
@@ -3536,9 +3475,6 @@ static int __devinit atyfb_pci_probe(struct pci_dev *pdev, const struct pci_devi
3536 goto err_release_io; 3475 goto err_release_io;
3537 3476
3538#ifdef __sparc__ 3477#ifdef __sparc__
3539 if (!prom_palette)
3540 prom_palette = atyfb_palette;
3541
3542 /* 3478 /*
3543 * Add /dev/fb mmap values. 3479 * Add /dev/fb mmap values.
3544 */ 3480 */
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c
index 41f6dbf61be7..fdc9f43ec30a 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/cg14.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/fb.h> 17#include <linux/fb.h>
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/uaccess.h>
19 20
20#include <asm/io.h> 21#include <asm/io.h>
21#include <asm/prom.h> 22#include <asm/prom.h>
diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c
index 6796ba62c3c6..777389c40988 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/chipsfb.c
@@ -459,7 +459,7 @@ static int chipsfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
459 459
460 if (state.event == pdev->dev.power.power_state.event) 460 if (state.event == pdev->dev.power.power_state.event)
461 return 0; 461 return 0;
462 if (state.event != PM_EVENT_SUSPEND) 462 if (!(state.event & PM_EVENT_SLEEP))
463 goto done; 463 goto done;
464 464
465 acquire_console_sem(); 465 acquire_console_sem();
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 74517b1b26a6..596652d2831f 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -1066,7 +1066,7 @@ static int nvidiafb_suspend(struct pci_dev *dev, pm_message_t mesg)
1066 acquire_console_sem(); 1066 acquire_console_sem();
1067 par->pm_state = mesg.event; 1067 par->pm_state = mesg.event;
1068 1068
1069 if (mesg.event == PM_EVENT_SUSPEND) { 1069 if (mesg.event & PM_EVENT_SLEEP) {
1070 fb_set_suspend(info, 1); 1070 fb_set_suspend(info, 1);
1071 nvidiafb_blank(FB_BLANK_POWERDOWN, info); 1071 nvidiafb_blank(FB_BLANK_POWERDOWN, info);
1072 nvidia_write_regs(par, &par->SavedReg); 1072 nvidia_write_regs(par, &par->SavedReg);
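Both framebuffer suspend hooks above switch from testing state.event against PM_EVENT_SUSPEND alone to masking with PM_EVENT_SLEEP, so hibernation is treated the same way as suspend. Assuming PM_EVENT_SLEEP is the usual suspend-or-hibernate mask from <linux/pm.h> (an assumption worth checking against this tree), the test amounts to:

    /* Treat both suspend-to-RAM and suspend-to-disk as "going to sleep". */
    if (mesg.event & PM_EVENT_SLEEP) {      /* PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE */
            fb_set_suspend(info, 1);
            /* ... quiesce and power down the hardware ... */
    }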
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 10f912df2dad..97facb121c73 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1046,7 +1046,7 @@ pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data)
1046 switch (val) { 1046 switch (val) {
1047 case CPUFREQ_ADJUST: 1047 case CPUFREQ_ADJUST:
1048 case CPUFREQ_INCOMPATIBLE: 1048 case CPUFREQ_INCOMPATIBLE:
1049 printk(KERN_DEBUG "min dma period: %d ps, " 1049 pr_debug("min dma period: %d ps, "
1050 "new clock %d kHz\n", pxafb_display_dma_period(var), 1050 "new clock %d kHz\n", pxafb_display_dma_period(var),
1051 policy->max); 1051 policy->max);
1052 // TODO: fill in min/max values 1052 // TODO: fill in min/max values
@@ -1361,7 +1361,7 @@ static int __init pxafb_parse_options(struct device *dev, char *options)
1361} 1361}
1362#endif 1362#endif
1363 1363
1364int __init pxafb_probe(struct platform_device *dev) 1364static int __init pxafb_probe(struct platform_device *dev)
1365{ 1365{
1366 struct pxafb_info *fbi; 1366 struct pxafb_info *fbi;
1367 struct pxafb_mach_info *inf; 1367 struct pxafb_mach_info *inf;
@@ -1486,7 +1486,7 @@ static struct platform_driver pxafb_driver = {
1486}; 1486};
1487 1487
1488#ifndef MODULE 1488#ifndef MODULE
1489int __devinit pxafb_setup(char *options) 1489static int __devinit pxafb_setup(char *options)
1490{ 1490{
1491# ifdef CONFIG_FB_PXA_PARAMETERS 1491# ifdef CONFIG_FB_PXA_PARAMETERS
1492 if (options) 1492 if (options)
@@ -1501,7 +1501,7 @@ MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)");
1501# endif 1501# endif
1502#endif 1502#endif
1503 1503
1504int __devinit pxafb_init(void) 1504static int __devinit pxafb_init(void)
1505{ 1505{
1506#ifndef MODULE 1506#ifndef MODULE
1507 char *option = NULL; 1507 char *option = NULL;
diff --git a/drivers/video/sbuslib.c b/drivers/video/sbuslib.c
index 963a454b7074..4deaac05b938 100644
--- a/drivers/video/sbuslib.c
+++ b/drivers/video/sbuslib.c
@@ -9,6 +9,7 @@
9#include <linux/string.h> 9#include <linux/string.h>
10#include <linux/fb.h> 10#include <linux/fb.h>
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/uaccess.h>
12 13
13#include <asm/oplib.h> 14#include <asm/oplib.h>
14#include <asm/fbio.h> 15#include <asm/fbio.h>
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index be27b9c1ed72..93361656316c 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -44,7 +44,7 @@ static struct fb_fix_screeninfo uvesafb_fix __devinitdata = {
44 44
45static int mtrr __devinitdata = 3; /* enable mtrr by default */ 45static int mtrr __devinitdata = 3; /* enable mtrr by default */
46static int blank = 1; /* enable blanking by default */ 46static int blank = 1; /* enable blanking by default */
47static int ypan __devinitdata = 1; /* 0: scroll, 1: ypan, 2: ywrap */ 47static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */
48static int pmi_setpal __devinitdata = 1; /* use PMI for palette changes */ 48static int pmi_setpal __devinitdata = 1; /* use PMI for palette changes */
49static int nocrtc __devinitdata; /* ignore CRTC settings */ 49static int nocrtc __devinitdata; /* ignore CRTC settings */
50static int noedid __devinitdata; /* don't try DDC transfers */ 50static int noedid __devinitdata; /* don't try DDC transfers */
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index afcdc69e37d6..254d115cafab 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -402,6 +402,18 @@ config IT8712F_WDT
402 To compile this driver as a module, choose M here: the 402 To compile this driver as a module, choose M here: the
403 module will be called it8712f_wdt. 403 module will be called it8712f_wdt.
404 404
405config HP_WATCHDOG
406 tristate "HP Proliant iLO 2 Hardware Watchdog Timer"
407 depends on X86
408 help
409 A software monitoring watchdog and NMI sourcing driver. This driver
410 will detect lockups and provide a stack trace. Also, when an NMI
411 occurs this driver will make the necessary BIOS calls to log
412 the cause of the NMI. This is a driver that will only load on an
413 HP ProLiant system with a minimum of iLO2 support.
414 To compile this driver as a module, choose M here: the
415 module will be called hpwdt.
416
405config SC1200_WDT 417config SC1200_WDT
406 tristate "National Semiconductor PC87307/PC97307 (ala SC1200) Watchdog" 418 tristate "National Semiconductor PC87307/PC97307 (ala SC1200) Watchdog"
407 depends on X86 419 depends on X86
@@ -633,6 +645,19 @@ config WDT_RM9K_GPI
633 To compile this driver as a module, choose M here: the 645 To compile this driver as a module, choose M here: the
634 module will be called rm9k_wdt. 646 module will be called rm9k_wdt.
635 647
648config SIBYTE_WDOG
649 tristate "Sibyte SoC hardware watchdog"
650 depends on CPU_SB1
651 help
652 Watchdog driver for the built in watchdog hardware in Sibyte
653 SoC processors. There are apparently two watchdog timers
654 on such processors; this driver supports only the first one,
655 because currently Linux only supports exporting one watchdog
656 to userspace.
657
658 To compile this driver as a loadable module, choose M here.
659 The module will be called sb_wdog.
660
636config AR7_WDT 661config AR7_WDT
637 tristate "TI AR7 Watchdog Timer" 662 tristate "TI AR7 Watchdog Timer"
638 depends on AR7 663 depends on AR7
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index ebc21146d40c..f3fb170fe5c6 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_WAFER_WDT) += wafer5823wdt.o
67obj-$(CONFIG_I6300ESB_WDT) += i6300esb.o 67obj-$(CONFIG_I6300ESB_WDT) += i6300esb.o
68obj-$(CONFIG_ITCO_WDT) += iTCO_wdt.o iTCO_vendor_support.o 68obj-$(CONFIG_ITCO_WDT) += iTCO_wdt.o iTCO_vendor_support.o
69obj-$(CONFIG_IT8712F_WDT) += it8712f_wdt.o 69obj-$(CONFIG_IT8712F_WDT) += it8712f_wdt.o
70obj-$(CONFIG_HP_WATCHDOG) += hpwdt.o
70obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o 71obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o
71obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o 72obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o
72obj-$(CONFIG_PC87413_WDT) += pc87413_wdt.o 73obj-$(CONFIG_PC87413_WDT) += pc87413_wdt.o
@@ -92,6 +93,7 @@ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
92obj-$(CONFIG_INDYDOG) += indydog.o 93obj-$(CONFIG_INDYDOG) += indydog.o
93obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o 94obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
94obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o 95obj-$(CONFIG_WDT_RM9K_GPI) += rm9k_wdt.o
96obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
95obj-$(CONFIG_AR7_WDT) += ar7_wdt.o 97obj-$(CONFIG_AR7_WDT) += ar7_wdt.o
96obj-$(CONFIG_TXX9_WDT) += txx9wdt.o 98obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
97 99
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index 472be10f0686..1237113dc14a 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -29,6 +29,7 @@
29 29
30#define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args) 30#define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __func__, __LINE__, ## args)
31#define stampit() stamp("here i am") 31#define stampit() stamp("here i am")
32#define pr_init(fmt, args...) ({ static const __initdata char __fmt[] = fmt; printk(__fmt, ## args); })
32 33
33#define WATCHDOG_NAME "bfin-wdt" 34#define WATCHDOG_NAME "bfin-wdt"
34#define PFX WATCHDOG_NAME ": " 35#define PFX WATCHDOG_NAME ": "
@@ -445,19 +446,19 @@ static int __init bfin_wdt_init(void)
445 446
446 ret = register_reboot_notifier(&bfin_wdt_notifier); 447 ret = register_reboot_notifier(&bfin_wdt_notifier);
447 if (ret) { 448 if (ret) {
448 printk(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", ret); 449 pr_init(KERN_ERR PFX "cannot register reboot notifier (err=%d)\n", ret);
449 return ret; 450 return ret;
450 } 451 }
451 452
452 ret = misc_register(&bfin_wdt_miscdev); 453 ret = misc_register(&bfin_wdt_miscdev);
453 if (ret) { 454 if (ret) {
454 printk(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n", 455 pr_init(KERN_ERR PFX "cannot register miscdev on minor=%d (err=%d)\n",
455 WATCHDOG_MINOR, ret); 456 WATCHDOG_MINOR, ret);
456 unregister_reboot_notifier(&bfin_wdt_notifier); 457 unregister_reboot_notifier(&bfin_wdt_notifier);
457 return ret; 458 return ret;
458 } 459 }
459 460
460 printk(KERN_INFO PFX "initialized: timeout=%d sec (nowayout=%d)\n", 461 pr_init(KERN_INFO PFX "initialized: timeout=%d sec (nowayout=%d)\n",
461 timeout, nowayout); 462 timeout, nowayout);
462 463
463 return 0; 464 return 0;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
new file mode 100644
index 000000000000..a2e174b09fe7
--- /dev/null
+++ b/drivers/watchdog/hpwdt.c
@@ -0,0 +1,926 @@
1/*
2 * HP WatchDog Driver
3 * based on
4 *
5 * SoftDog 0.05: A Software Watchdog Device
6 *
7 * (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
8 * Thomas Mingarelli <thomas.mingarelli@hp.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 2 as published by the Free Software Foundation
13 *
14 */
15
16#include <linux/device.h>
17#include <linux/fs.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/io.h>
21#include <linux/irq.h>
22#include <linux/kernel.h>
23#include <linux/miscdevice.h>
24#include <linux/mm.h>
25#include <linux/module.h>
26#include <linux/kdebug.h>
27#include <linux/moduleparam.h>
28#include <linux/notifier.h>
29#include <linux/pci.h>
30#include <linux/pci_ids.h>
31#include <linux/reboot.h>
32#include <linux/sched.h>
33#include <linux/timer.h>
34#include <linux/types.h>
35#include <linux/uaccess.h>
36#include <linux/watchdog.h>
37#include <linux/dmi.h>
38#include <linux/efi.h>
39#include <linux/string.h>
40#include <linux/bootmem.h>
41#include <linux/slab.h>
42#include <asm/dmi.h>
43#include <asm/desc.h>
44#include <asm/kdebug.h>
45
46#define PCI_BIOS32_SD_VALUE 0x5F32335F /* "_32_" */
47#define CRU_BIOS_SIGNATURE_VALUE 0x55524324
48#define PCI_BIOS32_PARAGRAPH_LEN 16
49#define PCI_ROM_BASE1 0x000F0000
50#define ROM_SIZE 0x10000
51
52struct bios32_service_dir {
53 u32 signature;
54 u32 entry_point;
55 u8 revision;
56 u8 length;
57 u8 checksum;
58 u8 reserved[5];
59};
60
61/*
62 * smbios_entry_point - defines SMBIOS entry point structure
63 *
64 * anchor[4] - anchor string (_SM_)
65 * checksum - checksum of the entry point structure
66 * length - length of the entry point structure
67 * major_ver - major version (02h for revision 2.1)
68 * minor_ver - minor version (01h for revision 2.1)
69 * max_struct_size - size of the largest SMBIOS structure
70 * revision - entry point structure revision implemented
71 * formatted_area[5] - reserved
72 * intermediate_anchor[5] - intermediate anchor string (_DMI_)
73 * intermediate_checksum - intermediate checksum
74 * table_length - structure table length
75 * table_address - structure table address
76 * table_num_structs - number of SMBIOS structures present
77 * bcd_revision - BCD revision
78 */
79struct smbios_entry_point {
80 u8 anchor[4];
81 u8 checksum;
82 u8 length;
83 u8 major_ver;
84 u8 minor_ver;
85 u16 max_struct_size;
86 u8 revision;
87 u8 formatted_area[5];
88 u8 intermediate_anchor[5];
89 u8 intermediate_checksum;
90 u16 table_length;
91 u64 table_address;
92 u16 table_num_structs;
93 u8 bcd_revision;
94};
95
96/* type 212 */
97struct smbios_cru64_info {
98 u8 type;
99 u8 byte_length;
100 u16 handle;
101 u32 signature;
102 u64 physical_address;
103 u32 double_length;
104 u32 double_offset;
105};
106#define SMBIOS_CRU64_INFORMATION 212
107
108struct cmn_registers {
109 union {
110 struct {
111 u8 ral;
112 u8 rah;
113 u16 rea2;
114 };
115 u32 reax;
116 } u1;
117 union {
118 struct {
119 u8 rbl;
120 u8 rbh;
121 u8 reb2l;
122 u8 reb2h;
123 };
124 u32 rebx;
125 } u2;
126 union {
127 struct {
128 u8 rcl;
129 u8 rch;
130 u16 rec2;
131 };
132 u32 recx;
133 } u3;
134 union {
135 struct {
136 u8 rdl;
137 u8 rdh;
138 u16 red2;
139 };
140 u32 redx;
141 } u4;
142
143 u32 resi;
144 u32 redi;
145 u16 rds;
146 u16 res;
147 u32 reflags;
148} __attribute__((packed));
149
150#define DEFAULT_MARGIN 30
151static unsigned int soft_margin = DEFAULT_MARGIN; /* in seconds */
152static unsigned int reload; /* the computed soft_margin */
153static int nowayout = WATCHDOG_NOWAYOUT;
154static char expect_release;
155static unsigned long hpwdt_is_open;
156
157static void __iomem *pci_mem_addr; /* the PCI-memory address */
158static unsigned long __iomem *hpwdt_timer_reg;
159static unsigned long __iomem *hpwdt_timer_con;
160
161static DEFINE_SPINLOCK(rom_lock);
162
163static void *cru_rom_addr;
164
165static struct cmn_registers cmn_regs;
166
167static struct pci_device_id hpwdt_devices[] = {
168 {
169 .vendor = PCI_VENDOR_ID_COMPAQ,
170 .device = 0xB203,
171 .subvendor = PCI_ANY_ID,
172 .subdevice = PCI_ANY_ID,
173 },
174 {0}, /* terminate list */
175};
176MODULE_DEVICE_TABLE(pci, hpwdt_devices);
177
178/*
179 * bios_checksum
180 */
181static int __devinit bios_checksum(const char __iomem *ptr, int len)
182{
183 char sum = 0;
184 int i;
185
186 /*
187 * calculate checksum of size bytes. This should add up
188 * to zero if we have a valid header.
189 */
190 for (i = 0; i < len; i++)
191 sum += ptr[i];
192
193 return ((sum == 0) && (len > 0));
194}
195
196#ifndef CONFIG_X86_64
197/* --32 Bit Bios------------------------------------------------------------ */
198
199#define HPWDT_ARCH 32
200
201asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
202 unsigned long *pRomEntry)
203{
204 asm("pushl %ebp \n\t"
205 "movl %esp, %ebp \n\t"
206 "pusha \n\t"
207 "pushf \n\t"
208 "push %es \n\t"
209 "push %ds \n\t"
210 "pop %es \n\t"
211 "movl 8(%ebp),%eax \n\t"
212 "movl 4(%eax),%ebx \n\t"
213 "movl 8(%eax),%ecx \n\t"
214 "movl 12(%eax),%edx \n\t"
215 "movl 16(%eax),%esi \n\t"
216 "movl 20(%eax),%edi \n\t"
217 "movl (%eax),%eax \n\t"
218 "push %cs \n\t"
219 "call *12(%ebp) \n\t"
220 "pushf \n\t"
221 "pushl %eax \n\t"
222 "movl 8(%ebp),%eax \n\t"
223 "movl %ebx,4(%eax) \n\t"
224 "movl %ecx,8(%eax) \n\t"
225 "movl %edx,12(%eax) \n\t"
226 "movl %esi,16(%eax) \n\t"
227 "movl %edi,20(%eax) \n\t"
228 "movw %ds,24(%eax) \n\t"
229 "movw %es,26(%eax) \n\t"
230 "popl %ebx \n\t"
231 "movl %ebx,(%eax) \n\t"
232 "popl %ebx \n\t"
233 "movl %ebx,28(%eax) \n\t"
234 "pop %es \n\t"
235 "popf \n\t"
236 "popa \n\t"
237 "leave \n\t" "ret");
238}
239
240/*
241 * cru_detect
242 *
243 * Routine Description:
244 * This function uses the 32-bit BIOS Service Directory record to
245 * search for a $CRU record.
246 *
247 * Return Value:
248 * 0 : SUCCESS
249 * <0 : FAILURE
250 */
251static int __devinit cru_detect(unsigned long map_entry,
252 unsigned long map_offset)
253{
254 void *bios32_map;
255 unsigned long *bios32_entrypoint;
256 unsigned long cru_physical_address;
257 unsigned long cru_length;
258 unsigned long physical_bios_base = 0;
259 unsigned long physical_bios_offset = 0;
260 int retval = -ENODEV;
261
262 bios32_map = ioremap(map_entry, (2 * PAGE_SIZE));
263
264 if (bios32_map == NULL)
265 return -ENODEV;
266
267 bios32_entrypoint = bios32_map + map_offset;
268
269 cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
270
271 asminline_call(&cmn_regs, bios32_entrypoint);
272
273 if (cmn_regs.u1.ral != 0) {
274 printk(KERN_WARNING
275 "hpwdt: Call succeeded but with an error: 0x%x\n",
276 cmn_regs.u1.ral);
277 } else {
278 physical_bios_base = cmn_regs.u2.rebx;
279 physical_bios_offset = cmn_regs.u4.redx;
280 cru_length = cmn_regs.u3.recx;
281 cru_physical_address =
282 physical_bios_base + physical_bios_offset;
283
284 /* If the values look OK, then map it in. */
285 if ((physical_bios_base + physical_bios_offset)) {
286 cru_rom_addr =
287 ioremap(cru_physical_address, cru_length);
288 if (cru_rom_addr)
289 retval = 0;
290 }
291
292 printk(KERN_DEBUG "hpwdt: CRU Base Address: 0x%lx\n",
293 physical_bios_base);
294 printk(KERN_DEBUG "hpwdt: CRU Offset Address: 0x%lx\n",
295 physical_bios_offset);
296 printk(KERN_DEBUG "hpwdt: CRU Length: 0x%lx\n",
297 cru_length);
298 printk(KERN_DEBUG "hpwdt: CRU Mapped Address: 0x%x\n",
299 (unsigned int)&cru_rom_addr);
300 }
301 iounmap(bios32_map);
302 return retval;
303}
304
305/*
306 * bios32_present
307 *
308 * Routine Description:
309 * This function finds the 32-bit BIOS Service Directory
310 *
311 * Return Value:
312 * 0 : SUCCESS
313 * <0 : FAILURE
314 */
315static int __devinit bios32_present(const char __iomem *p)
316{
317 struct bios32_service_dir *bios_32_ptr;
318 int length;
319 unsigned long map_entry, map_offset;
320
321 bios_32_ptr = (struct bios32_service_dir *) p;
322
323 /*
324 * Search for signature by checking equal to the swizzled value
325 * instead of calling another routine to perform a strcmp.
326 */
327 if (bios_32_ptr->signature == PCI_BIOS32_SD_VALUE) {
328 length = bios_32_ptr->length * PCI_BIOS32_PARAGRAPH_LEN;
329 if (bios_checksum(p, length)) {
330 /*
331 * According to the spec, we're looking for the
332 * first 4KB-aligned address below the entrypoint
333 * listed in the header. The Service Directory code
334 * is guaranteed to occupy no more than 2 4KB pages.
335 */
336 map_entry = bios_32_ptr->entry_point & ~(PAGE_SIZE - 1);
337 map_offset = bios_32_ptr->entry_point - map_entry;
338
339 return cru_detect(map_entry, map_offset);
340 }
341 }
342 return -ENODEV;
343}
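
bios_checksum() is used above (and again in smbios_present() further down) but is defined earlier in the file, outside this hunk. As a rough sketch of what such a helper conventionally does for PCI BIOS32 and SMBIOS headers (an assumption, not the driver's code): all bytes of the header must sum to zero mod 256, and a non-zero return means "checksum OK":

/* illustrative helper, hypothetical name, plain pointers for clarity */
static int checksum_ok(const unsigned char *p, int len)
{
	unsigned char sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum += p[i];

	/* valid when the bytes sum to zero (mod 256) and the length is sane */
	return (sum == 0) && (len > 0);
}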
344
345static int __devinit detect_cru_service(void)
346{
347 char __iomem *p, *q;
348 int rc = -1;
349
350 /*
351 * Search from 0x0f0000 through 0x0fffff, inclusive.
352 */
353 p = ioremap(PCI_ROM_BASE1, ROM_SIZE);
354 if (p == NULL)
355 return -ENOMEM;
356
357 for (q = p; q < p + ROM_SIZE; q += 16) {
358 rc = bios32_present(q);
359 if (!rc)
360 break;
361 }
362 iounmap(p);
363 return rc;
364}
365
366#else
367/* --64 Bit Bios------------------------------------------------------------ */
368
369#define HPWDT_ARCH 64
370
371asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
372 unsigned long *pRomEntry)
373{
374 asm("pushq %rbp \n\t"
375 "movq %rsp, %rbp \n\t"
376 "pushq %rax \n\t"
377 "pushq %rbx \n\t"
378 "pushq %rdx \n\t"
379 "pushq %r12 \n\t"
380 "pushq %r9 \n\t"
381 "movq %rsi, %r12 \n\t"
382 "movq %rdi, %r9 \n\t"
383 "movl 4(%r9),%ebx \n\t"
384 "movl 8(%r9),%ecx \n\t"
385 "movl 12(%r9),%edx \n\t"
386 "movl 16(%r9),%esi \n\t"
387 "movl 20(%r9),%edi \n\t"
388 "movl (%r9),%eax \n\t"
389 "call *%r12 \n\t"
390 "pushfq \n\t"
391 "popq %r12 \n\t"
392 "popfq \n\t"
393 "movl %eax, (%r9) \n\t"
394 "movl %ebx, 4(%r9) \n\t"
395 "movl %ecx, 8(%r9) \n\t"
396 "movl %edx, 12(%r9) \n\t"
397 "movl %esi, 16(%r9) \n\t"
398 "movl %edi, 20(%r9) \n\t"
399 "movq %r12, %rax \n\t"
400 "movl %eax, 28(%r9) \n\t"
401 "popq %r9 \n\t"
402 "popq %r12 \n\t"
403 "popq %rdx \n\t"
404 "popq %rbx \n\t"
405 "popq %rax \n\t"
406 "leave \n\t" "ret");
407}
408
409/*
410 * dmi_find_cru
411 *
412 * Routine Description:
413 * This function checks whether a SMBIOS/DMI record contains
414 * the 64-bit CRU info.
415 *
416 * Return Value:
417 * None.  On success cru_rom_addr is set to the mapped CRU
418 * service; otherwise it is left untouched (NULL).
419 */
420static void __devinit dmi_find_cru(const struct dmi_header *dm)
421{
422 struct smbios_cru64_info *smbios_cru64_ptr;
423 unsigned long cru_physical_address;
424
425 if (dm->type == SMBIOS_CRU64_INFORMATION) {
426 smbios_cru64_ptr = (struct smbios_cru64_info *) dm;
427 if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) {
428 cru_physical_address =
429 smbios_cru64_ptr->physical_address +
430 smbios_cru64_ptr->double_offset;
431 cru_rom_addr = ioremap(cru_physical_address,
432 smbios_cru64_ptr->double_length);
433 }
434 }
435}
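
The fields dereferenced above give a rough picture of the SMBIOS CRU64 record; the actual struct smbios_cru64_info (and the SMBIOS_CRU64_INFORMATION type number) are defined earlier in the file, so the widths and ordering below are assumptions for illustration only:

struct smbios_cru64_info_sketch {	/* hypothetical, see note above */
	u8  type;			/* overlays dm->type */
	u8  byte_length;		/* overlays dm->length */
	u16 handle;
	u32 signature;			/* compared to CRU_BIOS_SIGNATURE_VALUE */
	u64 physical_address;		/* base ... */
	u32 double_length;		/* ... length ... */
	u32 double_offset;		/* ... and offset of the CRU entry */
};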
436
437/*
438 * dmi_table
439 *
440 * Routine Description:
441 * Decode the SMBIOS/DMI table and check if we have a 64bit CRU record
442 * or not.
443 *
444 * We have to be cautious here. We have seen BIOSes with DMI pointers
445 * pointing to completely the wrong place, for example.
446 */
447static void __devinit dmi_table(u8 *buf, int len, int num,
448 void (*decode)(const struct dmi_header *))
449{
450 u8 *data = buf;
451 int i = 0;
452
453 /*
454 * Stop when we see all the items the table claimed to have
455 * OR we run off the end of the table (also happens)
456 */
457 while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
458 const struct dmi_header *dm = (const struct dmi_header *)data;
459
460 /*
461 * We want to know the total length (formatted area and strings)
462 * before decoding to make sure we won't run off the table in
463 * dmi_decode or dmi_string
464 */
465 data += dm->length;
466 while ((data - buf < len - 1) && (data[0] || data[1]))
467 data++;
468 if (data - buf < len - 1)
469 decode(dm);
470 data += 2;
471 i++;
472 }
473}
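
Each SMBIOS structure is a formatted area of dm->length bytes followed by its strings, and the string-set ends with two consecutive NUL bytes; that is exactly what the inner while loop above is skipping. A standalone sketch of the same walk (bounds checks omitted for brevity, unlike the loop above, which also guards against running off the table):

static const u8 *skip_smbios_struct(const u8 *data, u8 formatted_len)
{
	const u8 *p = data + formatted_len;	/* past the formatted area */

	while (p[0] || p[1])			/* scan for the double NUL */
		p++;

	return p + 2;				/* first byte of the next record */
}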
474
475/*
476 * smbios_present
477 *
478 * Routine Description:
479 * This function parses the SMBIOS entry point table to retrieve
480 * the 64 bit CRU Service.
481 *
482 * Return Value:
483 * 0 : SUCCESS
484 * <0 : FAILURE
485 */
486static int __devinit smbios_present(const char __iomem *p)
487{
488 struct smbios_entry_point *eps =
489 (struct smbios_entry_point *) p;
490 int length;
491 u8 *buf;
492
493 /* check if we have indeed the SMBIOS table entry point */
494 if ((strncmp((char *)eps->anchor, "_SM_",
495 sizeof(eps->anchor))) == 0) {
496 length = eps->length;
497
498 /* SMBIOS v2.1 implementation might use 0x1e */
499 if ((length == 0x1e) &&
500 (eps->major_ver == 2) &&
501 (eps->minor_ver == 1))
502 length = 0x1f;
503
504 /*
505 * Now we will check:
506 * - SMBIOS checksum must be 0
507 * - intermediate anchor should be _DMI_
508 * - intermediate checksum should be 0
509 */
510 if ((bios_checksum(p, length)) &&
511 (strncmp((char *)eps->intermediate_anchor, "_DMI_",
512 sizeof(eps->intermediate_anchor)) == 0) &&
513 (bios_checksum(p+0x10, 15))) {
514 buf = ioremap(eps->table_address, eps->table_length);
515 if (buf == NULL)
516 return -ENODEV;
517
518
519 /* Scan the DMI table for the 64 bit CRU service */
520 dmi_table(buf, eps->table_length,
521 eps->table_num_structs, dmi_find_cru);
522
523 iounmap(buf);
524 return 0;
525 }
526 }
527
528 return -ENODEV;
529}
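
For reference, the checks above correspond to the SMBIOS 2.x entry point layout: "_SM_" at offset 0, the entry point length at offset 5, the "_DMI_" intermediate anchor at offset 0x10, and a 15-byte intermediate (DMI) checksum. A compact sketch of the same validation over a plain byte buffer (it skips the 0x1e/0x1f v2.1 quirk handled above), reusing the hypothetical checksum_ok() sketched earlier and memcmp() from <linux/string.h>:

static int entry_point_looks_valid(const unsigned char *p)
{
	unsigned char eps_length = p[5];	/* entry point length field */

	return memcmp(p, "_SM_", 4) == 0 &&
	       checksum_ok(p, eps_length) &&
	       memcmp(p + 0x10, "_DMI_", 5) == 0 &&
	       checksum_ok(p + 0x10, 15);
}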
530
531static int __devinit smbios_scan_machine(void)
532{
533 char __iomem *p, *q;
534 int rc;
535
536 if (efi_enabled) {
537 if (efi.smbios == EFI_INVALID_TABLE_ADDR)
538 return -ENODEV;
539
540 p = ioremap(efi.smbios, 32);
541 if (p == NULL)
542 return -ENOMEM;
543
544 rc = smbios_present(p);
545 iounmap(p);
546 } else {
547 /*
548 * Search from 0x0f0000 through 0x0fffff, inclusive.
549 */
550 p = ioremap(PCI_ROM_BASE1, ROM_SIZE);
551 if (p == NULL)
552 return -ENOMEM;
553
554 for (q = p; q < p + ROM_SIZE; q += 16) {
555 rc = smbios_present(q);
556 if (!rc) {
557 break;
558 }
559 }
560 iounmap(p);
561 }
	return rc;
562}
563
564static int __devinit detect_cru_service(void)
565{
566 cru_rom_addr = NULL;
567
568 smbios_scan_machine(); /* will become dmi_walk(dmi_find_cru); */
569
570 /* if cru_rom_addr has been set then we found a CRU service */
571 return ((cru_rom_addr != NULL)? 0: -ENODEV);
572}
573
574/* ------------------------------------------------------------------------- */
575
576#endif
577
578/*
579 * NMI Handler
580 */
581static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
582 void *data)
583{
584 static unsigned long rom_pl;
585 static int die_nmi_called;
586
587 if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI)
588 return NOTIFY_OK;
589
590 spin_lock_irqsave(&rom_lock, rom_pl);
591 if (!die_nmi_called)
592 asminline_call(&cmn_regs, cru_rom_addr);
593 die_nmi_called = 1;
594 spin_unlock_irqrestore(&rom_lock, rom_pl);
595 if (cmn_regs.u1.ral == 0) {
596 printk(KERN_WARNING "hpwdt: An NMI occurred, "
597 "but unable to determine source.\n");
598 } else {
599 panic("An NMI occurred, please see the Integrated "
600 "Management Log for details.\n");
601 }
602
603 return NOTIFY_STOP;
604}
605
606/*
607 * Watchdog operations
608 */
609static void hpwdt_start(void)
610{
611 reload = (soft_margin * 1000) / 128;
612 iowrite16(reload, hpwdt_timer_reg);
613 iowrite16(0x85, hpwdt_timer_con);
614}
615
616static void hpwdt_stop(void)
617{
618 unsigned long data;
619
620 data = ioread16(hpwdt_timer_con);
621 data &= 0xFE;
622 iowrite16(data, hpwdt_timer_con);
623}
624
625static void hpwdt_ping(void)
626{
627 iowrite16(reload, hpwdt_timer_reg);
628}
629
630static int hpwdt_change_timer(int new_margin)
631{
632 /* Arbitrary, can't find the card's limits */
633 if (new_margin < 30 || new_margin > 600) {
634 printk(KERN_WARNING
635 "hpwdt: New value passed in is invalid: %d seconds.\n",
636 new_margin);
637 return -EINVAL;
638 }
639
640 soft_margin = new_margin;
641 printk(KERN_DEBUG
642 "hpwdt: New timer passed in is %d seconds.\n",
643 new_margin);
644 reload = (soft_margin * 1000) / 128;
645
646 return 0;
647}
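
The reload value computed above converts the margin from seconds into what appears to be 128 ms timer ticks (an assumption based on the arithmetic; the iLO2 register granularity is not documented in this file). A standalone sketch with two worked values:

static int margin_to_reload(int seconds)
{
	return (seconds * 1000) / 128;	/* milliseconds / 128 ms per tick */
}

/*
 * margin_to_reload(30)  == 234
 * margin_to_reload(600) == 4687, both well inside the 16-bit register
 * written by iowrite16() in hpwdt_start()/hpwdt_ping().
 */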
648
649/*
650 * /dev/watchdog handling
651 */
652static int hpwdt_open(struct inode *inode, struct file *file)
653{
654 /* /dev/watchdog can only be opened once */
655 if (test_and_set_bit(0, &hpwdt_is_open))
656 return -EBUSY;
657
658 /* Start the watchdog */
659 hpwdt_start();
660 hpwdt_ping();
661
662 return nonseekable_open(inode, file);
663}
664
665static int hpwdt_release(struct inode *inode, struct file *file)
666{
667 /* Stop the watchdog */
668 if (expect_release == 42) {
669 hpwdt_stop();
670 } else {
671 printk(KERN_CRIT
672 "hpwdt: Unexpected close, not stopping watchdog!\n");
673 hpwdt_ping();
674 }
675
676 expect_release = 0;
677
678 /* /dev/watchdog is being closed, make sure it can be re-opened */
679 clear_bit(0, &hpwdt_is_open);
680
681 return 0;
682}
683
684static ssize_t hpwdt_write(struct file *file, const char __user *data,
685 size_t len, loff_t *ppos)
686{
687 /* See if we got the magic character 'V' and reload the timer */
688 if (len) {
689 if (!nowayout) {
690 size_t i;
691
692 /* note: just in case someone wrote the magic character
693 * five months ago... */
694 expect_release = 0;
695
696 /* scan to see whether or not we got the magic char. */
697 for (i = 0; i != len; i++) {
698 char c;
699 if (get_user(c, data+i))
700 return -EFAULT;
701 if (c == 'V')
702 expect_release = 42;
703 }
704 }
705
706 /* someone wrote to us, we should reload the timer */
707 hpwdt_ping();
708 }
709
710 return len;
711}
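
From userspace the write path above implements the usual magic-close protocol: any write pings the timer, and a write containing 'V' arms expect_release so that a subsequent close() actually stops the watchdog (only honoured when the module was loaded with nowayout=0). A minimal userspace sketch:

#include <fcntl.h>
#include <unistd.h>

int stop_watchdog_cleanly(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return -1;

	write(fd, "V", 1);	/* pings the timer and arms the expected-close path */
	return close(fd);	/* hpwdt_release() then stops the timer */
}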
712
713static struct watchdog_info ident = {
714 .options = WDIOF_SETTIMEOUT |
715 WDIOF_KEEPALIVEPING |
716 WDIOF_MAGICCLOSE,
717 .identity = "HP iLO2 HW Watchdog Timer",
718};
719
720static long hpwdt_ioctl(struct file *file, unsigned int cmd,
721 unsigned long arg)
722{
723 void __user *argp = (void __user *)arg;
724 int __user *p = argp;
725 int new_margin;
726 int ret = -ENOTTY;
727
728 switch (cmd) {
729 case WDIOC_GETSUPPORT:
730 ret = 0;
731 if (copy_to_user(argp, &ident, sizeof(ident)))
732 ret = -EFAULT;
733 break;
734
735 case WDIOC_GETSTATUS:
736 case WDIOC_GETBOOTSTATUS:
737 ret = put_user(0, p);
738 break;
739
740 case WDIOC_KEEPALIVE:
741 hpwdt_ping();
742 ret = 0;
743 break;
744
745 case WDIOC_SETTIMEOUT:
746 ret = get_user(new_margin, p);
747 if (ret)
748 break;
749
750 ret = hpwdt_change_timer(new_margin);
751 if (ret)
752 break;
753
754 hpwdt_ping();
755 /* Fall through to report the new timeout */
756 case WDIOC_GETTIMEOUT:
757 ret = put_user(soft_margin, p);
758 break;
759 }
760 return ret;
761}
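
The ioctl cases above follow the standard watchdog API from <linux/watchdog.h>; a short userspace sketch that retunes the timeout and issues a keepalive (fd is an already-open /dev/watchdog descriptor):

#include <sys/ioctl.h>
#include <linux/watchdog.h>

int retune_watchdog(int fd, int seconds)
{
	int accepted = 0;

	if (ioctl(fd, WDIOC_SETTIMEOUT, &seconds))
		return -1;	/* rejected, e.g. outside the 30..600 s window */
	if (ioctl(fd, WDIOC_GETTIMEOUT, &accepted))
		return -1;
	if (ioctl(fd, WDIOC_KEEPALIVE, 0))
		return -1;
	return accepted;	/* the margin the driver is actually using */
}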
762
763/*
764 * Kernel interfaces
765 */
766static struct file_operations hpwdt_fops = {
767 .owner = THIS_MODULE,
768 .llseek = no_llseek,
769 .write = hpwdt_write,
770 .unlocked_ioctl = hpwdt_ioctl,
771 .open = hpwdt_open,
772 .release = hpwdt_release,
773};
774
775static struct miscdevice hpwdt_miscdev = {
776 .minor = WATCHDOG_MINOR,
777 .name = "watchdog",
778 .fops = &hpwdt_fops,
779};
780
781static struct notifier_block die_notifier = {
782 .notifier_call = hpwdt_pretimeout,
783 .priority = 0x7FFFFFFF,
784};
785
786/*
787 * Init & Exit
788 */
789
790static int __devinit hpwdt_init_one(struct pci_dev *dev,
791 const struct pci_device_id *ent)
792{
793 int retval;
794
795 /*
796 * First let's find out if we are on an iLO2 server. We will
797 * not run on a legacy ASM box.
798 */
799 if (dev->subsystem_vendor != PCI_VENDOR_ID_HP) {
800 dev_warn(&dev->dev,
801 "This server does not have an iLO2 ASIC.\n");
802 return -ENODEV;
803 }
804
805 if (pci_enable_device(dev)) {
806 dev_warn(&dev->dev,
807 "Not possible to enable PCI Device: 0x%x:0x%x.\n",
808 ent->vendor, ent->device);
809 return -ENODEV;
810 }
811
812 pci_mem_addr = pci_iomap(dev, 1, 0x80);
813 if (!pci_mem_addr) {
814 dev_warn(&dev->dev,
815 "Unable to detect the iLO2 server memory.\n");
816 retval = -ENOMEM;
817 goto error_pci_iomap;
818 }
819 hpwdt_timer_reg = pci_mem_addr + 0x70;
820 hpwdt_timer_con = pci_mem_addr + 0x72;
821
822 /* Make sure that we have a valid soft_margin */
823 if (hpwdt_change_timer(soft_margin))
824 hpwdt_change_timer(DEFAULT_MARGIN);
825
826 /*
827 * We need to map the ROM to get the CRU service.
828 * For 32 bit Operating Systems we need to go through the 32 Bit
829 * BIOS Service Directory
830 * For 64 bit Operating Systems we get that service through SMBIOS.
831 */
832 retval = detect_cru_service();
833 if (retval < 0) {
834 dev_warn(&dev->dev,
835 "Unable to detect the %d Bit CRU Service.\n",
836 HPWDT_ARCH);
837 goto error_get_cru;
838 }
839
840 /*
841 * We know this is the only CRU call we need to make so lets keep as
842 * few instructions as possible once the NMI comes in.
843 */
844 cmn_regs.u1.rah = 0x0D;
845 cmn_regs.u1.ral = 0x02;
846
847 retval = register_die_notifier(&die_notifier);
848 if (retval != 0) {
849 dev_warn(&dev->dev,
850 "Unable to register a die notifier (err=%d).\n",
851 retval);
852 goto error_die_notifier;
853 }
854
855 retval = misc_register(&hpwdt_miscdev);
856 if (retval < 0) {
857 dev_warn(&dev->dev,
858 "Unable to register miscdev on minor=%d (err=%d).\n",
859 WATCHDOG_MINOR, retval);
860 goto error_misc_register;
861 }
862
863 printk(KERN_INFO
864 "hp Watchdog Timer Driver: 1.00"
865 ", timer margin: %d seconds( nowayout=%d).\n",
866 soft_margin, nowayout);
867
868 return 0;
869
870error_misc_register:
871 unregister_die_notifier(&die_notifier);
872error_die_notifier:
873 if (cru_rom_addr)
874 iounmap(cru_rom_addr);
875error_get_cru:
876 pci_iounmap(dev, pci_mem_addr);
877error_pci_iomap:
878 pci_disable_device(dev);
879 return retval;
880}
881
882static void __devexit hpwdt_exit(struct pci_dev *dev)
883{
884 if (!nowayout)
885 hpwdt_stop();
886
887 misc_deregister(&hpwdt_miscdev);
888 unregister_die_notifier(&die_notifier);
889
890 if (cru_rom_addr)
891 iounmap(cru_rom_addr);
892 pci_iounmap(dev, pci_mem_addr);
893 pci_disable_device(dev);
894}
895
896static struct pci_driver hpwdt_driver = {
897 .name = "hpwdt",
898 .id_table = hpwdt_devices,
899 .probe = hpwdt_init_one,
900 .remove = __devexit_p(hpwdt_exit),
901};
902
903static void __exit hpwdt_cleanup(void)
904{
905 pci_unregister_driver(&hpwdt_driver);
906}
907
908static int __init hpwdt_init(void)
909{
910 return pci_register_driver(&hpwdt_driver);
911}
912
913MODULE_AUTHOR("Tom Mingarelli");
914MODULE_DESCRIPTION("hp watchdog driver");
915MODULE_LICENSE("GPL");
916MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
917
918module_param(soft_margin, int, 0);
919MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds");
920
921module_param(nowayout, int, 0);
922MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
923 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
924
925module_init(hpwdt_init);
926module_exit(hpwdt_cleanup);
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 98451747d3cd..789831b3fa00 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -45,10 +45,13 @@
 #include <linux/completion.h>
 #include <linux/jiffies.h>
 #include <linux/watchdog.h>
+#include <linux/platform_device.h>
+
 #include <asm/io.h>
 #include <asm/uaccess.h>
 
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/gpio.h>
 
 #define MTX1_WDT_INTERVAL (5 * HZ)
 
@@ -61,6 +64,7 @@ static struct {
 	volatile int queue;
 	int default_ticks;
 	unsigned long inuse;
+	unsigned gpio;
 } mtx1_wdt_device;
 
 static void mtx1_wdt_trigger(unsigned long unused)
@@ -73,7 +77,8 @@ static void mtx1_wdt_trigger(unsigned long unused)
 	 * toggle GPIO2_15
 	 */
 	tmp = au_readl(GPIO2_DIR);
-	tmp = (tmp & ~(1<<15)) | ((~tmp) & (1<<15));
+	tmp = (tmp & ~(1 << mtx1_wdt_device.gpio)) |
+			((~tmp) & (1 << mtx1_wdt_device.gpio));
 	au_writel (tmp, GPIO2_DIR);
 
 	if (mtx1_wdt_device.queue && ticks)
@@ -93,7 +98,7 @@ static void mtx1_wdt_start(void)
 {
 	if (!mtx1_wdt_device.queue) {
 		mtx1_wdt_device.queue = 1;
-		au_writel (au_readl(GPIO2_DIR) | (u32)(1<<15), GPIO2_DIR);
+		gpio_set_value(mtx1_wdt_device.gpio, 1);
 		mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
 	}
 	mtx1_wdt_device.running++;
@@ -103,7 +108,7 @@ static int mtx1_wdt_stop(void)
 {
 	if (mtx1_wdt_device.queue) {
 		mtx1_wdt_device.queue = 0;
-		au_writel (au_readl(GPIO2_DIR) & ~((u32)(1<<15)), GPIO2_DIR);
+		gpio_set_value(mtx1_wdt_device.gpio, 0);
 	}
 
 	ticks = mtx1_wdt_device.default_ticks;
@@ -197,10 +202,12 @@ static struct miscdevice mtx1_wdt_misc = {
 };
 
 
-static int __init mtx1_wdt_init(void)
+static int mtx1_wdt_probe(struct platform_device *pdev)
 {
 	int ret;
 
+	mtx1_wdt_device.gpio = pdev->resource[0].start;
+
 	if ((ret = misc_register(&mtx1_wdt_misc)) < 0) {
 		printk(KERN_ERR " mtx-1_wdt : failed to register\n");
 		return ret;
@@ -222,13 +229,30 @@ static int __init mtx1_wdt_init(void)
 	return 0;
 }
 
-static void __exit mtx1_wdt_exit(void)
+static int mtx1_wdt_remove(struct platform_device *pdev)
 {
 	if (mtx1_wdt_device.queue) {
 		mtx1_wdt_device.queue = 0;
 		wait_for_completion(&mtx1_wdt_device.stop);
 	}
 	misc_deregister(&mtx1_wdt_misc);
+	return 0;
+}
+
+static struct platform_driver mtx1_wdt = {
+	.probe = mtx1_wdt_probe,
+	.remove = mtx1_wdt_remove,
+	.driver.name = "mtx1-wdt",
+};
+
+static int __init mtx1_wdt_init(void)
+{
+	return platform_driver_register(&mtx1_wdt);
+}
+
+static void __exit mtx1_wdt_exit(void)
+{
+	platform_driver_unregister(&mtx1_wdt);
 }
 
 module_init(mtx1_wdt_init);
@@ -237,3 +261,4 @@ module_exit(mtx1_wdt_exit);
 MODULE_AUTHOR("Michael Stickel, Florian Fainelli");
 MODULE_DESCRIPTION("Driver for the MTX-1 watchdog");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c
new file mode 100644
index 000000000000..b94431433695
--- /dev/null
+++ b/drivers/watchdog/sb_wdog.c
@@ -0,0 +1,353 @@
1/*
2 * Watchdog driver for SiByte SB1 SoCs
3 *
4 * Copyright (C) 2007 OnStor, Inc. * Andrew Sharp <andy.sharp@onstor.com>
5 *
6 * This driver is intended to make the second of two hardware watchdogs
7 * on the Sibyte 12XX and 11XX SoCs available to the user. There are two
8 * such devices available on the SoC, but it seems that there isn't an
9 * enumeration class for watchdogs in Linux like there is for RTCs.
10 * The second is used rather than the first because it uses IRQ 1,
11 * thereby avoiding all that IRQ 0 problematic nonsense.
12 *
13 * I have not tried this driver on a 1480 processor; it might work
14 * just well enough to really screw things up.
15 *
16 * It is a simple timer, and there is an interrupt that is raised the
17 * first time the timer expires. The second time it expires, the chip
18 * is reset and there is no way to redirect that NMI. Which could
19 * be problematic in some cases where this chip is sitting on the HT
20 * bus and has just taken responsibility for providing a cache block.
21 * Since the reset can't be redirected to the external reset pin, it is
22 * possible that other HT connected processors might hang and not reset.
23 * For Linux, a soft reset would probably be even worse than a hard reset.
24 * There you have it.
25 *
26 * The timer takes 23 bits of a 64 bit register (?) as a count value,
27 * and decrements the count every microsecond, for a max value of
28 * 0x7fffff usec or about 8.3ish seconds.
29 *
30 * This watchdog borrows some user semantics from the softdog driver,
31 * in that if you close the fd, it leaves the watchdog running, unless
32 * you previously wrote a 'V' to the fd, in which case it disables
33 * the watchdog when you close the fd like some other drivers.
34 *
35 * Based on various other watchdog drivers, which are probably all
36 * loosely based on something Alan Cox wrote years ago.
37 *
38 * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
39 * http://www.redhat.com
40 *
41 * This program is free software; you can redistribute it and/or
42 * modify it under the terms of the GNU General Public License
43 * version 1 or 2 as published by the Free Software Foundation.
44 *
45 */
46#include <linux/module.h>
47#include <linux/io.h>
48#include <linux/uaccess.h>
49#include <linux/fs.h>
50#include <linux/reboot.h>
51#include <linux/miscdevice.h>
52#include <linux/watchdog.h>
53#include <linux/interrupt.h>
54
55#include <asm/sibyte/sb1250.h>
56#include <asm/sibyte/sb1250_regs.h>
57#include <asm/sibyte/sb1250_int.h>
58#include <asm/sibyte/sb1250_scd.h>
59
60
61/*
62 * set the initial count value of a timer
63 *
64 * wdog is the iomem address of the cfg register
65 */
66void sbwdog_set(char __iomem *wdog, unsigned long t)
67{
68 __raw_writeb(0, wdog);
69 __raw_writeq(t & 0x7fffffUL, wdog - 0x10);
70}
71
72/*
73 * cause the timer to [re]load its initial count and start counting
74 * all over again
75 *
76 * wdog is the iomem address of the cfg register
77 */
78void sbwdog_pet(char __iomem *wdog)
79{
80 __raw_writeb(__raw_readb(wdog) | 1, wdog);
81}
82
83static unsigned long sbwdog_gate; /* keeps it to one thread only */
84static char __iomem *kern_dog = (char __iomem *)(IO_BASE + (A_SCD_WDOG_CFG_0));
85static char __iomem *user_dog = (char __iomem *)(IO_BASE + (A_SCD_WDOG_CFG_1));
86static unsigned long timeout = 0x7fffffUL; /* useconds: 8.3ish secs. */
87static int expect_close;
88
89static struct watchdog_info ident = {
90 .options = WDIOF_CARDRESET | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
91 .identity = "SiByte Watchdog",
92};
93
94/*
95 * Allow only a single thread to walk the dog
96 */
97static int sbwdog_open(struct inode *inode, struct file *file)
98{
99 nonseekable_open(inode, file);
100 if (test_and_set_bit(0, &sbwdog_gate)) {
101 return -EBUSY;
102 }
103 __module_get(THIS_MODULE);
104
105 /*
106 * Activate the timer
107 */
108 sbwdog_set(user_dog, timeout);
109 __raw_writeb(1, user_dog);
110
111 return 0;
112}
113
114/*
115 * Put the dog back in the kennel.
116 */
117static int sbwdog_release(struct inode *inode, struct file *file)
118{
119 if (expect_close == 42) {
120 __raw_writeb(0, user_dog);
121 module_put(THIS_MODULE);
122 } else {
123 printk(KERN_CRIT "%s: Unexpected close, not stopping watchdog!\n",
124 ident.identity);
125 sbwdog_pet(user_dog);
126 }
127 clear_bit(0, &sbwdog_gate);
128 expect_close = 0;
129
130 return 0;
131}
132
133/*
134 * 42 - the answer
135 */
136static ssize_t sbwdog_write(struct file *file, const char __user *data,
137 size_t len, loff_t *ppos)
138{
139 int i;
140
141 if (len) {
142 /*
143 * restart the timer
144 */
145 expect_close = 0;
146
147 for (i = 0; i != len; i++) {
148 char c;
149
150 if (get_user(c, data + i)) {
151 return -EFAULT;
152 }
153 if (c == 'V') {
154 expect_close = 42;
155 }
156 }
157 sbwdog_pet(user_dog);
158 }
159
160 return len;
161}
162
163static int sbwdog_ioctl(struct inode *inode, struct file *file,
164 unsigned int cmd, unsigned long arg)
165{
166 int ret = -ENOTTY;
167 unsigned long time;
168 void __user *argp = (void __user *)arg;
169 int __user *p = argp;
170
171 switch (cmd) {
172 case WDIOC_GETSUPPORT:
173 ret = copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
174 break;
175
176 case WDIOC_GETSTATUS:
177 case WDIOC_GETBOOTSTATUS:
178 ret = put_user(0, p);
179 break;
180
181 case WDIOC_SETTIMEOUT:
182 ret = get_user(time, p);
183 if (ret) {
184 break;
185 }
186
187 time *= 1000000;
188 if (time > 0x7fffffUL) {
189 ret = -EINVAL;
190 break;
191 }
192 timeout = time;
193 sbwdog_set(user_dog, timeout);
194 sbwdog_pet(user_dog);
195
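		/* Fall through to report the freshly loaded timeout */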
196 case WDIOC_GETTIMEOUT:
197 /*
198 * get the remaining count from the ... count register
199 * which is 1*8 before the config register
200 */
201 ret = put_user(__raw_readq(user_dog - 8) / 1000000, p);
202 break;
203
204 case WDIOC_KEEPALIVE:
205 sbwdog_pet(user_dog);
206 ret = 0;
207 break;
208 }
209 return ret;
210}
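
A standalone sketch of the WDIOC_SETTIMEOUT arithmetic above: the requested value is whole seconds, converted to microseconds, and must fit the 23-bit count register, so only roughly 1 to 8 seconds are representable (8,000,000 <= 0x7fffff == 8,388,607, while 9 s already overflows):

static long seconds_to_sbwdog_count(unsigned long secs)
{
	unsigned long usecs = secs * 1000000;	/* counter ticks once per usec */

	return (usecs > 0x7fffffUL) ? -1 : (long)usecs;
}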
211
212/*
213 * Notifier for system down
214 */
215static int
216sbwdog_notify_sys(struct notifier_block *this, unsigned long code, void *erf)
217{
218 if (code == SYS_DOWN || code == SYS_HALT) {
219 /*
220 * sit and sit
221 */
222 __raw_writeb(0, user_dog);
223 __raw_writeb(0, kern_dog);
224 }
225
226 return NOTIFY_DONE;
227}
228
229static const struct file_operations sbwdog_fops =
230{
231 .owner = THIS_MODULE,
232 .llseek = no_llseek,
233 .write = sbwdog_write,
234 .ioctl = sbwdog_ioctl,
235 .open = sbwdog_open,
236 .release = sbwdog_release,
237};
238
239static struct miscdevice sbwdog_miscdev =
240{
241 .minor = WATCHDOG_MINOR,
242 .name = "watchdog",
243 .fops = &sbwdog_fops,
244};
245
246static struct notifier_block sbwdog_notifier = {
247 .notifier_call = sbwdog_notify_sys,
248};
249
250/*
251 * interrupt handler
252 *
253 * doesn't do a whole lot for user, but oh so cleverly written so kernel
254 * code can use it to re-up the watchdog, thereby saving the kernel from
255 * having to create and maintain a timer, just to tickle another timer,
256 * which is just so wrong.
257 */
258irqreturn_t sbwdog_interrupt(int irq, void *addr)
259{
260 unsigned long wd_init;
261 char *wd_cfg_reg = (char *)addr;
262 u8 cfg;
263
264 cfg = __raw_readb(wd_cfg_reg);
265 wd_init = __raw_readq(wd_cfg_reg - 8) & 0x7fffff;
266
267 /*
268 * if it's the second watchdog timer, it's for those users
269 */
270 if (wd_cfg_reg == user_dog) {
271 printk(KERN_CRIT
272 "%s in danger of initiating system reset in %ld.%01ld seconds\n",
273 ident.identity, wd_init / 1000000, (wd_init / 100000) % 10);
274 } else {
275 cfg |= 1;
276 }
277
278 __raw_writeb(cfg, wd_cfg_reg);
279
280 return IRQ_HANDLED;
281}
282
283static int __init sbwdog_init(void)
284{
285 int ret;
286
287 /*
288 * register a reboot notifier
289 */
290 ret = register_reboot_notifier(&sbwdog_notifier);
291 if (ret) {
292 printk (KERN_ERR "%s: cannot register reboot notifier (err=%d)\n",
293 ident.identity, ret);
294 return ret;
295 }
296
297 /*
298 * get the resources
299 */
300 ret = misc_register(&sbwdog_miscdev);
301 if (ret == 0) {
302 printk(KERN_INFO "%s: timeout is %ld.%ld secs\n", ident.identity,
303 timeout / 1000000, (timeout / 100000) % 10);
304 }
305
306 ret = request_irq(1, sbwdog_interrupt, IRQF_DISABLED | IRQF_SHARED,
307 ident.identity, (void *)user_dog);
308 if (ret) {
309 printk(KERN_ERR "%s: failed to request irq 1 - %d\n", ident.identity,
310 ret);
311 misc_deregister(&sbwdog_miscdev);
312 }
313
314 return ret;
315}
316
317static void __exit sbwdog_exit(void)
318{
319 misc_deregister(&sbwdog_miscdev);
320}
321
322module_init(sbwdog_init);
323module_exit(sbwdog_exit);
324
325MODULE_AUTHOR("Andrew Sharp <andy.sharp@onstor.com>");
326MODULE_DESCRIPTION("SiByte Watchdog");
327
328module_param(timeout, ulong, 0);
329MODULE_PARM_DESC(timeout,
330 "Watchdog timeout in microseconds (max/default 8388607 or 8.3ish secs)");
331
332MODULE_LICENSE("GPL");
333MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
334
335/*
336 * example code that can be put in a platform code area to utilize the
337 * first watchdog timer for the kernels own purpose.
338
339 void
340platform_wd_setup(void)
341{
342 int ret;
343
344 ret = request_irq(0, sbwdog_interrupt, IRQF_DISABLED | IRQF_SHARED,
345 "Kernel Watchdog", IOADDR(A_SCD_WDOG_CFG_0));
346 if (ret) {
347 printk(KERN_CRIT "Watchdog IRQ zero(0) failed to be requested - %d\n",
348 ret);
349 }
350}
351
352
353 */